
deps: upgrade v8 to 3.18.0

v0.11.1-release
Ben Noordhuis 12 years ago
commit 9f682265d6
  1. deps/v8/ChangeLog (19)
  2. deps/v8/Makefile (130)
  3. deps/v8/Makefile.nacl (96)
  4. deps/v8/PRESUBMIT.py (2)
  5. deps/v8/build/common.gypi (148)
  6. deps/v8/build/standalone.gypi (4)
  7. deps/v8/include/v8-preparser.h (2)
  8. deps/v8/include/v8-profiler.h (49)
  9. deps/v8/include/v8.h (121)
  10. deps/v8/preparser/preparser-process.cc (6)
  11. deps/v8/preparser/preparser.gyp (17)
  12. deps/v8/src/accessors.cc (58)
  13. deps/v8/src/accessors.h (7)
  14. deps/v8/src/allocation.cc (6)
  15. deps/v8/src/api.cc (237)
  16. deps/v8/src/api.h (4)
  17. deps/v8/src/arguments.h (21)
  18. deps/v8/src/arm/assembler-arm-inl.h (18)
  19. deps/v8/src/arm/assembler-arm.cc (202)
  20. deps/v8/src/arm/assembler-arm.h (19)
  21. deps/v8/src/arm/code-stubs-arm.cc (1112)
  22. deps/v8/src/arm/code-stubs-arm.h (6)
  23. deps/v8/src/arm/codegen-arm.cc (26)
  24. deps/v8/src/arm/constants-arm.cc (2)
  25. deps/v8/src/arm/constants-arm.h (19)
  26. deps/v8/src/arm/deoptimizer-arm.cc (204)
  27. deps/v8/src/arm/disasm-arm.cc (5)
  28. deps/v8/src/arm/frames-arm.h (12)
  29. deps/v8/src/arm/full-codegen-arm.cc (78)
  30. deps/v8/src/arm/lithium-arm.cc (157)
  31. deps/v8/src/arm/lithium-arm.h (148)
  32. deps/v8/src/arm/lithium-codegen-arm.cc (551)
  33. deps/v8/src/arm/lithium-codegen-arm.h (14)
  34. deps/v8/src/arm/lithium-gap-resolver-arm.cc (11)
  35. deps/v8/src/arm/macro-assembler-arm.cc (235)
  36. deps/v8/src/arm/macro-assembler-arm.h (43)
  37. deps/v8/src/arm/simulator-arm.cc (211)
  38. deps/v8/src/arm/simulator-arm.h (8)
  39. deps/v8/src/arm/stub-cache-arm.cc (611)
  40. deps/v8/src/array.js (13)
  41. deps/v8/src/assembler.cc (20)
  42. deps/v8/src/assembler.h (4)
  43. deps/v8/src/ast.cc (6)
  44. deps/v8/src/ast.h (19)
  45. deps/v8/src/atomicops_internals_x86_gcc.cc (7)
  46. deps/v8/src/atomicops_internals_x86_gcc.h (2)
  47. deps/v8/src/bignum.cc (9)
  48. deps/v8/src/bootstrapper.cc (120)
  49. deps/v8/src/builtins-decls.h (40)
  50. deps/v8/src/builtins.cc (40)
  51. deps/v8/src/builtins.h (2)
  52. deps/v8/src/code-stubs-hydrogen.cc (72)
  53. deps/v8/src/code-stubs.cc (6)
  54. deps/v8/src/code-stubs.h (56)
  55. deps/v8/src/collection.js (97)
  56. deps/v8/src/compiler.cc (31)
  57. deps/v8/src/contexts.h (18)
  58. deps/v8/src/conversions-inl.h (2)
  59. deps/v8/src/cpu-profiler.cc (4)
  60. deps/v8/src/cpu-profiler.h (1)
  61. deps/v8/src/d8.cc (12)
  62. deps/v8/src/d8.gyp (7)
  63. deps/v8/src/date.js (19)
  64. deps/v8/src/debug.cc (16)
  65. deps/v8/src/deoptimizer.cc (116)
  66. deps/v8/src/deoptimizer.h (47)
  67. deps/v8/src/disassembler.cc (11)
  68. deps/v8/src/elements.cc (4)
  69. deps/v8/src/execution.cc (24)
  70. deps/v8/src/execution.h (5)
  71. deps/v8/src/factory.cc (20)
  72. deps/v8/src/flag-definitions.h (35)
  73. deps/v8/src/flags.cc (29)
  74. deps/v8/src/frames.h (13)
  75. deps/v8/src/full-codegen.cc (34)
  76. deps/v8/src/full-codegen.h (19)
  77. deps/v8/src/gdb-jit.cc (39)
  78. deps/v8/src/generator.js (74)
  79. deps/v8/src/global-handles.h (7)
  80. deps/v8/src/globals.h (12)
  81. deps/v8/src/handles-inl.h (1)
  82. deps/v8/src/handles.cc (27)
  83. deps/v8/src/handles.h (12)
  84. deps/v8/src/heap-inl.h (22)
  85. deps/v8/src/heap-snapshot-generator.cc (2)
  86. deps/v8/src/heap.cc (124)
  87. deps/v8/src/heap.h (60)
  88. deps/v8/src/hydrogen-instructions.cc (97)
  89. deps/v8/src/hydrogen-instructions.h (289)
  90. deps/v8/src/hydrogen.cc (613)
  91. deps/v8/src/hydrogen.h (72)
  92. deps/v8/src/ia32/assembler-ia32.cc (6)
  93. deps/v8/src/ia32/assembler-ia32.h (7)
  94. deps/v8/src/ia32/builtins-ia32.cc (35)
  95. deps/v8/src/ia32/code-stubs-ia32.cc (191)
  96. deps/v8/src/ia32/codegen-ia32.cc (579)
  97. deps/v8/src/ia32/cpu-ia32.cc (2)
  98. deps/v8/src/ia32/deoptimizer-ia32.cc (114)
  99. deps/v8/src/ia32/frames-ia32.h (12)
  100. deps/v8/src/ia32/full-codegen-ia32.cc (34)

deps/v8/ChangeLog (19)

@@ -1,3 +1,22 @@
2013-04-17: Version 3.18.0

        Enabled pretenuring of fast literals in high promotion mode.

        Removed preparser library; link preparser executable against full V8.

        Fixed set-up of intrinsic's 'constructor' properties.
        (Chromium issue 229445)

        ES6 symbols: extended V8 API to support symbols (issue 2158).

        Removed ARM support for VFP2.

        Made __proto__ a real JavaScript accessor property.
        (issue 1949 and issue 2606)

        Performance and stability improvements on all platforms.


2013-04-04: Version 3.17.16

        Stack trace API: poison stack frames below the first strict mode frame.

deps/v8/Makefile (130)

@@ -36,6 +36,7 @@ TESTFLAGS ?=
ANDROID_NDK_ROOT ?=
ANDROID_TOOLCHAIN ?=
ANDROID_V8 ?= /data/local/v8
NACL_SDK_ROOT ?=
# Special build flags. Use them like this: "make library=shared"
@@ -83,22 +84,18 @@ endif
ifeq ($(gdbjit), on)
GYPFLAGS += -Dv8_enable_gdbjit=1
endif
# vfp2=off
ifeq ($(vfp2), off)
GYPFLAGS += -Dv8_can_use_vfp2_instructions=false
else
GYPFLAGS += -Dv8_can_use_vfp2_instructions=true -Darm_fpu=vfpv2
endif
# vfp3=off
ifeq ($(vfp3), off)
GYPFLAGS += -Dv8_can_use_vfp3_instructions=false
else
GYPFLAGS += -Dv8_can_use_vfp3_instructions=true -Darm_fpu=vfpv3
# vtunejit=on
ifeq ($(vtunejit), on)
GYPFLAGS += -Dv8_enable_vtunejit=1
endif
# debuggersupport=off
ifeq ($(debuggersupport), off)
GYPFLAGS += -Dv8_enable_debugger_support=0
endif
# unalignedaccess=on
ifeq ($(unalignedaccess), on)
GYPFLAGS += -Dv8_can_use_unaligned_accesses=true
endif
# soname_version=1.2.3
ifdef soname_version
GYPFLAGS += -Dsoname_version=$(soname_version)
@@ -119,13 +116,66 @@ endif
ifeq ($(regexp), interpreted)
GYPFLAGS += -Dv8_interpreted_regexp=1
endif
# hardfp=on
ifeq ($(hardfp), on)
GYPFLAGS += -Dv8_use_arm_eabi_hardfloat=true
endif
# armv7=false
# arm specific flags.
# armv7=false/true
ifeq ($(armv7), false)
GYPFLAGS += -Darmv7=0
else
ifeq ($(armv7), true)
GYPFLAGS += -Darmv7=1
endif
endif
# vfp2=off. Deprecated, use armfpu=
# vfp3=off. Deprecated, use armfpu=
ifeq ($(vfp3), off)
GYPFLAGS += -Darm_fpu=vfp
endif
# hardfp=on/off. Deprecated, use armfloatabi
ifeq ($(hardfp),on)
GYPFLAGS += -Darm_float_abi=hard
else
ifeq ($(hardfp),off)
GYPFLAGS += -Darm_float_abi=softfp
endif
endif
# armneon=on/off
ifeq ($(armneon), on)
GYPFLAGS += -Darm_neon=1
endif
# fpu: armfpu=xxx
# xxx: vfp, vfpv3-d16, vfpv3, neon.
ifeq ($(armfpu),)
ifneq ($(vfp3), off)
GYPFLAGS += -Darm_fpu=default
endif
else
GYPFLAGS += -Darm_fpu=$(armfpu)
endif
# float abi: armfloatabi=softfp/hard
ifeq ($(armfloatabi),)
ifeq ($(hardfp),)
GYPFLAGS += -Darm_float_abi=default
endif
else
GYPFLAGS += -Darm_float_abi=$(armfloatabi)
endif
# armthumb=on/off
ifeq ($(armthumb), off)
GYPFLAGS += -Darm_thumb=0
else
ifeq ($(armthumb), on)
GYPFLAGS += -Darm_thumb=1
endif
endif
# armtest=on
# With this flag set, by default v8 will only use features implied
# by the compiler (no probe). This is done by modifying the default
# values of enable_armv7, enable_vfp2, enable_vfp3 and enable_32dregs.
# Modifying these flags when launching v8 will enable the probing for
# the specified values.
# When using the simulator, this flag is implied.
ifeq ($(armtest), on)
GYPFLAGS += -Darm_test=on
endif
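# Usage sketch (illustrative, not part of the original file): cross-compile
# an ARM release build with the new flags, e.g.
#   make arm.release armfpu=neon armfloatabi=hard
# The deprecated spellings still map onto the new variables:
#   make arm.release vfp3=off hardfp=on   # same as armfpu=vfp armfloatabi=hard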
# ----------------- available targets: --------------------
@@ -136,6 +186,7 @@ endif
# - "native": current host's architecture, release mode
# - any of the above with .check appended, e.g. "ia32.release.check"
# - "android": cross-compile for Android/ARM
# - "nacl" : cross-compile for Native Client (ia32 and x64)
# - default (no target specified): build all DEFAULT_ARCHES and MODES
# - "check": build all targets and run all tests
# - "<arch>.clean" for any <arch> in ARCHES
@@ -149,19 +200,27 @@ ARCHES = ia32 x64 arm mipsel
DEFAULT_ARCHES = ia32 x64 arm
MODES = release debug
ANDROID_ARCHES = android_ia32 android_arm android_mipsel
NACL_ARCHES = nacl_ia32 nacl_x64
# List of files that trigger Makefile regeneration:
GYPFILES = build/all.gyp build/common.gypi build/standalone.gypi \
preparser/preparser.gyp samples/samples.gyp src/d8.gyp \
test/cctest/cctest.gyp tools/gyp/v8.gyp
# If vtunejit=on, the v8vtune.gyp will be appended.
ifeq ($(vtunejit), on)
GYPFILES += src/third_party/vtune/v8vtune.gyp
endif
# Generates all combinations of ARCHES and MODES, e.g. "ia32.release".
BUILDS = $(foreach mode,$(MODES),$(addsuffix .$(mode),$(ARCHES)))
ANDROID_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(ANDROID_ARCHES)))
NACL_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(NACL_ARCHES)))
# Generates corresponding test targets, e.g. "ia32.release.check".
CHECKS = $(addsuffix .check,$(BUILDS))
ANDROID_CHECKS = $(addsuffix .check,$(ANDROID_BUILDS))
NACL_CHECKS = $(addsuffix .check,$(NACL_BUILDS))
# File where previously used GYPFLAGS are stored.
ENVFILE = $(OUTDIR)/environment
@@ -169,7 +228,9 @@ ENVFILE = $(OUTDIR)/environment
$(ARCHES) $(MODES) $(BUILDS) $(CHECKS) $(addsuffix .clean,$(ARCHES)) \
$(addsuffix .check,$(MODES)) $(addsuffix .check,$(ARCHES)) \
$(ANDROID_ARCHES) $(ANDROID_BUILDS) $(ANDROID_CHECKS) \
must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN
must-set-ANDROID_NDK_ROOT_OR_TOOLCHAIN \
$(NACL_ARCHES) $(NACL_BUILDS) $(NACL_CHECKS) \
must-set-NACL_SDK_ROOT
# Target definitions. "all" is the default.
all: $(MODES)
@@ -213,6 +274,16 @@ $(ANDROID_BUILDS): $(GYPFILES) $(ENVFILE) build/android.gypi \
OUTDIR="$(OUTDIR)" \
GYPFLAGS="$(GYPFLAGS)"
$(NACL_ARCHES): $(addprefix $$@.,$(MODES))
$(NACL_BUILDS): $(GYPFILES) $(ENVFILE) \
Makefile.nacl must-set-NACL_SDK_ROOT
@$(MAKE) -f Makefile.nacl $@ \
ARCH="$(basename $@)" \
MODE="$(subst .,,$(suffix $@))" \
OUTDIR="$(OUTDIR)" \
GYPFLAGS="$(GYPFLAGS)"
# Test targets.
check: all
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
@@ -244,12 +315,21 @@ $(addsuffix .check, $(ANDROID_BUILDS)): $$(basename $$@).sync
$(addsuffix .check, $(ANDROID_ARCHES)): \
$(addprefix $$(basename $$@).,$(MODES)).check
$(addsuffix .check, $(NACL_BUILDS)): $$(basename $$@)
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR) \
--arch-and-mode=$(basename $@) \
--timeout=600 --nopresubmit \
--command-prefix="tools/nacl-run.py"
$(addsuffix .check, $(NACL_ARCHES)): \
$(addprefix $$(basename $$@).,$(MODES)).check
native.check: native
@tools/run-tests.py $(TESTJOBS) --outdir=$(OUTDIR)/native \
--arch-and-mode=. $(TESTFLAGS)
# Clean targets. You can clean each architecture individually, or everything.
$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)):
$(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)):
rm -f $(OUTDIR)/Makefile.$(basename $@)
rm -rf $(OUTDIR)/$(basename $@).release
rm -rf $(OUTDIR)/$(basename $@).debug
@@ -260,7 +340,7 @@ native.clean:
rm -rf $(OUTDIR)/native
find $(OUTDIR) -regex '.*\(host\|target\).native\.mk' -delete
clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES)) native.clean
clean: $(addsuffix .clean, $(ARCHES) $(ANDROID_ARCHES) $(NACL_ARCHES)) native.clean
# GYP file generation targets.
OUT_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(ARCHES))
@@ -283,6 +363,18 @@ ifndef ANDROID_TOOLCHAIN
endif
endif
# Note that NACL_SDK_ROOT must be set to point to an appropriate
# Native Client SDK before using this makefile. You can download
# an SDK here:
# https://developers.google.com/native-client/sdk/download
# The path indicated by NACL_SDK_ROOT will typically end with
# a folder for a pepper version such as "pepper_25" that should
# have "tools" and "toolchain" subdirectories.
must-set-NACL_SDK_ROOT:
ifndef NACL_SDK_ROOT
$(error NACL_SDK_ROOT must be set)
endif
# Replaces the old with the new environment file if they're different, which
# will trigger GYP to regenerate Makefiles.
$(ENVFILE): $(ENVFILE).new

deps/v8/Makefile.nacl (96)

@@ -0,0 +1,96 @@
#
# Copyright 2013 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# These definitions should be consistent with the main Makefile
NACL_ARCHES = nacl_ia32 nacl_x64
MODES = release debug
# Generates all combinations of NACL ARCHES and MODES,
# e.g. "nacl_ia32.release" or "nacl_x64.release"
NACL_BUILDS = $(foreach mode,$(MODES), \
$(addsuffix .$(mode),$(NACL_ARCHES)))
HOST_OS = $(shell uname -s | sed -e 's/Linux/linux/;s/Darwin/mac/')
ifeq ($(HOST_OS), linux)
TOOLCHAIN_DIR = linux_x86_glibc
else
ifeq ($(HOST_OS), mac)
TOOLCHAIN_DIR = mac_x86_glibc
else
$(error Host platform "${HOST_OS}" is not supported)
endif
endif
TOOLCHAIN_PATH = ${NACL_SDK_ROOT}/toolchain
NACL_TOOLCHAIN ?= ${TOOLCHAIN_PATH}/${TOOLCHAIN_DIR}
ifeq ($(ARCH), nacl_ia32)
GYPENV = nacl_target_arch=nacl_ia32 v8_target_arch=arm v8_host_arch=ia32
TOOLCHAIN_ARCH = x86-4.4
NACL_CC = "$(NACL_TOOLCHAIN)/bin/i686-nacl-gcc"
NACL_CXX = "$(NACL_TOOLCHAIN)/bin/i686-nacl-g++"
NACL_LINK = "$(NACL_TOOLCHAIN)/bin/i686-nacl-g++"
else
ifeq ($(ARCH), nacl_x64)
GYPENV = nacl_target_arch=nacl_x64 v8_target_arch=arm v8_host_arch=ia32
TOOLCHAIN_ARCH = x86-4.4
NACL_CC = "$(NACL_TOOLCHAIN)/bin/x86_64-nacl-gcc"
NACL_CXX = "$(NACL_TOOLCHAIN)/bin/x86_64-nacl-g++"
NACL_LINK = "$(NACL_TOOLCHAIN)/bin/x86_64-nacl-g++"
else
$(error Target architecture "${ARCH}" is not supported)
endif
endif
ifeq ($(wildcard $(NACL_TOOLCHAIN)),)
$(error Cannot find Native Client toolchain in "${NACL_TOOLCHAIN}")
endif
# For mksnapshot host generation.
GYPENV += host_os=${HOST_OS}
NACL_MAKEFILES = $(addprefix $(OUTDIR)/Makefile.,$(NACL_ARCHES))
.SECONDEXPANSION:
# For some reason the $$(basename $$@) expansion didn't work here...
$(NACL_BUILDS): $(NACL_MAKEFILES)
@$(MAKE) -C "$(OUTDIR)" -f Makefile.$(basename $@) \
CXX=${NACL_CXX} \
LINK=${NACL_LINK} \
BUILDTYPE=$(shell echo $(subst .,,$(suffix $@)) | \
python -c "print raw_input().capitalize()") \
builddir="$(shell pwd)/$(OUTDIR)/$@"
# NACL GYP file generation targets.
$(NACL_MAKEFILES):
@GYP_GENERATORS=make \
GYP_DEFINES="${GYPENV}" \
CC=${NACL_CC} \
CXX=${NACL_CXX} \
build/gyp/gyp --generator-output="${OUTDIR}" build/all.gyp \
-Ibuild/standalone.gypi --depth=. \
-S.$(subst .,,$(suffix $@)) $(GYPFLAGS)
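# Usage sketch (illustrative): these targets are normally driven from the main
# Makefile, e.g.
#   NACL_SDK_ROOT=$HOME/nacl_sdk/pepper_25 make nacl_ia32.release
# where pepper_25 stands in for whatever pepper version the SDK provides.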

deps/v8/PRESUBMIT.py (2)

@@ -53,6 +53,7 @@ def _CommonChecks(input_api, output_api):
results = []
results.extend(input_api.canned_checks.CheckOwners(
input_api, output_api, source_file_filter=None))
results.extend(_V8PresubmitChecks(input_api, output_api))
return results
@@ -67,5 +68,4 @@ def CheckChangeOnCommit(input_api, output_api):
results.extend(_CommonChecks(input_api, output_api))
results.extend(input_api.canned_checks.CheckChangeHasDescription(
input_api, output_api))
results.extend(_V8PresubmitChecks(input_api, output_api))
return results

deps/v8/build/common.gypi (148)

@@ -35,6 +35,13 @@
'CXX%': '${CXX:-$(which g++)}', # Used to assemble a shell command.
'v8_compress_startup_data%': 'off',
'v8_target_arch%': '<(target_arch)',
# Native Client builds currently use the V8 ARM JIT and
# arm/simulator-arm.cc to defer the significant effort required
# for NaCl JIT support. The nacl_target_arch variable provides
# the 'true' target arch for places in this file that need it.
# TODO(bradchen): get rid of nacl_target_arch when someday
# NaCl V8 builds stop using the ARM simulator
'nacl_target_arch%': 'none', # must be set externally
# Setting 'v8_can_use_unaligned_accesses' to 'true' will allow the code
# generated by V8 to do unaligned memory access, and setting it to 'false'
@@ -44,30 +51,17 @@
# access is allowed for all CPUs.
'v8_can_use_unaligned_accesses%': 'default',
# Setting 'v8_can_use_vfp2_instructions' to 'true' will enable use of ARM VFP
# instructions in the V8 generated code. VFP instructions will be enabled
# both for the snapshot and for the ARM target. Leaving the default value
# of 'false' will avoid VFP instructions in the snapshot and use CPU feature
# probing when running on the target.
'v8_can_use_vfp2_instructions%': 'false',
'v8_can_use_vfp3_instructions%': 'false',
# Setting 'v8_can_use_vfp32dregs' to 'true' will cause V8 to use the VFP
# registers d16-d31 in the generated code, both in the snapshot and for the
# ARM target. Leaving the default value of 'false' will avoid the use of
# these registers in the snapshot and use CPU feature probing when running
# on the target.
'v8_can_use_vfp32dregs%': 'false',
'arm_test%': 'off',
# Similar to vfp but on MIPS.
'v8_can_use_fpu_instructions%': 'true',
# Setting v8_use_arm_eabi_hardfloat to true will turn on V8 support for ARM
# EABI calling convention where double arguments are passed in VFP
# registers. Note that the GCC flag '-mfloat-abi=hard' should be used as
# well when compiling for the ARM target.
'v8_use_arm_eabi_hardfloat%': 'false',
# Similar to the ARM hard float ABI but on MIPS.
'v8_use_mips_abi_hardfloat%': 'true',
@@ -136,55 +130,105 @@
'defines': [
'V8_TARGET_ARCH_ARM',
],
'variables': {
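# Infer simulator use from the compiler: "no" when <(CXX) reports an ARM
# target (a real ARM toolchain), "yes" otherwise (ARM simulator build).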
'armsimulator': '<!($(echo <(CXX)) -v 2>&1 | grep -q "^Target: arm" && echo "no" || echo "yes")',
},
'conditions': [
['armv7==1', {
'defines': [
'CAN_USE_ARMV7_INSTRUCTIONS=1',
],
}],
[ 'v8_can_use_unaligned_accesses=="true"', {
'defines': [
'CAN_USE_UNALIGNED_ACCESSES=1',
],
}],
[ 'v8_can_use_unaligned_accesses=="false"', {
}, {
'defines': [
'CAN_USE_UNALIGNED_ACCESSES=0',
],
}],
# NEON implies VFP3 and VFP3 implies VFP2.
[ 'v8_can_use_vfp2_instructions=="true" or arm_neon==1 or \
arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
'defines': [
'CAN_USE_VFP2_INSTRUCTIONS',
],
}],
# NEON implies VFP3.
[ 'v8_can_use_vfp3_instructions=="true" or arm_neon==1 or \
arm_fpu=="vfpv3" or arm_fpu=="vfpv3-d16"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
}],
[ 'v8_use_arm_eabi_hardfloat=="true"', {
'defines': [
'USE_EABI_HARDFLOAT=1',
'CAN_USE_VFP2_INSTRUCTIONS',
],
['armsimulator=="no"', {
'target_conditions': [
['_toolset=="target"', {
'cflags': ['-mfloat-abi=hard',],
'conditions': [
[ 'armv7==1', {
'cflags': ['-march=armv7-a',],
}],
[ 'armv7==1 or armv7=="default"', {
'conditions': [
[ 'arm_neon==1', {
'cflags': ['-mfpu=neon',],
},
{
'conditions': [
[ 'arm_fpu!="default"', {
'cflags': ['-mfpu=<(arm_fpu)',],
}],
]
}],
]
}],
[ 'arm_float_abi!="default"', {
'cflags': ['-mfloat-abi=<(arm_float_abi)',],
}],
[ 'arm_thumb==1', {
'cflags': ['-mthumb',],
}],
[ 'arm_thumb==0', {
'cflags': ['-marm',],
}],
],
}],
],
}, {
'defines': [
'USE_EABI_HARDFLOAT=0',
'conditions': [
[ 'arm_test=="on"', {
'defines': [
'ARM_TEST',
],
}],
],
}],
[ 'v8_can_use_vfp32dregs=="true"', {
['armsimulator=="yes"', {
'defines': [
'CAN_USE_VFP32DREGS',
'ARM_TEST',
],
'conditions': [
[ 'armv7==1 or armv7=="default"', {
'defines': [
'CAN_USE_ARMV7_INSTRUCTIONS=1',
],
'conditions': [
[ 'arm_fpu=="default"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
}],
[ 'arm_fpu=="vfpv3-d16"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
],
}],
[ 'arm_fpu=="vfpv3"', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'CAN_USE_VFP32DREGS',
],
}],
[ 'arm_fpu=="neon" or arm_neon==1', {
'defines': [
'CAN_USE_VFP3_INSTRUCTIONS',
'CAN_USE_VFP32DREGS',
],
}],
],
}],
[ 'arm_float_abi=="hard"', {
'defines': [
'USE_EABI_HARDFLOAT=1',
],
}],
[ 'arm_float_abi=="softfp" or arm_float_abi=="default"', {
'defines': [
'USE_EABI_HARDFLOAT=0',
],
}],
]
}],
],
}], # v8_target_arch=="arm"
@@ -320,7 +364,8 @@
'clang%': 0,
},
'conditions': [
['OS!="android" or clang==1', {
['(OS!="android" or clang==1) and \
nacl_target_arch!="nacl_x64"', {
'cflags': [ '<(m32flag)' ],
'ldflags': [ '<(m32flag)' ],
}],
@@ -409,6 +454,15 @@
}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="netbsd" \
or OS=="android"', {
'cflags!': [
'-O2',
'-Os',
],
'cflags': [
'-fdata-sections',
'-ffunction-sections',
'-O3',
],
'conditions': [
[ 'gcc_version==44 and clang==0', {
'cflags': [

deps/v8/build/standalone.gypi (4)

@@ -76,9 +76,11 @@
}],
],
# Default ARM variable settings.
'armv7%': 1,
'armv7%': 'default',
'arm_neon%': 0,
'arm_fpu%': 'vfpv3',
'arm_float_abi%': 'default',
'arm_thumb': 'default',
},
'target_defaults': {
'default_configuration': 'Debug',

deps/v8/include/v8-preparser.h (2)

@@ -115,4 +115,6 @@ PreParserData V8EXPORT Preparse(UnicodeInputStream* input,
} // namespace v8.
#undef V8EXPORT
#endif // PREPARSER_H

deps/v8/include/v8-profiler.h (49)

@@ -173,7 +173,7 @@ class V8EXPORT CpuProfiler {
*/
/** Deprecated. Use GetProfileCount instead. */
static int GetProfilesCount();
V8_DEPRECATED(static int GetProfilesCount());
/**
* Returns the number of profiles collected (doesn't include
* profiles that are being collected at the moment of call.)
@@ -181,25 +181,26 @@
int GetProfileCount();
/** Deprecated. Use GetCpuProfile instead. */
static const CpuProfile* GetProfile(
V8_DEPRECATED(static const CpuProfile* GetProfile(
int index,
Handle<Value> security_token = Handle<Value>());
Handle<Value> security_token = Handle<Value>()));
/** Returns a profile by index. */
const CpuProfile* GetCpuProfile(
int index,
Handle<Value> security_token = Handle<Value>());
/** Deprecated. Use FindProfile instead. */
static const CpuProfile* FindProfile(
V8_DEPRECATED(static const CpuProfile* FindProfile(
unsigned uid,
Handle<Value> security_token = Handle<Value>());
Handle<Value> security_token = Handle<Value>()));
/** Returns a profile by uid. */
const CpuProfile* FindCpuProfile(
unsigned uid,
Handle<Value> security_token = Handle<Value>());
/** Deprecated. Use StartCpuProfiling instead. */
static void StartProfiling(Handle<String> title, bool record_samples = false);
V8_DEPRECATED(static void StartProfiling(Handle<String> title,
bool record_samples = false));
/**
* Starts collecting CPU profile. Title may be an empty string. It
* is allowed to have several profiles being collected at
@@ -214,9 +215,9 @@
void StartCpuProfiling(Handle<String> title, bool record_samples = false);
/** Deprecated. Use StopCpuProfiling instead. */
static const CpuProfile* StopProfiling(
V8_DEPRECATED(static const CpuProfile* StopProfiling(
Handle<String> title,
Handle<Value> security_token = Handle<Value>());
Handle<Value> security_token = Handle<Value>()));
/**
* Stops collecting CPU profile with a given title and returns it.
* If the title given is empty, finishes the last profile started.
@@ -226,7 +227,7 @@
Handle<Value> security_token = Handle<Value>());
/** Deprecated. Use DeleteAllCpuProfiles instead. */
static void DeleteAllProfiles();
V8_DEPRECATED(static void DeleteAllProfiles());
/**
* Deletes all existing profiles, also cancelling all profiling
* activity. All previously returned pointers to profiles and their
@@ -425,22 +426,23 @@
(uint16_t class_id, Handle<Value> wrapper);
/** Deprecated. Use GetSnapshotCount instead. */
static int GetSnapshotsCount();
V8_DEPRECATED(static int GetSnapshotsCount());
/** Returns the number of snapshots taken. */
int GetSnapshotCount();
/** Deprecated. Use GetHeapSnapshot instead. */
static const HeapSnapshot* GetSnapshot(int index);
V8_DEPRECATED(static const HeapSnapshot* GetSnapshot(int index));
/** Returns a snapshot by index. */
const HeapSnapshot* GetHeapSnapshot(int index);
/** Deprecated. Use FindHeapSnapshot instead. */
static const HeapSnapshot* FindSnapshot(unsigned uid);
V8_DEPRECATED(static const HeapSnapshot* FindSnapshot(unsigned uid));
/** Returns a profile by uid. */
const HeapSnapshot* FindHeapSnapshot(unsigned uid);
/** Deprecated. Use GetObjectId instead. */
static SnapshotObjectId GetSnapshotObjectId(Handle<Value> value);
V8_DEPRECATED(static SnapshotObjectId GetSnapshotObjectId(
Handle<Value> value));
/**
* Returns SnapshotObjectId for a heap object referenced by |value| if
* it has been seen by the heap profiler, kUnknownObjectId otherwise.
@@ -469,11 +471,11 @@
};
/** Deprecated. Use TakeHeapSnapshot instead. */
static const HeapSnapshot* TakeSnapshot(
V8_DEPRECATED(static const HeapSnapshot* TakeSnapshot(
Handle<String> title,
HeapSnapshot::Type type = HeapSnapshot::kFull,
ActivityControl* control = NULL,
ObjectNameResolver* global_object_name_resolver = NULL);
ObjectNameResolver* global_object_name_resolver = NULL));
/**
* Takes a heap snapshot and returns it. Title may be an empty string.
*/
@@ -484,7 +486,7 @@
/** Deprecated. Use StartTrackingHeapObjects instead. */
static void StartHeapObjectsTracking();
V8_DEPRECATED(static void StartHeapObjectsTracking());
/**
* Starts tracking of heap objects population statistics. After calling
* this method, all heap objects relocations done by the garbage collector
@@ -493,7 +495,8 @@
void StartTrackingHeapObjects();
/** Deprecated. Use GetHeapStats instead. */
static SnapshotObjectId PushHeapObjectsStats(OutputStream* stream);
V8_DEPRECATED(static SnapshotObjectId PushHeapObjectsStats(
OutputStream* stream));
/**
* Adds a new time interval entry to the aggregated statistics array. The
* time interval entry contains information on the current heap objects
@@ -509,7 +512,7 @@
SnapshotObjectId GetHeapStats(OutputStream* stream);
/** Deprecated. Use StopTrackingHeapObjects instead. */
static void StopHeapObjectsTracking();
V8_DEPRECATED(static void StopHeapObjectsTracking());
/**
* Stops tracking of heap objects population statistics, cleans up all
* collected data. StartHeapObjectsTracking must be called again prior to
@@ -518,7 +521,7 @@
void StopTrackingHeapObjects();
/** Deprecated. Use DeleteAllHeapSnapshots instead. */
static void DeleteAllSnapshots();
V8_DEPRECATED(static void DeleteAllSnapshots());
/**
* Deletes all snapshots taken. All previously returned pointers to
* snapshots and their contents become invalid after this call.
@@ -526,9 +529,9 @@
void DeleteAllHeapSnapshots();
/** Deprecated. Use SetWrapperClassInfoProvider instead. */
static void DefineWrapperClass(
V8_DEPRECATED(static void DefineWrapperClass(
uint16_t class_id,
WrapperInfoCallback callback);
WrapperInfoCallback callback));
/** Binds a callback to embedder's class ID. */
void SetWrapperClassInfoProvider(
uint16_t class_id,
@@ -544,10 +547,10 @@
/**
* Deprecated. Returns the number of currently existing persistent handles.
*/
static int GetPersistentHandleCount();
V8_DEPRECATED(static int GetPersistentHandleCount());
/** Deprecated. Use GetHeapProfilerMemorySize instead. */
static size_t GetMemorySizeUsedByProfiler();
V8_DEPRECATED(static size_t GetMemorySizeUsedByProfiler());
/** Returns memory used for profiler internal data and snapshots. */
size_t GetProfilerMemorySize();
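// Migration sketch for the V8_DEPRECATED entries above (assumes an entered
// isolate; error handling omitted):
//
//   // Old, now deprecated, static form:
//   int count = v8::CpuProfiler::GetProfilesCount();
//   // New instance form, reached through the isolate:
//   v8::Isolate* isolate = v8::Isolate::GetCurrent();
//   int count2 = isolate->GetCpuProfiler()->GetProfileCount();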

deps/v8/include/v8.h (121)

@@ -127,6 +127,8 @@ class StackFrame;
class StackTrace;
class String;
class StringObject;
class Symbol;
class SymbolObject;
class Uint32;
class Utils;
class Value;
@@ -764,6 +766,17 @@ class V8EXPORT Script {
* debugger API.
*/
void SetData(Handle<String> data);
/**
* Returns the name value of the script.
*/
Handle<Value> GetScriptName();
/**
* Returns the zero-based line number of the code_pos location in the script,
* or -1 if no information is available.
*/
int GetLineNumber(int code_pos);
};
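// A minimal sketch of the two accessors above ('script' is assumed to be a
// previously compiled v8::Script handle):
//
//   v8::Handle<v8::Value> name = script->GetScriptName();
//   int line = script->GetLineNumber(0);  // line of position 0, or -1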
@@ -972,6 +985,12 @@ class V8EXPORT Value : public Data {
*/
V8_INLINE(bool IsString() const);
/**
* Returns true if this value is a symbol.
* This is an experimental feature.
*/
bool IsSymbol() const;
/**
* Returns true if this value is a function.
*/
@@ -1032,6 +1051,12 @@
*/
bool IsStringObject() const;
/**
* Returns true if this value is a Symbol object.
* This is an experimental feature.
*/
bool IsSymbolObject() const;
/**
* Returns true if this value is a NativeError.
*/
@@ -1311,7 +1336,11 @@ class V8EXPORT String : public Primitive {
/** Allocates a new string from 16-bit character codes.*/
static Local<String> New(const uint16_t* data, int length = -1);
/** Creates a symbol. Returns one if it exists already.*/
/**
* Creates an internalized string (historically called a "symbol",
* not to be confused with ES6 symbols). Returns one if it exists already.
* TODO(rossberg): Deprecate me when the new string API is here.
*/
static Local<String> NewSymbol(const char* data, int length = -1);
/**
@@ -1449,6 +1478,29 @@
};
/**
* A JavaScript symbol (ECMA-262 edition 6)
*
* This is an experimental feature. Use at your own risk.
*/
class V8EXPORT Symbol : public Primitive {
public:
// Returns the print name string of the symbol, or undefined if none.
Local<Value> Name() const;
// Create a symbol without a print name.
static Local<Symbol> New(Isolate* isolate);
// Create a symbol with a print name.
static Local<Symbol> New(Isolate* isolate, const char* data, int length = -1);
V8_INLINE(static Symbol* Cast(v8::Value* obj));
private:
Symbol();
static void CheckCast(v8::Value* obj);
};
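// A minimal usage sketch (assumes an initialized, entered isolate):
//
//   v8::Isolate* isolate = v8::Isolate::GetCurrent();
//   v8::Local<v8::Symbol> sym = v8::Symbol::New(isolate, "mysym");
//   v8::Local<v8::Value> name = sym->Name();  // the "mysym" string here;
//                                             // undefined for unnamed symbols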
/**
* A JavaScript number value (ECMA-262, 4.3.20)
*/
@@ -1590,11 +1642,9 @@ class V8EXPORT Object : public Value {
*/
PropertyAttribute GetPropertyAttributes(Handle<Value> key);
// TODO(1245389): Replace the type-specific versions of these
// functions with generic ones that accept a Handle<Value> key.
bool Has(Handle<String> key);
bool Has(Handle<Value> key);
bool Delete(Handle<String> key);
bool Delete(Handle<Value> key);
// Delete a property on this object bypassing interceptors and
// ignoring dont-delete attributes.
@@ -1981,6 +2031,27 @@ class V8EXPORT StringObject : public Object {
};
/**
* A Symbol object (ECMA-262 edition 6).
*
* This is an experimental feature. Use at your own risk.
*/
class V8EXPORT SymbolObject : public Object {
public:
static Local<Value> New(Isolate* isolate, Handle<Symbol> value);
/**
* Returns the Symbol held by the object.
*/
Local<Symbol> SymbolValue() const;
V8_INLINE(static SymbolObject* Cast(v8::Value* obj));
private:
static void CheckCast(v8::Value* obj);
};
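// Sketch: wrapping and unwrapping a symbol ('isolate' and 'sym' as in the
// Symbol sketch earlier):
//
//   v8::Local<v8::Value> obj = v8::SymbolObject::New(isolate, sym);
//   v8::Local<v8::Symbol> back =
//       v8::SymbolObject::Cast(*obj)->SymbolValue();  // == sym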
/**
* An instance of the built-in RegExp constructor (ECMA-262, 15.10).
*/
@@ -3036,6 +3107,9 @@ class V8EXPORT Isolate {
*/
CpuProfiler* GetCpuProfiler();
/** Returns the context that is on the top of the stack. */
Local<Context> GetCurrentContext();
private:
Isolate();
Isolate(const Isolate&);
@@ -3864,11 +3938,11 @@ class V8EXPORT Context {
*/
void ReattachGlobal(Handle<Object> global_object);
/** Creates a new context.
/**
* Creates a new context and returns a handle to the newly allocated
* context.
*
* Returns a persistent handle to the newly allocated context. This
* persistent handle has to be disposed when the context is no
* longer used so the context can be garbage collected.
* \param isolate The isolate in which to create the context.
*
* \param extensions An optional extension configuration containing
* the extensions to be installed in the newly created context.
@@ -3882,6 +3956,14 @@
* template. The state of the global object will be completely reset
* and only object identity will remain.
*/
static Local<Context> New(
Isolate* isolate,
ExtensionConfiguration* extensions = NULL,
Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
Handle<Value> global_object = Handle<Value>());
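// Sketch of the new overload: the returned Local is tied to the enclosing
// HandleScope, so no manual Dispose() is needed (unlike the Persistent form
// below):
//
//   v8::Isolate* isolate = v8::Isolate::GetCurrent();
//   v8::HandleScope handle_scope;
//   v8::Local<v8::Context> context = v8::Context::New(isolate);
//   v8::Context::Scope context_scope(context);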
/** Deprecated. Use Isolate version instead. */
// TODO(mstarzinger): Put this behind the V8_DEPRECATED guard.
static Persistent<Context> New(
ExtensionConfiguration* extensions = NULL,
Handle<ObjectTemplate> global_template = Handle<ObjectTemplate>(),
@@ -3890,7 +3972,8 @@
/** Returns the last entered context. */
static Local<Context> GetEntered();
/** Returns the context that is on the top of the stack. */
// TODO(svenpanne) Actually deprecate this.
/** Deprecated. Use Isolate::GetCurrentContext instead. */
static Local<Context> GetCurrent();
/**
@@ -4301,7 +4384,7 @@ class Internals {
static const int kNullValueRootIndex = 7;
static const int kTrueValueRootIndex = 8;
static const int kFalseValueRootIndex = 9;
static const int kEmptyStringRootIndex = 119;
static const int kEmptyStringRootIndex = 118;
static const int kNodeClassIdOffset = 1 * kApiPointerSize;
static const int kNodeFlagsOffset = 1 * kApiPointerSize + 3;
@@ -4847,6 +4930,14 @@ bool Value::QuickIsString() const {
}
Symbol* Symbol::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
#endif
return static_cast<Symbol*>(value);
}
Number* Number::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
@@ -4879,6 +4970,14 @@ StringObject* StringObject::Cast(v8::Value* value) {
}
SymbolObject* SymbolObject::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);
#endif
return static_cast<SymbolObject*>(value);
}
NumberObject* NumberObject::Cast(v8::Value* value) {
#ifdef V8_ENABLE_CHECKS
CheckCast(value);

deps/v8/preparser/preparser-process.cc (6)

@@ -30,6 +30,7 @@
#include <stdio.h>
#include <string.h>
#include "../include/v8.h"
#include "../include/v8stdint.h"
#include "../include/v8-preparser.h"
@@ -37,8 +38,7 @@
namespace i = v8::internal;
// This file is only used for testing the stand-alone preparser
// library.
// This file is only used for testing the preparser.
// The first argument must be the path of a JavaScript source file, or
// the flags "-e" and the next argument is then the source of a JavaScript
// program.
@@ -320,6 +320,8 @@ int main(int argc, const char* argv[]) {
ExceptionExpectation expects =
ParseExpectation(argc - arg_index, argv + arg_index);
v8::V8::Initialize();
ScopedPointer<uint8_t> buffer;
size_t length;

deps/v8/preparser/preparser.gyp (17)

@@ -31,11 +31,24 @@
{
'target_name': 'preparser',
'type': 'executable',
'dependencies': [
'../tools/gyp/v8.gyp:preparser_lib',
'conditions': [
# preparser can't link against a shared library, so link against
# the underlying static targets.
['v8_use_snapshot=="true"', {
'dependencies': ['../tools/gyp/v8.gyp:v8_snapshot'],
}, {
'dependencies': [
'../tools/gyp/v8.gyp:v8_nosnapshot.<(v8_target_arch)',
],
}],
],
'include_dirs+': [
'../src',
],
'sources': [
'preparser-process.cc',
'../include/v8-preparser.h',
'../src/preparser-api.cc',
],
},
],

deps/v8/src/accessors.cc (58)

@@ -782,64 +782,6 @@ const AccessorDescriptor Accessors::FunctionCaller = {
};
//
// Accessors::ObjectPrototype
//
static inline Object* GetPrototypeSkipHiddenPrototypes(Isolate* isolate,
Object* receiver) {
Object* current = receiver->GetPrototype(isolate);
while (current->IsJSObject() &&
JSObject::cast(current)->map()->is_hidden_prototype()) {
current = current->GetPrototype(isolate);
}
return current;
}
MaybeObject* Accessors::ObjectGetPrototype(Object* receiver, void*) {
return GetPrototypeSkipHiddenPrototypes(Isolate::Current(), receiver);
}
MaybeObject* Accessors::ObjectSetPrototype(JSObject* receiver_raw,
Object* value_raw,
void*) {
const bool kSkipHiddenPrototypes = true;
// To be consistent with other Set functions, return the value.
if (!(FLAG_harmony_observation && receiver_raw->map()->is_observed()))
return receiver_raw->SetPrototype(value_raw, kSkipHiddenPrototypes);
Isolate* isolate = receiver_raw->GetIsolate();
HandleScope scope(isolate);
Handle<JSObject> receiver(receiver_raw);
Handle<Object> value(value_raw, isolate);
Handle<Object> old_value(GetPrototypeSkipHiddenPrototypes(isolate, *receiver),
isolate);
MaybeObject* result = receiver->SetPrototype(*value, kSkipHiddenPrototypes);
Handle<Object> hresult;
if (!result->ToHandle(&hresult, isolate)) return result;
Handle<Object> new_value(GetPrototypeSkipHiddenPrototypes(isolate, *receiver),
isolate);
if (!new_value->SameValue(*old_value)) {
JSObject::EnqueueChangeRecord(receiver, "prototype",
isolate->factory()->proto_string(),
old_value);
}
return *hresult;
}
const AccessorDescriptor Accessors::ObjectPrototype = {
ObjectGetPrototype,
ObjectSetPrototype,
0
};
//
// Accessors::MakeModuleExport
//

deps/v8/src/accessors.h (7)

@@ -56,8 +56,7 @@ namespace internal {
V(ScriptContextData) \
V(ScriptEvalFromScript) \
V(ScriptEvalFromScriptPosition) \
V(ScriptEvalFromFunctionName) \
V(ObjectPrototype)
V(ScriptEvalFromFunctionName)
// Accessors contains all predefined proxy accessors.
@@ -111,10 +110,6 @@ class Accessors : public AllStatic {
static MaybeObject* ScriptGetEvalFromScript(Object* object, void*);
static MaybeObject* ScriptGetEvalFromScriptPosition(Object* object, void*);
static MaybeObject* ScriptGetEvalFromFunctionName(Object* object, void*);
static MaybeObject* ObjectGetPrototype(Object* receiver, void*);
static MaybeObject* ObjectSetPrototype(JSObject* receiver,
Object* value,
void*);
// Helper functions.
static Object* FlattenNumber(Object* value);

deps/v8/src/allocation.cc (6)

@@ -28,8 +28,8 @@
#include "allocation.h"
#include <stdlib.h> // For free, malloc.
#include <string.h> // For memcpy.
#include "checks.h"
#include "platform.h"
#include "utils.h"
namespace v8 {
@@ -85,7 +85,7 @@ void AllStatic::operator delete(void* p) {
char* StrDup(const char* str) {
int length = StrLength(str);
char* result = NewArray<char>(length + 1);
memcpy(result, str, length);
OS::MemCopy(result, str, length);
result[length] = '\0';
return result;
}
@@ -95,7 +95,7 @@ char* StrNDup(const char* str, int n) {
int length = StrLength(str);
if (n < length) length = n;
char* result = NewArray<char>(length + 1);
memcpy(result, str, length);
OS::MemCopy(result, str, length);
result[length] = '\0';
return result;
}

deps/v8/src/api.cc (237)

@@ -1655,7 +1655,7 @@ void ObjectTemplate::SetInternalFieldCount(int value) {
ScriptData* ScriptData::PreCompile(const char* input, int length) {
i::Utf8ToUtf16CharacterStream stream(
reinterpret_cast<const unsigned char*>(input), length);
return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
return i::PreParserApi::PreParse(&stream);
}
@@ -1664,10 +1664,10 @@ ScriptData* ScriptData::PreCompile(v8::Handle<String> source) {
if (str->IsExternalTwoByteString()) {
i::ExternalTwoByteStringUtf16CharacterStream stream(
i::Handle<i::ExternalTwoByteString>::cast(str), 0, str->length());
return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
return i::PreParserApi::PreParse(&stream);
} else {
i::GenericStringUtf16CharacterStream stream(str, 0, str->length());
return i::ParserApi::PreParse(&stream, NULL, i::FLAG_harmony_scoping);
return i::PreParserApi::PreParse(&stream);
}
}
@@ -1686,7 +1686,8 @@ ScriptData* ScriptData::New(const char* data, int length) {
}
// Copy the data to align it.
unsigned* deserialized_data = i::NewArray<unsigned>(deserialized_data_length);
i::OS::MemCopy(deserialized_data, data, length);
i::CopyBytes(reinterpret_cast<char*>(deserialized_data),
data, static_cast<size_t>(length));
return new i::ScriptDataImpl(
i::Vector<unsigned>(deserialized_data, deserialized_data_length));
@@ -1852,6 +1853,34 @@ Local<Value> Script::Id() {
}
int Script::GetLineNumber(int code_pos) {
i::Isolate* isolate = i::Isolate::Current();
ON_BAILOUT(isolate, "v8::Script::GetLineNumber()", return -1);
LOG_API(isolate, "Script::GetLineNumber");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsScript()) {
i::Handle<i::Script> script = i::Handle<i::Script>(i::Script::cast(*obj));
return i::GetScriptLineNumber(script, code_pos);
} else {
return -1;
}
}
Handle<Value> Script::GetScriptName() {
i::Isolate* isolate = i::Isolate::Current();
ON_BAILOUT(isolate, "v8::Script::GetName()", return Handle<String>());
LOG_API(isolate, "Script::GetName");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
if (obj->IsScript()) {
i::Object* name = i::Script::cast(*obj)->name();
return Utils::ToLocal(i::Handle<i::Object>(name, isolate));
} else {
return Handle<String>();
}
}
void Script::SetData(v8::Handle<String> data) {
i::Isolate* isolate = i::Isolate::Current();
ON_BAILOUT(isolate, "v8::Script::SetData()", return);
@@ -2367,6 +2396,12 @@ bool Value::FullIsString() const {
}
bool Value::IsSymbol() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsSymbol()")) return false;
return Utils::OpenHandle(this)->IsSymbol();
}
bool Value::IsArray() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Value::IsArray()")) return false;
return Utils::OpenHandle(this)->IsJSArray();
@@ -2451,6 +2486,16 @@ bool Value::IsStringObject() const {
}
bool Value::IsSymbolObject() const {
// TODO(svenpanne): these and other test functions should be written such
// that they do not use Isolate::Current().
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Value::IsSymbolObject()")) return false;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
return obj->HasSpecificClassOf(isolate->heap()->Symbol_string());
}
bool Value::IsNumberObject() const {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::Value::IsNumberObject()")) return false;
@@ -2664,6 +2709,15 @@ void v8::String::CheckCast(v8::Value* that) {
}
void v8::Symbol::CheckCast(v8::Value* that) {
if (IsDeadCheck(i::Isolate::Current(), "v8::Symbol::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->IsSymbol(),
"v8::Symbol::Cast()",
"Could not convert to symbol");
}
void v8::Number::CheckCast(v8::Value* that) {
if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
@@ -2711,6 +2765,16 @@ void v8::StringObject::CheckCast(v8::Value* that) {
}
void v8::SymbolObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::SymbolObject::Cast()")) return;
i::Handle<i::Object> obj = Utils::OpenHandle(that);
ApiCheck(obj->HasSpecificClassOf(isolate->heap()->Symbol_string()),
"v8::SymbolObject::Cast()",
"Could not convert to SymbolObject");
}
void v8::NumberObject::CheckCast(v8::Value* that) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::NumberObject::Cast()")) return;
@@ -3079,13 +3143,13 @@ PropertyAttribute v8::Object::GetPropertyAttributes(v8::Handle<Value> key) {
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
if (!key_obj->IsString()) {
if (!key_obj->IsName()) {
EXCEPTION_PREAMBLE(isolate);
key_obj = i::Execution::ToString(key_obj, &has_pending_exception);
EXCEPTION_BAILOUT_CHECK(isolate, static_cast<PropertyAttribute>(NONE));
}
i::Handle<i::String> key_string = i::Handle<i::String>::cast(key_obj);
PropertyAttributes result = self->GetPropertyAttribute(*key_string);
i::Handle<i::Name> key_name = i::Handle<i::Name>::cast(key_obj);
PropertyAttributes result = self->GetPropertyAttribute(*key_name);
if (result == ABSENT) return static_cast<PropertyAttribute>(NONE);
return static_cast<PropertyAttribute>(result);
}
@@ -3215,7 +3279,7 @@ Local<String> v8::Object::ObjectProtoToString() {
// Write prefix.
char* ptr = buf.start();
memcpy(ptr, prefix, prefix_len * v8::internal::kCharSize);
i::OS::MemCopy(ptr, prefix, prefix_len * v8::internal::kCharSize);
ptr += prefix_len;
// Write real content.
@@ -3223,7 +3287,7 @@
ptr += str_len;
// Write postfix.
memcpy(ptr, postfix, postfix_len * v8::internal::kCharSize);
i::OS::MemCopy(ptr, postfix, postfix_len * v8::internal::kCharSize);
// Copy the buffer into a heap-allocated string and return it.
Local<String> result = v8::String::New(buf.start(), buf_len);
@@ -3255,24 +3319,32 @@ Local<String> v8::Object::GetConstructorName() {
}
bool v8::Object::Delete(v8::Handle<String> key) {
bool v8::Object::Delete(v8::Handle<Value> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::Delete()", return false);
ENTER_V8(isolate);
i::HandleScope scope(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
return i::JSObject::DeleteProperty(self, key_obj)->IsTrue();
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj = i::DeleteProperty(self, key_obj);
has_pending_exception = obj.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);
return obj->IsTrue();
}
bool v8::Object::Has(v8::Handle<String> key) {
bool v8::Object::Has(v8::Handle<Value> key) {
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::Has()", return false);
ENTER_V8(isolate);
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::String> key_obj = Utils::OpenHandle(*key);
return self->HasProperty(*key_obj);
i::Handle<i::JSReceiver> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
EXCEPTION_PREAMBLE(isolate);
i::Handle<i::Object> obj = i::HasProperty(self, key_obj);
has_pending_exception = obj.is_null();
EXCEPTION_BAILOUT_CHECK(isolate, false);
return obj->IsTrue();
}
@@ -3352,15 +3424,16 @@ bool v8::Object::HasRealNamedProperty(Handle<String> key) {
ON_BAILOUT(isolate, "v8::Object::HasRealNamedProperty()",
return false);
return Utils::OpenHandle(this)->HasRealNamedProperty(
isolate,
*Utils::OpenHandle(*key));
}
bool v8::Object::HasRealIndexedProperty(uint32_t index) {
ON_BAILOUT(Utils::OpenHandle(this)->GetIsolate(),
"v8::Object::HasRealIndexedProperty()",
i::Isolate* isolate = Utils::OpenHandle(this)->GetIsolate();
ON_BAILOUT(isolate, "v8::Object::HasRealIndexedProperty()",
return false);
return Utils::OpenHandle(this)->HasRealElementProperty(index);
return Utils::OpenHandle(this)->HasRealElementProperty(isolate, index);
}
@@ -3371,6 +3444,7 @@ bool v8::Object::HasRealNamedCallbackProperty(Handle<String> key) {
return false);
ENTER_V8(isolate);
return Utils::OpenHandle(this)->HasRealNamedCallbackProperty(
isolate,
*Utils::OpenHandle(*key));
}
@@ -4591,6 +4665,15 @@ const v8::String::ExternalAsciiStringResource*
}
Local<Value> Symbol::Name() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Symbol::Name()"))
return Local<Value>();
i::Handle<i::Symbol> sym = Utils::OpenHandle(this);
i::Handle<i::Object> name(sym->name(), sym->GetIsolate());
return Utils::ToLocal(name);
}
double Number::Value() const {
if (IsDeadCheck(i::Isolate::Current(), "v8::Number::Value()")) return 0;
i::Handle<i::Object> obj = Utils::OpenHandle(this);
@@ -4861,18 +4944,14 @@ static i::Handle<i::FunctionTemplateInfo>
}
Persistent<Context> v8::Context::New(
static i::Handle<i::Context> CreateEnvironment(
i::Isolate* isolate,
v8::ExtensionConfiguration* extensions,
v8::Handle<ObjectTemplate> global_template,
v8::Handle<Value> global_object) {
i::Isolate::EnsureDefaultIsolate();
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Context::New()");
LOG_API(isolate, "Context::New");
ON_BAILOUT(isolate, "v8::Context::New()", return Persistent<Context>());
i::Handle<i::Context> env;
// Enter V8 via an ENTER_V8 scope.
i::Handle<i::Context> env;
{
ENTER_V8(isolate);
v8::Handle<ObjectTemplate> proxy_template = global_template;
@@ -4927,10 +5006,43 @@ Persistent<Context> v8::Context::New(
}
// Leave V8.
if (env.is_null()) {
return Persistent<Context>();
}
return Persistent<Context>(Utils::ToLocal(env));
return env;
}
Persistent<Context> v8::Context::New(
v8::ExtensionConfiguration* extensions,
v8::Handle<ObjectTemplate> global_template,
v8::Handle<Value> global_object) {
i::Isolate::EnsureDefaultIsolate();
i::Isolate* isolate = i::Isolate::Current();
Isolate* external_isolate = reinterpret_cast<Isolate*>(isolate);
EnsureInitializedForIsolate(isolate, "v8::Context::New()");
LOG_API(isolate, "Context::New");
ON_BAILOUT(isolate, "v8::Context::New()", return Persistent<Context>());
i::HandleScope scope(isolate);
i::Handle<i::Context> env =
CreateEnvironment(isolate, extensions, global_template, global_object);
if (env.is_null()) return Persistent<Context>();
return Persistent<Context>::New(external_isolate, Utils::ToLocal(env));
}
Local<Context> v8::Context::New(
v8::Isolate* external_isolate,
v8::ExtensionConfiguration* extensions,
v8::Handle<ObjectTemplate> global_template,
v8::Handle<Value> global_object) {
i::Isolate::EnsureDefaultIsolate();
i::Isolate* isolate = reinterpret_cast<i::Isolate*>(external_isolate);
EnsureInitializedForIsolate(isolate, "v8::Context::New()");
LOG_API(isolate, "Context::New");
ON_BAILOUT(isolate, "v8::Context::New()", return Local<Context>());
i::HandleScope scope(isolate);
i::Handle<i::Context> env =
CreateEnvironment(isolate, extensions, global_template, global_object);
if (env.is_null()) return Local<Context>();
return Utils::ToLocal(scope.CloseAndEscape(env));
}
@@ -5005,10 +5117,7 @@ v8::Local<v8::Context> Context::GetCurrent() {
if (IsDeadCheck(isolate, "v8::Context::GetCurrent()")) {
return Local<Context>();
}
i::Handle<i::Object> current = isolate->native_context();
if (current.is_null()) return Local<Context>();
i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
return Utils::ToLocal(context);
return reinterpret_cast<Isolate*>(isolate)->GetCurrentContext();
}
@@ -5429,6 +5538,29 @@ Local<v8::String> v8::StringObject::StringValue() const {
}
Local<v8::Value> v8::SymbolObject::New(Isolate* isolate, Handle<Symbol> value) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::SymbolObject::New()");
LOG_API(i_isolate, "SymbolObject::New");
ENTER_V8(i_isolate);
i::Handle<i::Object> obj =
i_isolate->factory()->ToObject(Utils::OpenHandle(*value));
return Utils::ToLocal(obj);
}
Local<v8::Symbol> v8::SymbolObject::SymbolValue() const {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::SymbolObject::SymbolValue()"))
return Local<v8::Symbol>();
LOG_API(isolate, "SymbolObject::SymbolValue");
i::Handle<i::Object> obj = Utils::OpenHandle(this);
i::Handle<i::JSValue> jsvalue = i::Handle<i::JSValue>::cast(obj);
return Utils::ToLocal(
i::Handle<i::Symbol>(i::Symbol::cast(jsvalue->value())));
}
Local<v8::Value> v8::Date::New(double time) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Date::New()");
@@ -5610,6 +5742,30 @@ Local<String> v8::String::NewSymbol(const char* data, int length) {
}
Local<Symbol> v8::Symbol::New(Isolate* isolate) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
LOG_API(i_isolate, "Symbol::New()");
ENTER_V8(i_isolate);
i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
return Utils::ToLocal(result);
}
Local<Symbol> v8::Symbol::New(Isolate* isolate, const char* data, int length) {
i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
EnsureInitializedForIsolate(i_isolate, "v8::Symbol::New()");
LOG_API(i_isolate, "Symbol::New(char)");
ENTER_V8(i_isolate);
if (length == -1) length = i::StrLength(data);
i::Handle<i::String> name = i_isolate->factory()->NewStringFromUtf8(
i::Vector<const char>(data, length));
i::Handle<i::Symbol> result = i_isolate->factory()->NewSymbol();
result->set_name(*name);
return Utils::ToLocal(result);
}
Local<Number> v8::Number::New(double value) {
i::Isolate* isolate = i::Isolate::Current();
EnsureInitializedForIsolate(isolate, "v8::Number::New()");
@@ -5813,6 +5969,15 @@ CpuProfiler* Isolate::GetCpuProfiler() {
}
v8::Local<v8::Context> Isolate::GetCurrentContext() {
i::Isolate* internal_isolate = reinterpret_cast<i::Isolate*>(this);
i::Handle<i::Object> current = internal_isolate->native_context();
if (current.is_null()) return Local<Context>();
i::Handle<i::Context> context = i::Handle<i::Context>::cast(current);
return Utils::ToLocal(context);
}
void V8::SetGlobalGCPrologueCallback(GCCallback callback) {
i::Isolate* isolate = i::Isolate::Current();
if (IsDeadCheck(isolate, "v8::V8::SetGlobalGCPrologueCallback()")) return;
@@ -7086,7 +7251,7 @@ char* HandleScopeImplementer::ArchiveThread(char* storage) {
v8::ImplementationUtilities::HandleScopeData* current =
isolate_->handle_scope_data();
handle_scope_data_ = *current;
memcpy(storage, this, sizeof(*this));
OS::MemCopy(storage, this, sizeof(*this));
ResetAfterArchive();
current->Initialize();
@@ -7101,7 +7266,7 @@
char* HandleScopeImplementer::RestoreThread(char* storage) {
memcpy(this, storage, sizeof(*this));
OS::MemCopy(this, storage, sizeof(*this));
*isolate_->handle_scope_data() = handle_scope_data_;
return storage + ArchiveSpacePerThread();
}

deps/v8/src/api.h (4)

@@ -171,6 +171,7 @@ class RegisteredExtension {
V(Object, JSObject) \
V(Array, JSArray) \
V(String, String) \
V(Symbol, Symbol) \
V(Script, Object) \
V(Function, JSFunction) \
V(Message, JSObject) \
@@ -196,6 +197,8 @@ class Utils {
v8::internal::Handle<v8::internal::JSFunction> obj);
static inline Local<String> ToLocal(
v8::internal::Handle<v8::internal::String> obj);
static inline Local<Symbol> ToLocal(
v8::internal::Handle<v8::internal::Symbol> obj);
static inline Local<RegExp> ToLocal(
v8::internal::Handle<v8::internal::JSRegExp> obj);
static inline Local<Object> ToLocal(
@@ -268,6 +271,7 @@ MAKE_TO_LOCAL(ToLocal, Context, Context)
MAKE_TO_LOCAL(ToLocal, Object, Value)
MAKE_TO_LOCAL(ToLocal, JSFunction, Function)
MAKE_TO_LOCAL(ToLocal, String, String)
MAKE_TO_LOCAL(ToLocal, Symbol, Symbol)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)

deps/v8/src/arguments.h (21)

@@ -115,15 +115,18 @@ class CustomArguments : public Relocatable {
#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
Type Name(Arguments args, Isolate* isolate)
#define RUNTIME_FUNCTION(Type, Name) \
Type Name(Arguments args, Isolate* isolate)
#define RUNTIME_ARGUMENTS(isolate, args) args, isolate
Type Name(int args_length, Object** args_object, Isolate* isolate)
#define RUNTIME_FUNCTION(Type, Name) \
static Type __RT_impl_##Name(Arguments args, Isolate* isolate); \
Type Name(int args_length, Object** args_object, Isolate* isolate) { \
Arguments args(args_length, args_object); \
return __RT_impl_##Name(args, isolate); \
} \
static Type __RT_impl_##Name(Arguments args, Isolate* isolate)
#define RUNTIME_ARGUMENTS(isolate, args) \
args.length(), args.arguments(), isolate
} } // namespace v8::internal
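// Sketch of the reworked macro in use (Runtime_Example is hypothetical):
// RUNTIME_FUNCTION now emits an exported Name(int, Object**, Isolate*)
// wrapper that rebuilds the Arguments object and forwards to a static
// __RT_impl_Name holding the body, so 'args' below is the rebuilt object:
//
//   RUNTIME_FUNCTION(MaybeObject*, Runtime_Example) {
//     ASSERT(args.length() == 1);
//     return isolate->heap()->undefined_value();
//   }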

deps/v8/src/arm/assembler-arm-inl.h (18)

@@ -48,29 +48,17 @@ namespace internal {
int Register::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(VFP2)) {
return kMaxNumAllocatableRegisters;
} else {
return kMaxNumAllocatableRegisters - kGPRsPerNonVFP2Double;
}
return kMaxNumAllocatableRegisters;
}
int DwVfpRegister::NumRegisters() {
if (CpuFeatures::IsSupported(VFP2)) {
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
} else {
return 1;
}
return CpuFeatures::IsSupported(VFP32DREGS) ? 32 : 16;
}
int DwVfpRegister::NumAllocatableRegisters() {
if (CpuFeatures::IsSupported(VFP2)) {
return NumRegisters() - kNumReservedRegisters;
} else {
return 1;
}
return NumRegisters() - kNumReservedRegisters;
}

deps/v8/src/arm/assembler-arm.cc (202)

@@ -63,29 +63,21 @@ ExternalReference ExternalReference::cpu_features() {
static unsigned CpuFeaturesImpliedByCompiler() {
unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
answer |= 1u << ARMv7;
if (FLAG_enable_armv7) {
answer |= 1u << ARMv7;
}
#endif // CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP3_INSTRUCTIONS
answer |= 1u << VFP3 | 1u << VFP2 | 1u << ARMv7;
if (FLAG_enable_vfp3) {
answer |= 1u << VFP3 | 1u << ARMv7;
}
#endif // CAN_USE_VFP3_INSTRUCTIONS
#ifdef CAN_USE_VFP2_INSTRUCTIONS
answer |= 1u << VFP2;
#endif // CAN_USE_VFP2_INSTRUCTIONS
#ifdef CAN_USE_VFP32DREGS
answer |= 1u << VFP32DREGS;
if (FLAG_enable_32dregs) {
answer |= 1u << VFP32DREGS;
}
#endif // CAN_USE_VFP32DREGS
#ifdef __arm__
// If the compiler is allowed to use VFP then we can use VFP too in our code
// generation even when generating snapshots. ARMv7 and hardware floating
// point support implies VFPv3, see ARM DDI 0406B, page A1-6.
#if defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__) \
&& !defined(__SOFTFP__)
answer |= 1u << VFP3 | 1u << ARMv7 | 1u << VFP2;
#endif // defined(CAN_USE_ARMV7_INSTRUCTIONS) && defined(__VFP_FP__)
// && !defined(__SOFTFP__)
#endif // __arm__
if (answer & (1u << ARMv7)) {
if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
answer |= 1u << UNALIGNED_ACCESSES;
}
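The rewritten function applies one pattern throughout: a capability implied by compiler defines only enters the feature mask when its runtime flag is also left enabled. A standalone sketch of that gating (feature numbering, flag defaults, and the defines below are illustrative, not V8's real definitions):

#include <cstdio>

#define CAN_USE_ARMV7_INSTRUCTIONS 1  // pretend the compiler targets ARMv7
#define CAN_USE_VFP3_INSTRUCTIONS 1   // pretend VFP3 is available too

enum CpuFeature { ARMv7, VFP3, UNALIGNED_ACCESSES };
static bool FLAG_enable_armv7 = true;
static bool FLAG_enable_vfp3 = false;  // e.g. user passed --noenable_vfp3
static bool FLAG_enable_unaligned_accesses = true;

static unsigned CpuFeaturesImpliedByCompiler() {
  unsigned answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
  if (FLAG_enable_armv7) answer |= 1u << ARMv7;            // flag-gated now
#endif
#ifdef CAN_USE_VFP3_INSTRUCTIONS
  if (FLAG_enable_vfp3) answer |= 1u << VFP3 | 1u << ARMv7;  // skipped here
#endif
  // A derived feature is likewise gated on its own flag.
  if ((answer & (1u << ARMv7)) && FLAG_enable_unaligned_accesses) {
    answer |= 1u << UNALIGNED_ACCESSES;
  }
  return answer;
}

int main() {
  // 0x5: ARMv7 and UNALIGNED_ACCESSES, but not VFP3 (flag disabled).
  printf("implied features: 0x%x\n", CpuFeaturesImpliedByCompiler());
  return 0;
}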
@ -94,18 +86,13 @@ static unsigned CpuFeaturesImpliedByCompiler() {
const char* DwVfpRegister::AllocationIndexToString(int index) {
if (CpuFeatures::IsSupported(VFP2)) {
ASSERT(index >= 0 && index < NumAllocatableRegisters());
ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
kNumReservedRegisters - 1);
if (index >= kDoubleRegZero.code())
index += kNumReservedRegisters;
return VFPRegisters::Name(index, true);
} else {
ASSERT(index == 0);
return "sfpd0";
}
ASSERT(index >= 0 && index < NumAllocatableRegisters());
ASSERT(kScratchDoubleReg.code() - kDoubleRegZero.code() ==
kNumReservedRegisters - 1);
if (index >= kDoubleRegZero.code())
index += kNumReservedRegisters;
return VFPRegisters::Name(index, true);
}
@ -124,6 +111,8 @@ void CpuFeatures::Probe() {
if (Serializer::enabled()) {
// No probing for features if we might serialize (generate snapshot).
printf(" ");
PrintFeatures();
return;
}
@ -133,8 +122,7 @@ void CpuFeatures::Probe() {
if (FLAG_enable_vfp3) {
supported_ |=
static_cast<uint64_t>(1) << VFP3 |
static_cast<uint64_t>(1) << ARMv7 |
static_cast<uint64_t>(1) << VFP2;
static_cast<uint64_t>(1) << ARMv7;
}
// For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
if (FLAG_enable_armv7) {
@ -153,48 +141,127 @@ void CpuFeatures::Probe() {
supported_ |= static_cast<uint64_t>(1) << VFP32DREGS;
}
if (FLAG_enable_unaligned_accesses) {
supported_ |= static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
}
#else // __arm__
// Probe for additional features not already known to be available.
if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
if (!IsSupported(VFP3) && FLAG_enable_vfp3 && OS::ArmCpuHasFeature(VFP3)) {
// This implementation also sets the VFP flags if runtime
// detection of VFP returns true. VFPv3 implies ARMv7 and VFP2, see ARM DDI
// detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
// 0406B, page A1-6.
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << VFP3 |
static_cast<uint64_t>(1) << ARMv7 |
static_cast<uint64_t>(1) << VFP2;
} else if (!IsSupported(VFP2) && OS::ArmCpuHasFeature(VFP2)) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP2;
static_cast<uint64_t>(1) << ARMv7;
}
if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
if (!IsSupported(ARMv7) && FLAG_enable_armv7 && OS::ArmCpuHasFeature(ARMv7)) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << ARMv7;
}
if (!IsSupported(SUDIV) && OS::ArmCpuHasFeature(SUDIV)) {
if (!IsSupported(SUDIV) && FLAG_enable_sudiv && OS::ArmCpuHasFeature(SUDIV)) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << SUDIV;
}
if (!IsSupported(UNALIGNED_ACCESSES) && OS::ArmCpuHasFeature(ARMv7)) {
if (!IsSupported(UNALIGNED_ACCESSES) && FLAG_enable_unaligned_accesses
&& OS::ArmCpuHasFeature(ARMv7)) {
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << UNALIGNED_ACCESSES;
}
if (OS::GetCpuImplementer() == QUALCOMM_IMPLEMENTER &&
OS::ArmCpuHasFeature(ARMv7)) {
FLAG_enable_movw_movt && OS::ArmCpuHasFeature(ARMv7)) {
found_by_runtime_probing_only_ |=
static_cast<uint64_t>(1) << MOVW_MOVT_IMMEDIATE_LOADS;
}
if (!IsSupported(VFP32DREGS) && OS::ArmCpuHasFeature(VFP32DREGS)) {
if (!IsSupported(VFP32DREGS) && FLAG_enable_32dregs
&& OS::ArmCpuHasFeature(VFP32DREGS)) {
found_by_runtime_probing_only_ |= static_cast<uint64_t>(1) << VFP32DREGS;
}
supported_ |= found_by_runtime_probing_only_;
#endif
// Assert that VFP3 implies VFP2 and ARMv7.
ASSERT(!IsSupported(VFP3) || (IsSupported(VFP2) && IsSupported(ARMv7)));
// Assert that VFP3 implies ARMv7.
ASSERT(!IsSupported(VFP3) || IsSupported(ARMv7));
}
void CpuFeatures::PrintTarget() {
const char* arm_arch = NULL;
const char* arm_test = "";
const char* arm_fpu = "";
const char* arm_thumb = "";
const char* arm_float_abi = NULL;
#if defined CAN_USE_ARMV7_INSTRUCTIONS
arm_arch = "arm v7";
#else
arm_arch = "arm v6";
#endif
#ifdef __arm__
# ifdef ARM_TEST
arm_test = " test";
# endif
# if defined __ARM_NEON__
arm_fpu = " neon";
# elif defined CAN_USE_VFP3_INSTRUCTIONS
arm_fpu = " vfp3";
# else
arm_fpu = " vfp2";
# endif
# if (defined __thumb__) || (defined __thumb2__)
arm_thumb = " thumb";
# endif
arm_float_abi = OS::ArmUsingHardFloat() ? "hard" : "softfp";
#else // __arm__
arm_test = " simulator";
# if defined CAN_USE_VFP3_INSTRUCTIONS
# if defined CAN_USE_VFP32DREGS
arm_fpu = " vfp3";
# else
arm_fpu = " vfp3-d16";
# endif
# else
arm_fpu = " vfp2";
# endif
# if USE_EABI_HARDFLOAT == 1
arm_float_abi = "hard";
# else
arm_float_abi = "softfp";
# endif
#endif // __arm__
printf("target%s %s%s%s %s\n",
arm_test, arm_arch, arm_fpu, arm_thumb, arm_float_abi);
}
void CpuFeatures::PrintFeatures() {
printf(
"ARMv7=%d VFP3=%d VFP32DREGS=%d SUDIV=%d UNALIGNED_ACCESSES=%d "
"MOVW_MOVT_IMMEDIATE_LOADS=%d",
CpuFeatures::IsSupported(ARMv7),
CpuFeatures::IsSupported(VFP3),
CpuFeatures::IsSupported(VFP32DREGS),
CpuFeatures::IsSupported(SUDIV),
CpuFeatures::IsSupported(UNALIGNED_ACCESSES),
CpuFeatures::IsSupported(MOVW_MOVT_IMMEDIATE_LOADS));
#ifdef __arm__
bool eabi_hardfloat = OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
bool eabi_hardfloat = true;
#else
bool eabi_hardfloat = false;
#endif
printf(" USE_EABI_HARDFLOAT=%d\n", eabi_hardfloat);
}
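Given the two format strings, a simulator build might report something like the lines below (all feature values hypothetical; the actual output depends on the flags and the probed CPU):

target simulator arm v7 vfp3 softfp
ARMv7=1 VFP3=1 VFP32DREGS=1 SUDIV=0 UNALIGNED_ACCESSES=1 MOVW_MOVT_IMMEDIATE_LOADS=0 USE_EABI_HARDFLOAT=0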
@ -1763,7 +1830,6 @@ void Assembler::vldr(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-924.
// cond(31-28) | 1101(27-24)| U(23) | D(22) | 01(21-20) | Rbase(19-16) |
// Vd(15-12) | 1011(11-8) | offset
ASSERT(IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -1807,7 +1873,6 @@ void Assembler::vldr(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-628.
// cond(31-28) | 1101(27-24)| U001(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | offset
ASSERT(IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -1851,7 +1916,6 @@ void Assembler::vstr(const DwVfpRegister src,
// Instruction details available in ARM DDI 0406C.b, A8-1082.
// cond(31-28) | 1101(27-24)| U(23) | D(22) | 00(21-20) | Rbase(19-16) |
// Vd(15-12) | 1011(11-8) | (offset/4)
ASSERT(IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -1895,7 +1959,6 @@ void Assembler::vstr(const SwVfpRegister src,
// Instruction details available in ARM DDI 0406A, A8-786.
// cond(31-28) | 1101(27-24)| U000(23-20) | Rbase(19-16) |
// Vdst(15-12) | 1010(11-8) | (offset/4)
ASSERT(IsEnabled(VFP2));
int u = 1;
if (offset < 0) {
offset = -offset;
@ -1938,7 +2001,6 @@ void Assembler::vldm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406C.b, A8-922.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
ASSERT(IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@ -1960,7 +2022,6 @@ void Assembler::vstm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406C.b, A8-1080.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
ASSERT(IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@ -1981,7 +2042,6 @@ void Assembler::vldm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count/2)
ASSERT(IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@ -2002,7 +2062,6 @@ void Assembler::vstm(BlockAddrMode am,
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count/2)
ASSERT(IsEnabled(VFP2));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
@ -2016,7 +2075,7 @@ void Assembler::vstm(BlockAddrMode am,
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
OS::MemCopy(&i, &d, 8);
*lo = i & 0xffffffff;
*hi = i >> 32;
@ -2076,8 +2135,6 @@ static bool FitsVMOVDoubleImmediate(double d, uint32_t *encoding) {
void Assembler::vmov(const DwVfpRegister dst,
double imm,
const Register scratch) {
ASSERT(IsEnabled(VFP2));
uint32_t enc;
if (CpuFeatures::IsSupported(VFP3) && FitsVMOVDoubleImmediate(imm, &enc)) {
// The double can be encoded in the instruction.
@ -2148,7 +2205,6 @@ void Assembler::vmov(const SwVfpRegister dst,
const Condition cond) {
// Sd = Sm
// Instruction details available in ARM DDI 0406B, A8-642.
ASSERT(IsEnabled(VFP2));
int sd, d, sm, m;
dst.split_code(&sd, &d);
src.split_code(&sm, &m);
@ -2163,7 +2219,6 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-938.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
// 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@ -2181,7 +2236,6 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-940.
// cond(31-28) | 1110(27-24) | 0(23) | opc1=0index(22-21) | 0(20) |
// Vd(19-16) | Rt(15-12) | 1011(11-8) | D(7) | opc2=00(6-5) | 1(4) | 0000(3-0)
ASSERT(IsEnabled(VFP2));
ASSERT(index.index == 0 || index.index == 1);
int vd, d;
dst.split_code(&vd, &d);
@ -2198,7 +2252,6 @@ void Assembler::vmov(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=0(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
ASSERT(IsEnabled(VFP2));
ASSERT(!src1.is(pc) && !src2.is(pc));
int vm, m;
dst.split_code(&vm, &m);
@ -2215,7 +2268,6 @@ void Assembler::vmov(const Register dst1,
// Instruction details available in ARM DDI 0406C.b, A8-948.
// cond(31-28) | 1100(27-24)| 010(23-21) | op=1(20) | Rt2(19-16) |
// Rt(15-12) | 1011(11-8) | 00(7-6) | M(5) | 1(4) | Vm
ASSERT(IsEnabled(VFP2));
ASSERT(!dst1.is(pc) && !dst2.is(pc));
int vm, m;
src.split_code(&vm, &m);
@ -2231,7 +2283,6 @@ void Assembler::vmov(const SwVfpRegister dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=0(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(IsEnabled(VFP2));
ASSERT(!src.is(pc));
int sn, n;
dst.split_code(&sn, &n);
@ -2246,7 +2297,6 @@ void Assembler::vmov(const Register dst,
// Instruction details available in ARM DDI 0406A, A8-642.
// cond(31-28) | 1110(27-24)| 000(23-21) | op=1(20) | Vn(19-16) |
// Rt(15-12) | 1010(11-8) | N(7)=0 | 00(6-5) | 1(4) | 0000(3-0)
ASSERT(IsEnabled(VFP2));
ASSERT(!dst.is(pc));
int sn, n;
src.split_code(&sn, &n);
@ -2371,7 +2421,6 @@ void Assembler::vcvt_f64_s32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), S32, src.code(), mode, cond));
}
@ -2380,7 +2429,6 @@ void Assembler::vcvt_f32_s32(const SwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(F32, dst.code(), S32, src.code(), mode, cond));
}
@ -2389,7 +2437,6 @@ void Assembler::vcvt_f64_u32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), U32, src.code(), mode, cond));
}
@ -2398,7 +2445,6 @@ void Assembler::vcvt_s32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(S32, dst.code(), F64, src.code(), mode, cond));
}
@ -2407,7 +2453,6 @@ void Assembler::vcvt_u32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(U32, dst.code(), F64, src.code(), mode, cond));
}
@ -2416,7 +2461,6 @@ void Assembler::vcvt_f64_f32(const DwVfpRegister dst,
const SwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(F64, dst.code(), F32, src.code(), mode, cond));
}
@ -2425,7 +2469,6 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
const DwVfpRegister src,
VFPConversionMode mode,
const Condition cond) {
ASSERT(IsEnabled(VFP2));
emit(EncodeVCVT(F32, dst.code(), F64, src.code(), mode, cond));
}
@ -2436,7 +2479,6 @@ void Assembler::vneg(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-968.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0001(19-16) | Vd(15-12) |
// 101(11-9) | sz=1(8) | 0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@ -2453,7 +2495,6 @@ void Assembler::vabs(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-524.
// cond(31-28) | 11101(27-23) | D(22) | 11(21-20) | 0000(19-16) | Vd(15-12) |
// 101(11-9) | sz=1(8) | 1(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@ -2472,7 +2513,6 @@ void Assembler::vadd(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-830.
// cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@ -2493,7 +2533,6 @@ void Assembler::vsub(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-1086.
// cond(31-28) | 11100(27-23)| D(22) | 11(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@ -2514,7 +2553,6 @@ void Assembler::vmul(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-960.
// cond(31-28) | 11100(27-23)| D(22) | 10(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@ -2571,7 +2609,6 @@ void Assembler::vdiv(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-882.
// cond(31-28) | 11101(27-23)| D(22) | 00(21-20) | Vn(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | N(7) | 0(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vn, n;
@ -2590,7 +2627,6 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406C.b, A8-864.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0100(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
src1.split_code(&vd, &d);
int vm, m;
@ -2607,7 +2643,6 @@ void Assembler::vcmp(const DwVfpRegister src1,
// Instruction details available in ARM DDI 0406C.b, A8-864.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0101(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | E=0(7) | 1(6) | 0(5) | 0(4) | 0000(3-0)
ASSERT(IsEnabled(VFP2));
ASSERT(src2 == 0.0);
int vd, d;
src1.split_code(&vd, &d);
@ -2619,7 +2654,6 @@ void Assembler::vmsr(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1110(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
ASSERT(IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xE*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@ -2629,7 +2663,6 @@ void Assembler::vmrs(Register dst, Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-652.
// cond(31-28) | 1110 (27-24) | 1111(23-20)| 0001 (19-16) |
// Rt(15-12) | 1010 (11-8) | 0(7) | 00 (6-5) | 1(4) | 0000(3-0)
ASSERT(IsEnabled(VFP2));
emit(cond | 0xE*B24 | 0xF*B20 | B16 |
dst.code()*B12 | 0xA*B8 | B4);
}
@ -2641,7 +2674,6 @@ void Assembler::vsqrt(const DwVfpRegister dst,
// Instruction details available in ARM DDI 0406C.b, A8-1058.
// cond(31-28) | 11101(27-23)| D(22) | 11(21-20) | 0001(19-16) |
// Vd(15-12) | 101(11-9) | sz=1(8) | 11(7-6) | M(5) | 0(4) | Vm(3-0)
ASSERT(IsEnabled(VFP2));
int vd, d;
dst.split_code(&vd, &d);
int vm, m;
@ -2747,9 +2779,9 @@ void Assembler::GrowBuffer() {
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
memmove(desc.buffer, buffer_, desc.instr_size);
memmove(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.pos(), desc.reloc_size);
OS::MemMove(desc.buffer, buffer_, desc.instr_size);
OS::MemMove(reloc_info_writer.pos() + rc_delta,
reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
DeleteArray(buffer_);
@ -2998,7 +3030,7 @@ void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
const double double_data = rinfo.data64();
uint64_t uint_data = 0;
memcpy(&uint_data, &double_data, sizeof(double_data));
OS::MemCopy(&uint_data, &double_data, sizeof(double_data));
emit(uint_data & 0xFFFFFFFF);
emit(uint_data >> 32);
}
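DoubleAsTwoUInt32 and the constant-pool emission in CheckConstPool both rely on byte-copying a double into a 64-bit integer and splitting it into words. A standalone round trip of that idiom (plain memcpy stands in for OS::MemCopy, V8's platform wrapper with the same argument order):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Split a double into its low and high 32-bit halves, as the assembler
// does when emitting a 64-bit constant as two 32-bit words.
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  memcpy(&i, &d, 8);  // bit-exact copy; avoids strict-aliasing issues
  *lo = static_cast<uint32_t>(i & 0xffffffff);
  *hi = static_cast<uint32_t>(i >> 32);
}

int main() {
  uint32_t lo, hi;
  DoubleAsTwoUInt32(1.0, &lo, &hi);
  printf("hi=0x%08x lo=0x%08x\n", hi, lo);  // hi=0x3ff00000 lo=0x00000000
  return 0;
}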

19
deps/v8/src/arm/assembler-arm.h

@ -55,16 +55,15 @@ class CpuFeatures : public AllStatic {
// is enabled (snapshots must be portable).
static void Probe();
// Display target use when compiling.
static void PrintTarget();
// Display features.
static void PrintFeatures();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
if (f == VFP3 && !FLAG_enable_vfp3) return false;
if (f == VFP2 && !FLAG_enable_vfp2) return false;
if (f == SUDIV && !FLAG_enable_sudiv) return false;
if (f == UNALIGNED_ACCESSES && !FLAG_enable_unaligned_accesses) {
return false;
}
if (f == VFP32DREGS && !FLAG_enable_32dregs) return false;
return (supported_ & (1u << f)) != 0;
}
@ -117,7 +116,6 @@ struct Register {
static const int kNumRegisters = 16;
static const int kMaxNumAllocatableRegisters = 8;
static const int kSizeInBytes = 4;
static const int kGPRsPerNonVFP2Double = 2;
inline static int NumAllocatableRegisters();
@ -214,6 +212,7 @@ const Register pc = { kRegister_pc_Code };
// Single word VFP register.
struct SwVfpRegister {
static const int kSizeInBytes = 4;
bool is_valid() const { return 0 <= code_ && code_ < 32; }
bool is(SwVfpRegister reg) const { return code_ == reg.code_; }
int code() const {
@ -244,6 +243,7 @@ struct DwVfpRegister {
static const int kNumReservedRegisters = 2;
static const int kMaxNumAllocatableRegisters = kMaxNumRegisters -
kNumReservedRegisters;
static const int kSizeInBytes = 8;
// Note: the number of registers can be different at snapshot and run-time.
// Any code included in the snapshot must be able to run both with 16 or 32
@ -370,9 +370,6 @@ const DwVfpRegister d29 = { 29 };
const DwVfpRegister d30 = { 30 };
const DwVfpRegister d31 = { 31 };
const Register sfpd_lo = { kRegister_r6_Code };
const Register sfpd_hi = { kRegister_r7_Code };
// Aliases for double registers. Defined using #define instead of
// "static const DwVfpRegister&" because Clang complains otherwise when a
// compilation unit that includes this header doesn't use the variables.

1112
deps/v8/src/arm/code-stubs-arm.cc

File diff suppressed because it is too large

6
deps/v8/src/arm/code-stubs-arm.h

@ -61,11 +61,11 @@ class TranscendentalCacheStub: public PlatformCodeStub {
class StoreBufferOverflowStub: public PlatformCodeStub {
public:
explicit StoreBufferOverflowStub(SaveFPRegsMode save_fp)
: save_doubles_(save_fp) { }
: save_doubles_(save_fp) {}
void Generate(MacroAssembler* masm);
virtual bool IsPregenerated();
virtual bool IsPregenerated() { return true; }
static void GenerateFixedRegStubsAheadOfTime(Isolate* isolate);
virtual bool SometimesSetsUpAFrame() { return false; }
@ -471,7 +471,6 @@ class RecordWriteStub: public PlatformCodeStub {
if (mode == kSaveFPRegs) {
// Number of d-regs not known at snapshot time.
ASSERT(!Serializer::enabled());
CpuFeatureScope scope(masm, VFP2);
masm->sub(sp,
sp,
Operand(kDoubleSize * (DwVfpRegister::NumRegisters() - 1)));
@ -489,7 +488,6 @@ class RecordWriteStub: public PlatformCodeStub {
if (mode == kSaveFPRegs) {
// Number of d-regs not known at snapshot time.
ASSERT(!Serializer::enabled());
CpuFeatureScope scope(masm, VFP2);
// Restore all VFP registers except d0.
// TODO(hans): We should probably restore d0 too. And maybe use vldm.
for (int i = DwVfpRegister::NumRegisters() - 1; i > 0; i--) {

26
deps/v8/src/arm/codegen-arm.cc

@ -62,7 +62,6 @@ double fast_exp_simulator(double x) {
UnaryMathFunction CreateExpFunction() {
if (!CpuFeatures::IsSupported(VFP2)) return &exp;
if (!FLAG_fast_math) return &exp;
size_t actual_size;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
@ -72,7 +71,6 @@ UnaryMathFunction CreateExpFunction() {
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
{
CpuFeatureScope use_vfp(&masm, VFP2);
DwVfpRegister input = d0;
DwVfpRegister result = d1;
DwVfpRegister double_scratch1 = d2;
@ -185,7 +183,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// -- r4 : scratch (elements)
// -----------------------------------
Label loop, entry, convert_hole, gc_required, only_change_map, done;
bool vfp2_supported = CpuFeatures::IsSupported(VFP2);
if (mode == TRACK_ALLOCATION_SITE) {
__ TestJSArrayForAllocationSiteInfo(r2, r4);
@ -248,7 +245,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// r5: kHoleNanUpper32
// r6: end of destination FixedDoubleArray, not tagged
// r7: begin of FixedDoubleArray element fields, not tagged
if (!vfp2_supported) __ Push(r1, r0);
__ b(&entry);
@ -276,23 +272,10 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ UntagAndJumpIfNotSmi(r9, r9, &convert_hole);
// Normal smi, convert to double and store.
if (vfp2_supported) {
CpuFeatureScope scope(masm, VFP2);
__ vmov(s0, r9);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, r7, 0);
__ add(r7, r7, Operand(8));
} else {
FloatingPointHelper::ConvertIntToDouble(masm,
r9,
FloatingPointHelper::kCoreRegisters,
d0,
r0,
r1,
lr,
s0);
__ Strd(r0, r1, MemOperand(r7, 8, PostIndex));
}
__ vmov(s0, r9);
__ vcvt_f64_s32(d0, s0);
__ vstr(d0, r7, 0);
__ add(r7, r7, Operand(8));
__ b(&entry);
// Hole found, store the-hole NaN.
@ -310,7 +293,6 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
__ cmp(r7, r6);
__ b(lt, &loop);
if (!vfp2_supported) __ Pop(r1, r0);
__ pop(lr);
__ bind(&done);
}
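With the non-VFP2 FloatingPointHelper path gone, every element is converted by the same three-instruction sequence. In rough C++ terms, the per-element work is just an untag plus an int-to-double conversion (a sketch, assuming the 1-bit smi tag of 32-bit ARM):

#include <cstdint>
#include <cstdio>

// Rough equivalent of the sequence now emitted unconditionally per
// element: untag the smi (UntagAndJumpIfNotSmi), widen it to a double
// (vmov + vcvt_f64_s32), then store it (vstr).
double SmiToDoubleElement(int32_t tagged_smi) {
  int32_t value = tagged_smi >> 1;    // drop the smi tag bit
  return static_cast<double>(value);  // what vcvt_f64_s32 computes
}

int main() {
  printf("%g\n", SmiToDoubleElement(42 << 1));  // prints 42
  return 0;
}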

2
deps/v8/src/arm/constants-arm.cc

@ -51,7 +51,7 @@ double Instruction::DoubleImmedVmov() const {
uint64_t imm = high16 << 48;
double d;
memcpy(&d, &imm, 8);
OS::MemCopy(&d, &imm, 8);
return d;
}
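DoubleImmedVmov rebuilds a vmov.f64 immediate by placing the instruction's 16 encoded bits at the top of the double. A worked standalone example (the high16 value is chosen to give 1.0; any encodable immediate works the same way):

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  uint64_t high16 = 0x3ff0;     // 16 encoded bits from the instruction
  uint64_t imm = high16 << 48;  // 0x3ff0000000000000
  double d;
  memcpy(&d, &imm, 8);          // the same OS::MemCopy pattern as above
  printf("%g\n", d);            // prints 1, i.e. vmov.f64 dN, #1.0
  return 0;
}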

19
deps/v8/src/arm/constants-arm.h

@ -44,21 +44,25 @@
defined(__ARM_ARCH_7R__) || \
defined(__ARM_ARCH_7__)
# define CAN_USE_ARMV7_INSTRUCTIONS 1
#ifndef CAN_USE_VFP3_INSTRUCTIONS
# define CAN_USE_VFP3_INSTRUCTIONS
#endif
#endif
#if defined(__ARM_ARCH_6__) || \
defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__) || \
defined(__ARM_ARCH_6Z__) || \
#if defined(__ARM_ARCH_6__) || \
defined(__ARM_ARCH_6J__) || \
defined(__ARM_ARCH_6K__) || \
defined(__ARM_ARCH_6Z__) || \
defined(__ARM_ARCH_6ZK__) || \
defined(__ARM_ARCH_6T2__) || \
defined(CAN_USE_ARMV7_INSTRUCTIONS)
# define CAN_USE_ARMV6_INSTRUCTIONS 1
#endif
#if defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5TE__) || \
defined(__ARM_ARCH_5TEJ__) || \
#if defined(__ARM_ARCH_5__) || \
defined(__ARM_ARCH_5T__) || \
defined(__ARM_ARCH_5TE__) || \
defined(__ARM_ARCH_5TEJ__) || \
defined(CAN_USE_ARMV6_INSTRUCTIONS)
# define CAN_USE_ARMV5_INSTRUCTIONS 1
# define CAN_USE_THUMB_INSTRUCTIONS 1
@ -403,6 +407,7 @@ const uint32_t kVFPOverflowExceptionBit = 1 << 2;
const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
const uint32_t kVFPInexactExceptionBit = 1 << 4;
const uint32_t kVFPFlushToZeroMask = 1 << 24;
const uint32_t kVFPDefaultNaNModeControlBit = 1 << 25;
const uint32_t kVFPNConditionFlagBit = 1 << 31;
const uint32_t kVFPZConditionFlagBit = 1 << 30;

204
deps/v8/src/arm/deoptimizer-arm.cc

@ -117,45 +117,39 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
static const int32_t kBranchBeforeInterrupt = 0x5a000004;
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
// The back edge bookkeeping code matches the pattern:
//
// <decrement profiling counter>
// 2a 00 00 01 bpl ok
// e5 9f c? ?? ldr ip, [pc, <stack guard address>]
// e1 2f ff 3c blx ip
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
// We patch the code to the following form:
//
// <decrement profiling counter>
// e1 a0 00 00 mov r0, r0 (NOP)
// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
// e1 2f ff 3c blx ip
// and overwrite the constant containing the
// address of the stack check stub.
// Replace conditional jump with NOP.
// The back edge bookkeeping code matches the pattern:
//
// <decrement profiling counter>
// 2a 00 00 01 bpl ok
// e5 9f c? ?? ldr ip, [pc, <interrupt stub address>]
// e1 2f ff 3c blx ip
// ok-label
//
// We patch the code to the following form:
//
// <decrement profiling counter>
// e1 a0 00 00 mov r0, r0 (NOP)
// e5 9f c? ?? ldr ip, [pc, <on-stack replacement address>]
// e1 2f ff 3c blx ip
// ok-label
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code,
Code* replacement_code) {
ASSERT(!InterruptCodeIsPatched(unoptimized_code,
pc_after,
interrupt_code,
replacement_code));
static const int kInstrSize = Assembler::kInstrSize;
// Turn the jump into nops.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
patcher.masm()->nop();
// Replace the stack check address in the constant pool
// with the entry address of the replacement code.
uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
// Replace the call address.
uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
2 * kInstrSize) & 0xfff;
Address stack_check_address_pointer = pc_after + stack_check_address_offset;
ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
reinterpret_cast<uint32_t>(check_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
Address interrupt_address_pointer = pc_after + interrupt_address_offset;
Memory::uint32_at(interrupt_address_pointer) =
reinterpret_cast<uint32_t>(replacement_code->entry());
unoptimized_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
@ -163,34 +157,61 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
}
void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code,
Code* replacement_code) {
ASSERT(InterruptCodeIsPatched(unoptimized_code,
pc_after,
interrupt_code,
replacement_code));
static const int kInstrSize = Assembler::kInstrSize;
// Restore the original jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
patcher.masm()->b(4 * kInstrSize, pl); // ok-label is 4 instructions later.
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
// Restore the original call address.
uint32_t interrupt_address_offset = Memory::uint16_at(pc_after -
2 * kInstrSize) & 0xfff;
Address interrupt_address_pointer = pc_after + interrupt_address_offset;
Memory::uint32_at(interrupt_address_pointer) =
reinterpret_cast<uint32_t>(interrupt_code->entry());
interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 2 * kInstrSize, interrupt_code);
}
#ifdef DEBUG
bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
Address pc_after,
Code* check_code,
Code* interrupt_code,
Code* replacement_code) {
const int kInstrSize = Assembler::kInstrSize;
static const int kInstrSize = Assembler::kInstrSize;
ASSERT(Memory::int32_at(pc_after - kInstrSize) == kBlxIp);
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
// Replace NOP with conditional jump.
CodePatcher patcher(pc_after - 3 * kInstrSize, 1);
patcher.masm()->b(+16, pl);
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
uint32_t interrupt_address_offset =
Memory::uint16_at(pc_after - 2 * kInstrSize) & 0xfff;
Address interrupt_address_pointer = pc_after + interrupt_address_offset;
// Replace the stack check address in the constant pool
// with the entry address of the replacement code.
uint32_t stack_check_address_offset = Memory::uint16_at(pc_after -
2 * kInstrSize) & 0xfff;
Address stack_check_address_pointer = pc_after + stack_check_address_offset;
ASSERT(Memory::uint32_at(stack_check_address_pointer) ==
reinterpret_cast<uint32_t>(replacement_code->entry()));
Memory::uint32_at(stack_check_address_pointer) =
reinterpret_cast<uint32_t>(check_code->entry());
check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, pc_after - 2 * kInstrSize, check_code);
if (Assembler::IsNop(Assembler::instr_at(pc_after - 3 * kInstrSize))) {
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
ASSERT(reinterpret_cast<uint32_t>(replacement_code->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
return true;
} else {
ASSERT(Assembler::IsLdrPcImmediateOffset(
Assembler::instr_at(pc_after - 2 * kInstrSize)));
ASSERT_EQ(kBranchBeforeInterrupt,
Memory::int32_at(pc_after - 3 * kInstrSize));
ASSERT(reinterpret_cast<uint32_t>(interrupt_code->entry()) ==
Memory::uint32_at(interrupt_address_pointer));
return false;
}
}
#endif // DEBUG
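The patch/revert pair above only ever touches two words: the conditional branch three instructions before the call, and the constant-pool entry that feeds ip. A toy simulation of that state machine (plain integers stand in for code-entry addresses; kNop is the "mov r0, r0" encoding mentioned in the comments):

#include <cassert>
#include <cstdint>

const uint32_t kBranchBeforeInterrupt = 0x5a000004;  // "bpl ok"
const uint32_t kNop = 0xe1a00000;                    // "mov r0, r0"

struct BackEdgeSite {
  uint32_t branch_or_nop;  // third instruction before the blx ip call
  uint32_t pool_entry;     // address loaded into ip from the pool
};

void Patch(BackEdgeSite* site, uint32_t replacement_entry) {
  assert(site->branch_or_nop == kBranchBeforeInterrupt);
  site->branch_or_nop = kNop;            // fall through to the call
  site->pool_entry = replacement_entry;  // call on-stack replacement
}

void Revert(BackEdgeSite* site, uint32_t interrupt_entry) {
  assert(site->branch_or_nop == kNop);
  site->branch_or_nop = kBranchBeforeInterrupt;  // skip the call again
  site->pool_entry = interrupt_entry;            // call interrupt stub
}

int main() {
  BackEdgeSite site = { kBranchBeforeInterrupt, 0x1000 };  // interrupt stub
  Patch(&site, 0x2000);   // now calls the replacement unconditionally
  Revert(&site, 0x1000);  // back to the conditional interrupt check
  return 0;
}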
static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {
@ -594,23 +615,18 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize =
kDoubleSize * DwVfpRegister::kMaxNumAllocatableRegisters;
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
// Save all allocatable VFP registers before messing with them.
ASSERT(kDoubleRegZero.code() == 14);
ASSERT(kScratchDoubleReg.code() == 15);
// Save all allocatable VFP registers before messing with them.
ASSERT(kDoubleRegZero.code() == 14);
ASSERT(kScratchDoubleReg.code() == 15);
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
// Push registers d0-d13, and possibly d16-d31, on the stack.
// If d16-d31 are not pushed, decrease the stack pointer instead.
__ vstm(db_w, sp, d16, d31, ne);
__ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
__ vstm(db_w, sp, d0, d13);
} else {
__ sub(sp, sp, Operand(kDoubleRegsSize));
}
// Push registers d0-d13, and possibly d16-d31, on the stack.
// If d16-d31 are not pushed, decrease the stack pointer instead.
__ vstm(db_w, sp, d16, d31, ne);
__ sub(sp, sp, Operand(16 * kDoubleSize), LeaveCC, eq);
__ vstm(db_w, sp, d0, d13);
// Push all 16 registers (needed to populate FrameDescription::registers_).
// TODO(1588) Note that using pc with stm is deprecated, so we should perhaps
@ -669,17 +685,14 @@ void Deoptimizer::EntryGenerator::Generate() {
__ str(r2, MemOperand(r1, offset));
}
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
// Copy VFP registers to
// double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ vldr(d0, sp, src_offset);
__ vstr(d0, r1, dst_offset);
}
// Copy VFP registers to
// double_registers_[DoubleRegister::kMaxNumAllocatableRegisters]
int double_regs_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < DwVfpRegister::kMaxNumAllocatableRegisters; ++i) {
int dst_offset = i * kDoubleSize + double_regs_offset;
int src_offset = i * kDoubleSize + kNumberOfRegisters * kPointerSize;
__ vldr(d0, sp, src_offset);
__ vstr(d0, r1, dst_offset);
}
// Remove the bailout id, eventually return address, and the saved registers
@ -749,21 +762,18 @@ void Deoptimizer::EntryGenerator::Generate() {
__ cmp(r4, r1);
__ b(lt, &outer_push_loop);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
// Check CPU flags for number of registers, setting the Z condition flag.
__ CheckFor32DRegs(ip);
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
int src_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
if (i == kDoubleRegZero.code()) continue;
if (i == kScratchDoubleReg.code()) continue;
__ ldr(r1, MemOperand(r0, Deoptimizer::input_offset()));
int src_offset = FrameDescription::double_registers_offset();
for (int i = 0; i < DwVfpRegister::kMaxNumRegisters; ++i) {
if (i == kDoubleRegZero.code()) continue;
if (i == kScratchDoubleReg.code()) continue;
const DwVfpRegister reg = DwVfpRegister::from_code(i);
__ vldr(reg, r1, src_offset, i < 16 ? al : ne);
src_offset += kDoubleSize;
}
const DwVfpRegister reg = DwVfpRegister::from_code(i);
__ vldr(reg, r1, src_offset, i < 16 ? al : ne);
src_offset += kDoubleSize;
}
// Push state, pc, and continuation from the last output frame.

5
deps/v8/src/arm/disasm-arm.cc

@ -1561,8 +1561,9 @@ void Disassembler::Disassemble(FILE* f, byte* begin, byte* end) {
buffer[0] = '\0';
byte* prev_pc = pc;
pc += d.InstructionDecode(buffer, pc);
fprintf(f, "%p %08x %s\n",
prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
v8::internal::PrintF(
f, "%p %08x %s\n",
prev_pc, *reinterpret_cast<int32_t*>(prev_pc), buffer.start());
}
}

12
deps/v8/src/arm/frames-arm.h

@ -100,18 +100,6 @@ const int kNumSafepointSavedRegisters = kNumJSCallerSaved + kNumCalleeSaved;
// ----------------------------------------------------
class StackHandlerConstants : public AllStatic {
public:
static const int kNextOffset = 0 * kPointerSize;
static const int kCodeOffset = 1 * kPointerSize;
static const int kStateOffset = 2 * kPointerSize;
static const int kContextOffset = 3 * kPointerSize;
static const int kFPOffset = 4 * kPointerSize;
static const int kSize = kFPOffset + kPointerSize;
};
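For reference, the deleted constants (now presumably coming from the shared frames.h, which this commit also touches) describe a five-word handler; with kPointerSize == 4 on ARM the layout works out to:

kNextOffset    =  0
kCodeOffset    =  4
kStateOffset   =  8
kContextOffset = 12
kFPOffset      = 16
kSize          = 20   (kFPOffset + kPointerSize)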
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -3 * kPointerSize;

78
deps/v8/src/arm/full-codegen-arm.cc

@ -162,8 +162,6 @@ void FullCodeGenerator::Generate() {
// the frame (that is done below).
FrameScope frame_scope(masm_, StackFrame::MANUAL);
int locals_count = info->scope()->num_stack_slots();
info->set_prologue_offset(masm_->pc_offset());
{
PredictableCodeSizeScope predictible_code_size_scope(
@ -179,6 +177,9 @@ void FullCodeGenerator::Generate() {
}
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
for (int i = 0; i < locals_count; i++) {
__ push(ip);
}
@ -313,7 +314,7 @@ void FullCodeGenerator::Generate() {
EmitReturnSequence();
// Force emit the constant pool, so it doesn't get emitted in the middle
// of the stack check table.
// of the back edge table.
masm()->CheckConstPool(true, false);
}
@ -350,7 +351,7 @@ void FullCodeGenerator::EmitProfilingCounterReset() {
void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
Label* back_edge_target) {
Comment cmnt(masm_, "[ Back edge bookkeeping");
// Block literal pools whilst emitting stack check code.
// Block literal pools whilst emitting back edge code.
Assembler::BlockConstPoolScope block_const_pool(masm_);
Label ok;
@ -1268,7 +1269,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->language_mode());
FastNewClosureStub stub(info->language_mode(), info->is_generator());
__ mov(r0, Operand(info));
__ push(r0);
__ CallStub(&stub);
@ -1562,7 +1563,7 @@ void FullCodeGenerator::VisitRegExpLiteral(RegExpLiteral* expr) {
// r0: Newly allocated regexp.
// r5: Materialized regexp.
// r2: temp.
__ CopyFields(r0, r5, r2.bit(), size / kPointerSize);
__ CopyFields(r0, r5, d0, s0, size / kPointerSize);
context()->Plug(r0);
}
@ -1727,7 +1728,6 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ ldr(r3, FieldMemOperand(r3, JSFunction::kLiteralsOffset));
__ mov(r2, Operand(Smi::FromInt(expr->literal_index())));
__ mov(r1, Operand(constant_elements));
__ Push(r3, r2, r1);
if (has_fast_elements && constant_elements_values->map() ==
isolate()->heap()->fixed_cow_array_map()) {
FastCloneShallowArrayStub stub(
@ -1738,8 +1738,11 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
__ IncrementCounter(
isolate()->counters()->cow_arrays_created_stub(), 1, r1, r2);
} else if (expr->depth() > 1) {
__ Push(r3, r2, r1);
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
} else if (Serializer::enabled() ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ Push(r3, r2, r1);
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
@ -3024,37 +3027,26 @@ void FullCodeGenerator::EmitRandomHeapNumber(CallRuntime* expr) {
// Convert 32 random bits in r0 to 0.(32 random bits) in a double
// by computing:
// ( 1.(20 0s)(32 random bits) x 2^20 ) - (1.0 x 2^20)).
if (CpuFeatures::IsSupported(VFP2)) {
__ PrepareCallCFunction(1, r0);
__ ldr(r0,
ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
CpuFeatureScope scope(masm(), VFP2);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
__ mov(r1, Operand(0x41000000));
__ orr(r1, r1, Operand(0x300000));
// Move 0x41300000xxxxxxxx (x = random bits) to VFP.
__ vmov(d7, r0, r1);
// Move 0x4130000000000000 to VFP.
__ mov(r0, Operand::Zero());
__ vmov(d8, r0, r1);
// Subtract and store the result in the heap number.
__ vsub(d7, d7, d8);
__ sub(r0, r4, Operand(kHeapObjectTag));
__ vstr(d7, r0, HeapNumber::kValueOffset);
__ mov(r0, r4);
} else {
__ PrepareCallCFunction(2, r0);
__ ldr(r1,
ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
__ mov(r0, Operand(r4));
__ ldr(r1, FieldMemOperand(r1, GlobalObject::kNativeContextOffset));
__ CallCFunction(
ExternalReference::fill_heap_number_with_random_function(isolate()), 2);
}
__ PrepareCallCFunction(1, r0);
__ ldr(r0,
ContextOperand(context_register(), Context::GLOBAL_OBJECT_INDEX));
__ ldr(r0, FieldMemOperand(r0, GlobalObject::kNativeContextOffset));
__ CallCFunction(ExternalReference::random_uint32_function(isolate()), 1);
// 0x41300000 is the top half of 1.0 x 2^20 as a double.
// Create this constant using mov/orr to avoid PC relative load.
__ mov(r1, Operand(0x41000000));
__ orr(r1, r1, Operand(0x300000));
// Move 0x41300000xxxxxxxx (x = random bits) to VFP.
__ vmov(d7, r0, r1);
// Move 0x4130000000000000 to VFP.
__ mov(r0, Operand::Zero());
__ vmov(d8, r0, r1);
// Subtract and store the result in the heap number.
__ vsub(d7, d7, d8);
__ sub(r0, r4, Operand(kHeapObjectTag));
__ vstr(d7, r0, HeapNumber::kValueOffset);
__ mov(r0, r4);
context()->Plug(r0);
}
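The mov/orr/vmov/vsub sequence is the classic exponent-bias trick spelled out in the comment above; a standalone version of the computation:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Build 1.(20 zero bits)(32 random bits) * 2^20 and subtract 1.0 * 2^20,
// leaving random_bits / 2^32, i.e. a double in [0, 1).
double RandomBitsToDouble(uint32_t random_bits) {
  uint64_t bits = (static_cast<uint64_t>(0x41300000) << 32) | random_bits;
  double d;
  memcpy(&d, &bits, 8);  // 0x41300000xxxxxxxx viewed as a double
  return d - 1048576.0;  // 1048576.0 == 1.0 * 2^20 == 0x4130000000000000
}

int main() {
  printf("%f\n", RandomBitsToDouble(0x80000000u));  // prints 0.500000
  return 0;
}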
@ -3191,12 +3183,8 @@ void FullCodeGenerator::EmitMathPow(CallRuntime* expr) {
ASSERT(args->length() == 2);
VisitForStackValue(args->at(0));
VisitForStackValue(args->at(1));
if (CpuFeatures::IsSupported(VFP2)) {
MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
} else {
__ CallRuntime(Runtime::kMath_pow, 2);
}
MathPowStub stub(MathPowStub::ON_STACK);
__ CallStub(&stub);
context()->Plug(r0);
}

157
deps/v8/src/arm/lithium-arm.cc

@ -302,17 +302,6 @@ void LCallConstantFunction::PrintDataTo(StringStream* stream) {
}
void LUnaryMathOperation::PrintDataTo(StringStream* stream) {
stream->Add("/%s ", hydrogen()->OpName());
value()->PrintTo(stream);
}
void LMathExp::PrintDataTo(StringStream* stream) {
value()->PrintTo(stream);
}
void LLoadContextSlot::PrintDataTo(StringStream* stream) {
context()->PrintTo(stream);
stream->Add("[%d]", slot_index());
@ -1124,47 +1113,101 @@ LInstruction* LChunkBuilder::DoInvokeFunction(HInvokeFunction* instr) {
LInstruction* LChunkBuilder::DoUnaryMathOperation(HUnaryMathOperation* instr) {
BuiltinFunctionId op = instr->op();
if (op == kMathLog || op == kMathSin || op == kMathCos || op == kMathTan) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, NULL);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
} else if (op == kMathExp) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseTempRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
return DefineAsRegister(result);
} else if (op == kMathPowHalf) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LOperand* temp = FixedTemp(d3);
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
return DefineFixedDouble(result, d2);
} else {
LOperand* input = UseRegister(instr->value());
LOperand* temp = (op == kMathRound) ? FixedTemp(d3) : NULL;
LUnaryMathOperation* result = new(zone()) LUnaryMathOperation(input, temp);
switch (op) {
case kMathAbs:
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
case kMathFloor:
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
case kMathSqrt:
return DefineAsRegister(result);
case kMathRound:
return AssignEnvironment(DefineAsRegister(result));
default:
UNREACHABLE();
return NULL;
}
switch (instr->op()) {
case kMathFloor: return DoMathFloor(instr);
case kMathRound: return DoMathRound(instr);
case kMathAbs: return DoMathAbs(instr);
case kMathLog: return DoMathLog(instr);
case kMathSin: return DoMathSin(instr);
case kMathCos: return DoMathCos(instr);
case kMathTan: return DoMathTan(instr);
case kMathExp: return DoMathExp(instr);
case kMathSqrt: return DoMathSqrt(instr);
case kMathPowHalf: return DoMathPowHalf(instr);
default:
UNREACHABLE();
return NULL;
}
}
LInstruction* LChunkBuilder::DoMathFloor(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
LMathFloor* result = new(zone()) LMathFloor(input);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoMathRound(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
LOperand* temp = FixedTemp(d3);
LMathRound* result = new(zone()) LMathRound(input, temp);
return AssignEnvironment(DefineAsRegister(result));
}
LInstruction* LChunkBuilder::DoMathAbs(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
LMathAbs* result = new(zone()) LMathAbs(input);
return AssignEnvironment(AssignPointerMap(DefineAsRegister(result)));
}
LInstruction* LChunkBuilder::DoMathLog(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LMathLog* result = new(zone()) LMathLog(input);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
}
LInstruction* LChunkBuilder::DoMathSin(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LMathSin* result = new(zone()) LMathSin(input);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
}
LInstruction* LChunkBuilder::DoMathCos(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LMathCos* result = new(zone()) LMathCos(input);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
}
LInstruction* LChunkBuilder::DoMathTan(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LMathTan* result = new(zone()) LMathTan(input);
return MarkAsCall(DefineFixedDouble(result, d2), instr);
}
LInstruction* LChunkBuilder::DoMathExp(HUnaryMathOperation* instr) {
ASSERT(instr->representation().IsDouble());
ASSERT(instr->value()->representation().IsDouble());
LOperand* input = UseTempRegister(instr->value());
LOperand* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LOperand* double_temp = FixedTemp(d3); // Chosen by fair dice roll.
LMathExp* result = new(zone()) LMathExp(input, double_temp, temp1, temp2);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoMathSqrt(HUnaryMathOperation* instr) {
LOperand* input = UseRegister(instr->value());
LMathSqrt* result = new(zone()) LMathSqrt(input);
return DefineAsRegister(result);
}
LInstruction* LChunkBuilder::DoMathPowHalf(HUnaryMathOperation* instr) {
LOperand* input = UseFixedDouble(instr->value(), d2);
LOperand* temp = FixedTemp(d3);
LMathPowHalf* result = new(zone()) LMathPowHalf(input, temp);
return DefineFixedDouble(result, d2);
}
LInstruction* LChunkBuilder::DoCallKeyed(HCallKeyed* instr) {
ASSERT(instr->key()->representation().IsTagged());
argument_count_ -= instr->argument_count();
@ -1933,7 +1976,7 @@ LInstruction* LChunkBuilder::DoCheckPrototypeMaps(HCheckPrototypeMaps* instr) {
LUnallocated* temp1 = TempRegister();
LOperand* temp2 = TempRegister();
LCheckPrototypeMaps* result = new(zone()) LCheckPrototypeMaps(temp1, temp2);
return AssignEnvironment(Define(result, temp1));
return AssignEnvironment(result);
}
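The template-argument change in the header below is the real interface change: by Lithium convention LTemplateInstruction<R, I, T> declares R results, I inputs and T temps, so going from <1, 0, 2> to <0, 0, 2> (and from Define(result, temp1) to plain AssignEnvironment(result) here) turns the check into a pure side-effect instruction with no output register. A toy restatement of that convention, with stand-in types rather than the real V8 classes:

// Toy illustration: LTemplateInstruction<R, I, T> = R results, I inputs,
// T temps. The real class stores operand arrays of these sizes.
template<int R, int I, int T> struct LTemplateInstruction {
  static const int kResults = R;
  static const int kInputs = I;
  static const int kTemps = T;
};

// New shape of LCheckPrototypeMaps: no result slot, two temps kept.
typedef LTemplateInstruction<0, 0, 2> LCheckPrototypeMapsShape;

int main() { return LCheckPrototypeMapsShape::kResults; }  // 0: no output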
@ -2133,16 +2176,7 @@ LInstruction* LChunkBuilder::DoLoadKeyed(HLoadKeyed* instr) {
(instr->representation().IsDouble() &&
((elements_kind == EXTERNAL_FLOAT_ELEMENTS) ||
(elements_kind == EXTERNAL_DOUBLE_ELEMENTS))));
// float->double conversion on non-VFP2 requires an extra scratch
// register. For convenience, just mark the elements register as "UseTemp"
// so that it can be used as a temp during the float->double conversion
// after it's no longer needed after the float load.
bool needs_temp =
!CpuFeatures::IsSupported(VFP2) &&
(elements_kind == EXTERNAL_FLOAT_ELEMENTS);
LOperand* external_pointer = needs_temp
? UseTempRegister(instr->elements())
: UseRegister(instr->elements());
LOperand* external_pointer = UseRegister(instr->elements());
result = new(zone()) LLoadKeyed(external_pointer, key);
}
@ -2341,11 +2375,6 @@ LInstruction* LChunkBuilder::DoAllocate(HAllocate* instr) {
}
LInstruction* LChunkBuilder::DoFastLiteral(HFastLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LFastLiteral, r0), instr);
}
LInstruction* LChunkBuilder::DoArrayLiteral(HArrayLiteral* instr) {
return MarkAsCall(DefineFixed(new(zone()) LArrayLiteral, r0), instr);
}

148
deps/v8/src/arm/lithium-arm.h

@ -97,7 +97,6 @@ class LCodeGen;
V(DoubleToI) \
V(DummyUse) \
V(ElementsKind) \
V(FastLiteral) \
V(FixedArrayBaseLength) \
V(FunctionLiteral) \
V(GetCachedArrayIndex) \
@ -134,9 +133,18 @@ class LCodeGen;
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(MapEnumLength) \
V(MathAbs) \
V(MathCos) \
V(MathExp) \
V(MathFloor) \
V(MathFloorOfDiv) \
V(MathLog) \
V(MathMinMax) \
V(MathPowHalf) \
V(MathRound) \
V(MathSin) \
V(MathSqrt) \
V(MathTan) \
V(ModI) \
V(MulI) \
V(MultiplyAddD) \
@ -181,7 +189,6 @@ class LCodeGen;
V(TrapAllocationMemento) \
V(Typeof) \
V(TypeofIsAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf) \
V(ForInPrepareMap) \
@ -703,9 +710,22 @@ class LCmpIDAndBranch: public LControlInstruction<2, 0> {
};
class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
class LMathFloor: public LTemplateInstruction<1, 1, 0> {
public:
LUnaryMathOperation(LOperand* value, LOperand* temp) {
explicit LMathFloor(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathFloor, "math-floor")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
class LMathRound: public LTemplateInstruction<1, 1, 1> {
public:
LMathRound(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
@ -713,11 +733,69 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(UnaryMathOperation, "unary-math-operation")
DECLARE_CONCRETE_INSTRUCTION(MathRound, "math-round")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
virtual void PrintDataTo(StringStream* stream);
BuiltinFunctionId op() const { return hydrogen()->op(); }
class LMathAbs: public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathAbs(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathAbs, "math-abs")
DECLARE_HYDROGEN_ACCESSOR(UnaryMathOperation)
};
class LMathLog: public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathLog(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathLog, "math-log")
};
class LMathSin: public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSin(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathSin, "math-sin")
};
class LMathCos: public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathCos(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathCos, "math-cos")
};
class LMathTan: public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathTan(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathTan, "math-tan")
};
@ -740,8 +818,32 @@ class LMathExp: public LTemplateInstruction<1, 1, 3> {
LOperand* double_temp() { return temps_[2]; }
DECLARE_CONCRETE_INSTRUCTION(MathExp, "math-exp")
};
virtual void PrintDataTo(StringStream* stream);
class LMathSqrt: public LTemplateInstruction<1, 1, 0> {
public:
explicit LMathSqrt(LOperand* value) {
inputs_[0] = value;
}
LOperand* value() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathSqrt, "math-sqrt")
};
class LMathPowHalf: public LTemplateInstruction<1, 1, 1> {
public:
LMathPowHalf(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
LOperand* value() { return inputs_[0]; }
LOperand* temp() { return temps_[0]; }
DECLARE_CONCRETE_INSTRUCTION(MathPowHalf, "math-pow-half")
};
@ -1310,7 +1412,7 @@ class LMathMinMax: public LTemplateInstruction<1, 2, 0> {
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "min-max")
DECLARE_CONCRETE_INSTRUCTION(MathMinMax, "math-min-max")
DECLARE_HYDROGEN_ACCESSOR(MathMinMax)
};
@ -2074,7 +2176,13 @@ class LStoreKeyed: public LTemplateInstruction<0, 3, 0> {
DECLARE_HYDROGEN_ACCESSOR(StoreKeyed)
virtual void PrintDataTo(StringStream* stream);
bool NeedsCanonicalization() { return hydrogen()->NeedsCanonicalization(); }
bool NeedsCanonicalization() {
if (hydrogen()->value()->IsAdd() || hydrogen()->value()->IsSub() ||
hydrogen()->value()->IsMul() || hydrogen()->value()->IsDiv()) {
return false;
}
return hydrogen()->NeedsCanonicalization();
}
uint32_t additional_index() const { return hydrogen()->index_offset(); }
};
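NeedsCanonicalization guards NaN storage: a FixedDoubleArray marks missing elements with one specific NaN bit pattern (the hole NaN whose upper word, kHoleNanUpper32, is checked in the load path further down), so any other NaN must be rewritten to the canonical quiet NaN before being stored. The new early-out assumes results of Add/Sub/Mul/Div never require that rewrite, presumably because the FPU runs in default-NaN mode (see the kVFPDefaultNaNModeControlBit constant added in constants-arm.h above). A sketch of the canonicalization being skipped:

#include <cmath>
#include <limits>

// Illustration only: a canonicalizing store rewrites every NaN to the one
// canonical quiet NaN so no stored value can alias the hole marker.
void CanonicalizingStore(double value, double* slot) {
  if (std::isnan(value)) value = std::numeric_limits<double>::quiet_NaN();
  *slot = value;
}

int main() {
  double slot;
  CanonicalizingStore(std::nan(""), &slot);  // any NaN input...
  return 0;                                  // ...lands as the canonical NaN
}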
@ -2239,7 +2347,7 @@ class LCheckMaps: public LTemplateInstruction<0, 1, 0> {
};
class LCheckPrototypeMaps: public LTemplateInstruction<1, 0, 2> {
class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
public:
LCheckPrototypeMaps(LOperand* temp, LOperand* temp2) {
temps_[0] = temp;
@ -2355,13 +2463,6 @@ class LAllocate: public LTemplateInstruction<1, 2, 2> {
};
class LFastLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(FastLiteral, "fast-literal")
DECLARE_HYDROGEN_ACCESSOR(FastLiteral)
};
class LArrayLiteral: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(ArrayLiteral, "array-literal")
@ -2611,6 +2712,17 @@ class LChunkBuilder BASE_EMBEDDED {
static HValue* SimplifiedDividendForMathFloorOfDiv(HValue* val);
static HValue* SimplifiedDivisorForMathFloorOfDiv(HValue* val);
LInstruction* DoMathFloor(HUnaryMathOperation* instr);
LInstruction* DoMathRound(HUnaryMathOperation* instr);
LInstruction* DoMathAbs(HUnaryMathOperation* instr);
LInstruction* DoMathLog(HUnaryMathOperation* instr);
LInstruction* DoMathSin(HUnaryMathOperation* instr);
LInstruction* DoMathCos(HUnaryMathOperation* instr);
LInstruction* DoMathTan(HUnaryMathOperation* instr);
LInstruction* DoMathExp(HUnaryMathOperation* instr);
LInstruction* DoMathSqrt(HUnaryMathOperation* instr);
LInstruction* DoMathPowHalf(HUnaryMathOperation* instr);
private:
enum Status {
UNUSED,

551
deps/v8/src/arm/lithium-codegen-arm.cc

@ -113,7 +113,7 @@ void LCodeGen::Comment(const char* format, ...) {
// issues when the stack allocated buffer goes out of scope.
size_t length = builder.position();
Vector<char> copy = Vector<char>::New(length + 1);
memcpy(copy.start(), builder.Finalize(), copy.length());
OS::MemCopy(copy.start(), builder.Finalize(), copy.length());
masm()->RecordComment(copy.start());
}
@ -195,8 +195,7 @@ bool LCodeGen::GeneratePrologue() {
}
}
if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
if (info()->saves_caller_doubles()) {
Comment(";;; Save clobbered callee double registers");
int count = 0;
BitVector* doubles = chunk()->allocated_double_registers();
@ -852,7 +851,9 @@ void LCodeGen::DeoptimizeIf(Condition cc, LEnvironment* environment) {
}
ASSERT(FLAG_deopt_every_n_times < 2); // Other values not supported on ARM.
if (FLAG_deopt_every_n_times == 1 && info_->opt_count() == id) {
if (FLAG_deopt_every_n_times == 1 &&
!info()->IsStub() &&
info()->opt_count() == id) {
__ Jump(entry, RelocInfo::RUNTIME_ENTRY);
return;
}
@ -1209,8 +1210,6 @@ void LCodeGen::DoModI(LModI* instr) {
Label vfp_modulo, both_positive, right_negative;
CpuFeatureScope scope(masm(), VFP2);
// Check for x % 0.
if (instr->hydrogen()->CheckFlag(HValue::kCanBeDivByZero)) {
__ cmp(right, Operand::Zero());
@ -1615,7 +1614,6 @@ void LCodeGen::DoDeferredBinaryOpStub(LPointerMap* pointer_map,
LOperand* left_argument,
LOperand* right_argument,
Token::Value op) {
CpuFeatureScope vfp_scope(masm(), VFP2);
Register left = ToRegister(left_argument);
Register right = ToRegister(right_argument);
@ -1901,7 +1899,6 @@ void LCodeGen::DoConstantI(LConstantI* instr) {
void LCodeGen::DoConstantD(LConstantD* instr) {
ASSERT(instr->result()->IsDoubleRegister());
DwVfpRegister result = ToDoubleRegister(instr->result());
CpuFeatureScope scope(masm(), VFP2);
double v = instr->value();
__ Vmov(result, v, scratch0());
}
@ -2072,7 +2069,6 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
__ mov(result_reg, right_op, LeaveCC, NegateCondition(condition));
} else {
ASSERT(instr->hydrogen()->representation().IsDouble());
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister left_reg = ToDoubleRegister(left);
DwVfpRegister right_reg = ToDoubleRegister(right);
DwVfpRegister result_reg = ToDoubleRegister(instr->result());
@ -2118,7 +2114,6 @@ void LCodeGen::DoMathMinMax(LMathMinMax* instr) {
void LCodeGen::DoArithmeticD(LArithmeticD* instr) {
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister left = ToDoubleRegister(instr->left());
DwVfpRegister right = ToDoubleRegister(instr->right());
DwVfpRegister result = ToDoubleRegister(instr->result());
@ -2209,7 +2204,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
__ cmp(reg, Operand::Zero());
EmitBranch(true_block, false_block, ne);
} else if (r.IsDouble()) {
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister reg = ToDoubleRegister(instr->value());
Register scratch = scratch0();
@ -2301,7 +2295,6 @@ void LCodeGen::DoBranch(LBranch* instr) {
}
if (expected.Contains(ToBooleanStub::HEAP_NUMBER)) {
CpuFeatureScope scope(masm(), VFP2);
// heap number -> false iff +0, -0, or NaN.
DwVfpRegister dbl_scratch = double_scratch0();
Label not_heap_number;
@ -2381,7 +2374,6 @@ void LCodeGen::DoCmpIDAndBranch(LCmpIDAndBranch* instr) {
EmitGoto(next_block);
} else {
if (instr->is_double()) {
CpuFeatureScope scope(masm(), VFP2);
// Compare left and right operands as doubles and load the
// resulting flags into the normal status register.
__ VFPCompareAndSetFlags(ToDoubleRegister(left), ToDoubleRegister(right));
@ -2936,8 +2928,7 @@ void LCodeGen::DoReturn(LReturn* instr) {
__ push(r0);
__ CallRuntime(Runtime::kTraceExit, 1);
}
if (info()->saves_caller_doubles() && CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
if (info()->saves_caller_doubles()) {
ASSERT(NeedsEagerFrame());
BitVector* doubles = chunk()->allocated_double_registers();
BitVector::Iterator save_iterator(doubles);
@ -3319,58 +3310,11 @@ void LCodeGen::DoLoadKeyedExternalArray(LLoadKeyed* instr) {
? Operand(constant_key << element_size_shift)
: Operand(key, LSL, shift_size);
__ add(scratch0(), external_pointer, operand);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset);
__ vcvt_f64_f32(result, kScratchDoubleReg.low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), additional_offset);
}
} else {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
Register value = external_pointer;
__ ldr(value, MemOperand(scratch0(), additional_offset));
__ and_(sfpd_lo, value, Operand(kBinary32MantissaMask));
__ mov(scratch0(), Operand(value, LSR, kBinary32MantissaBits));
__ and_(scratch0(), scratch0(),
Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
Label exponent_rebiased;
__ teq(scratch0(), Operand(0x00));
__ b(eq, &exponent_rebiased);
__ teq(scratch0(), Operand(0xff));
__ mov(scratch0(), Operand(0x7ff), LeaveCC, eq);
__ b(eq, &exponent_rebiased);
// Rebias exponent.
__ add(scratch0(),
scratch0(),
Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
__ bind(&exponent_rebiased);
__ and_(sfpd_hi, value, Operand(kBinary32SignMask));
__ orr(sfpd_hi, sfpd_hi,
Operand(scratch0(), LSL, HeapNumber::kMantissaBitsInTopWord));
// Shift mantissa.
static const int kMantissaShiftForHiWord =
kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
static const int kMantissaShiftForLoWord =
kBitsPerInt - kMantissaShiftForHiWord;
__ orr(sfpd_hi, sfpd_hi,
Operand(sfpd_lo, LSR, kMantissaShiftForHiWord));
__ mov(sfpd_lo, Operand(sfpd_lo, LSL, kMantissaShiftForLoWord));
} else {
__ ldr(sfpd_lo, MemOperand(scratch0(), additional_offset));
__ ldr(sfpd_hi, MemOperand(scratch0(),
additional_offset + kPointerSize));
}
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
__ vldr(kScratchDoubleReg.low(), scratch0(), additional_offset);
__ vcvt_f64_f32(result, kScratchDoubleReg.low());
} else { // i.e. elements_kind == EXTERNAL_DOUBLE_ELEMENTS
__ vldr(result, scratch0(), additional_offset);
}
} else {
Register result = ToRegister(instr->result());
@ -3444,23 +3388,12 @@ void LCodeGen::DoLoadKeyedFixedDoubleArray(LLoadKeyed* instr) {
if (!key_is_constant) {
__ add(elements, elements, Operand(key, LSL, shift_size));
}
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
__ add(elements, elements, Operand(base_offset));
__ vldr(result, elements, 0);
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
} else {
__ ldr(sfpd_hi, MemOperand(elements, base_offset + kPointerSize));
__ ldr(sfpd_lo, MemOperand(elements, base_offset));
if (instr->hydrogen()->RequiresHoleCheck()) {
ASSERT(kPointerSize == sizeof(kHoleNanLower32));
__ cmp(sfpd_hi, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
__ add(elements, elements, Operand(base_offset));
__ vldr(result, elements, 0);
if (instr->hydrogen()->RequiresHoleCheck()) {
__ ldr(scratch, MemOperand(elements, sizeof(kHoleNanLower32)));
__ cmp(scratch, Operand(kHoleNanUpper32));
DeoptimizeIf(eq, instr->environment());
}
}
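
For readers unfamiliar with the hole encoding: a FixedDoubleArray marks missing elements with one reserved NaN bit pattern, and the load above only needs the upper 32 bits of the double to recognize it. A minimal C++ sketch of that check, with an illustrative constant standing in for v8's kHoleNanUpper32:

#include <cstdint>
#include <cstring>

// Placeholder for v8's kHoleNanUpper32; the real value is defined by v8,
// this one is only for illustration.
static const uint32_t kHoleNanUpper32Example = 0x7FF7FFFFu;

// Mirrors the "ldr upper word, cmp, deoptimize on eq" sequence above:
// the hole sentinel is recognizable from the top 32 bits alone.
bool IsHoleNan(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  return static_cast<uint32_t>(bits >> 32) == kHoleNanUpper32Example;
}
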
@ -3821,7 +3754,7 @@ void LCodeGen::DoCallConstantFunction(LCallConstantFunction* instr) {
}
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
Register scratch = scratch0();
@ -3887,7 +3820,7 @@ void LCodeGen::DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr) {
}
void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
void LCodeGen::EmitIntegerMathAbs(LMathAbs* instr) {
Register input = ToRegister(instr->value());
Register result = ToRegister(instr->result());
__ cmp(input, Operand::Zero());
@ -3901,20 +3834,18 @@ void LCodeGen::EmitIntegerMathAbs(LUnaryMathOperation* instr) {
}
void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
CpuFeatureScope scope(masm(), VFP2);
void LCodeGen::DoMathAbs(LMathAbs* instr) {
// Class for deferred case.
class DeferredMathAbsTaggedHeapNumber: public LDeferredCode {
public:
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen,
LUnaryMathOperation* instr)
DeferredMathAbsTaggedHeapNumber(LCodeGen* codegen, LMathAbs* instr)
: LDeferredCode(codegen), instr_(instr) { }
virtual void Generate() {
codegen()->DoDeferredMathAbsTaggedHeapNumber(instr_);
}
virtual LInstruction* instr() { return instr_; }
private:
LUnaryMathOperation* instr_;
LMathAbs* instr_;
};
Representation r = instr->hydrogen()->value()->representation();
@ -3938,8 +3869,7 @@ void LCodeGen::DoMathAbs(LUnaryMathOperation* instr) {
}
void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
CpuFeatureScope scope(masm(), VFP2);
void LCodeGen::DoMathFloor(LMathFloor* instr) {
DwVfpRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
Register input_high = scratch0();
@ -3961,8 +3891,7 @@ void LCodeGen::DoMathFloor(LUnaryMathOperation* instr) {
}
void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
CpuFeatureScope scope(masm(), VFP2);
void LCodeGen::DoMathRound(LMathRound* instr) {
DwVfpRegister input = ToDoubleRegister(instr->value());
Register result = ToRegister(instr->result());
DwVfpRegister double_scratch1 = ToDoubleRegister(instr->temp());
@ -4001,16 +3930,14 @@ void LCodeGen::DoMathRound(LUnaryMathOperation* instr) {
}
void LCodeGen::DoMathSqrt(LUnaryMathOperation* instr) {
CpuFeatureScope scope(masm(), VFP2);
void LCodeGen::DoMathSqrt(LMathSqrt* instr) {
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
__ vsqrt(result, input);
}
void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
CpuFeatureScope scope(masm(), VFP2);
void LCodeGen::DoMathPowHalf(LMathPowHalf* instr) {
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
DwVfpRegister temp = ToDoubleRegister(instr->temp());
@ -4032,7 +3959,6 @@ void LCodeGen::DoMathPowHalf(LUnaryMathOperation* instr) {
void LCodeGen::DoPower(LPower* instr) {
CpuFeatureScope scope(masm(), VFP2);
Representation exponent_type = instr->hydrogen()->right()->representation();
// Having marked this as a call, we can use any registers.
// Just make sure that the input/output registers are the expected ones.
@ -4065,7 +3991,6 @@ void LCodeGen::DoPower(LPower* instr) {
void LCodeGen::DoRandom(LRandom* instr) {
CpuFeatureScope scope(masm(), VFP2);
class DeferredDoRandom: public LDeferredCode {
public:
DeferredDoRandom(LCodeGen* codegen, LRandom* instr)
@ -4144,7 +4069,6 @@ void LCodeGen::DoDeferredRandom(LRandom* instr) {
void LCodeGen::DoMathExp(LMathExp* instr) {
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister input = ToDoubleRegister(instr->value());
DwVfpRegister result = ToDoubleRegister(instr->result());
DwVfpRegister double_scratch1 = ToDoubleRegister(instr->double_temp());
@ -4158,7 +4082,7 @@ void LCodeGen::DoMathExp(LMathExp* instr) {
}
void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
void LCodeGen::DoMathLog(LMathLog* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::LOG,
TranscendentalCacheStub::UNTAGGED);
@ -4166,7 +4090,7 @@ void LCodeGen::DoMathLog(LUnaryMathOperation* instr) {
}
void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
void LCodeGen::DoMathTan(LMathTan* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::TAN,
TranscendentalCacheStub::UNTAGGED);
@ -4174,7 +4098,7 @@ void LCodeGen::DoMathTan(LUnaryMathOperation* instr) {
}
void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
void LCodeGen::DoMathCos(LMathCos* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::COS,
TranscendentalCacheStub::UNTAGGED);
@ -4182,7 +4106,7 @@ void LCodeGen::DoMathCos(LUnaryMathOperation* instr) {
}
void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
void LCodeGen::DoMathSin(LMathSin* instr) {
ASSERT(ToDoubleRegister(instr->result()).is(d2));
TranscendentalCacheStub stub(TranscendentalCache::SIN,
TranscendentalCacheStub::UNTAGGED);
@ -4190,42 +4114,6 @@ void LCodeGen::DoMathSin(LUnaryMathOperation* instr) {
}
void LCodeGen::DoUnaryMathOperation(LUnaryMathOperation* instr) {
switch (instr->op()) {
case kMathAbs:
DoMathAbs(instr);
break;
case kMathFloor:
DoMathFloor(instr);
break;
case kMathRound:
DoMathRound(instr);
break;
case kMathSqrt:
DoMathSqrt(instr);
break;
case kMathPowHalf:
DoMathPowHalf(instr);
break;
case kMathCos:
DoMathCos(instr);
break;
case kMathSin:
DoMathSin(instr);
break;
case kMathTan:
DoMathTan(instr);
break;
case kMathLog:
DoMathLog(instr);
break;
default:
Abort("Unimplemented type of LUnaryMathOperation.");
UNREACHABLE();
}
}
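
The removed switch is the whole point of this hunk: LUnaryMathOperation carried an opcode and dispatched at code-generation time, whereas the renamed methods above (DoMathAbs, DoMathFloor, and so on) each take their own lithium instruction class, so the dispatch decision moves to graph building. A rough sketch of the structural difference, using abbreviated stand-in types rather than v8's actual class definitions:

// Before: one instruction class plus a runtime switch on its opcode.
struct LUnaryMathOperationLike { int op; };  // kMathAbs, kMathFloor, ...

// After: a class per operation, so each maps straight to a dedicated
// code-generation method and the switch (and its Abort default) go away.
struct LMathAbsLike {};
struct LMathFloorLike {};

struct CodeGenLike {
  void DoMathAbs(LMathAbsLike*) { /* emit abs */ }
  void DoMathFloor(LMathFloorLike*) { /* emit floor */ }
};
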
void LCodeGen::DoInvokeFunction(LInvokeFunction* instr) {
ASSERT(ToRegister(instr->function()).is(r1));
ASSERT(instr->HasPointerMap());
@ -4442,7 +4330,6 @@ void LCodeGen::DoBoundsCheck(LBoundsCheck* instr) {
void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
CpuFeatureScope scope(masm(), VFP2);
Register external_pointer = ToRegister(instr->elements());
Register key = no_reg;
ElementsKind elements_kind = instr->elements_kind();
@ -4463,7 +4350,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS ||
elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
CpuFeatureScope scope(masm(), VFP3);
DwVfpRegister value(ToDoubleRegister(instr->value()));
Operand operand(key_is_constant
? Operand(constant_key << element_size_shift)
@ -4513,7 +4399,6 @@ void LCodeGen::DoStoreKeyedExternalArray(LStoreKeyed* instr) {
void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister value = ToDoubleRegister(instr->value());
Register elements = ToRegister(instr->elements());
Register key = no_reg;
@ -4545,18 +4430,14 @@ void LCodeGen::DoStoreKeyedFixedDoubleArray(LStoreKeyed* instr) {
}
if (instr->NeedsCanonicalization()) {
// Check for NaN. All NaNs must be canonicalized.
__ VFPCompareAndSetFlags(value, value);
Label after_canonicalization;
// Only load canonical NaN if the comparison above set the overflow.
__ b(vc, &after_canonicalization);
__ Vmov(value,
FixedDoubleArray::canonical_not_the_hole_nan_as_double());
__ bind(&after_canonicalization);
// Force a canonical NaN.
if (masm()->emit_debug_code()) {
__ vmrs(ip);
__ tst(ip, Operand(kVFPDefaultNaNModeControlBit));
__ Assert(ne, "Default NaN mode not set");
}
__ VFPCanonicalizeNaN(value);
}
__ vstr(value, scratch, instr->additional_index() << element_size_shift);
}
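
The rewritten canonicalization is much shorter because it pushes the work onto the FPSCR: with the default-NaN bit set (asserted by the debug-code block above), VFPCanonicalizeNaN is a single vsub against zero that maps every NaN to the one default NaN and leaves numbers alone, which keeps the reserved hole pattern out of the array. A behavioral model, stated under the assumption that default-NaN mode is in effect:

#include <cmath>
#include <limits>

// Model of VFPCanonicalizeNaN under ARM default-NaN mode: value - 0.0 is
// an identity on every non-NaN value, and any NaN result is replaced by
// the single default (canonical) NaN, so a store can never produce the
// hole's reserved NaN pattern.
double CanonicalizeModel(double value) {
  if (std::isnan(value)) {
    return std::numeric_limits<double>::quiet_NaN();  // the default NaN
  }
  return value;  // what vsub(value, value, kDoubleRegZero) computes here
}
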
@ -4814,7 +4695,6 @@ void LCodeGen::DoStringLength(LStringLength* instr) {
void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
CpuFeatureScope scope(masm(), VFP2);
LOperand* input = instr->value();
ASSERT(input->IsRegister() || input->IsStackSlot());
LOperand* output = instr->result();
@ -4832,7 +4712,6 @@ void LCodeGen::DoInteger32ToDouble(LInteger32ToDouble* instr) {
void LCodeGen::DoUint32ToDouble(LUint32ToDouble* instr) {
CpuFeatureScope scope(masm(), VFP2);
LOperand* input = instr->value();
LOperand* output = instr->result();
@ -4894,43 +4773,6 @@ void LCodeGen::DoNumberTagU(LNumberTagU* instr) {
}
// Convert unsigned integer with specified number of leading zeroes in binary
// representation to IEEE 754 double.
// Integer to convert is passed in register src.
// Resulting double is returned in registers hiword:loword.
// This function does not work correctly for 0.
static void GenerateUInt2Double(MacroAssembler* masm,
Register src,
Register hiword,
Register loword,
Register scratch,
int leading_zeroes) {
const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
const int mantissa_shift_for_hi_word =
meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
const int mantissa_shift_for_lo_word =
kBitsPerInt - mantissa_shift_for_hi_word;
masm->mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
if (mantissa_shift_for_hi_word > 0) {
masm->mov(loword, Operand(src, LSL, mantissa_shift_for_lo_word));
masm->orr(hiword, scratch,
Operand(src, LSR, mantissa_shift_for_hi_word));
} else {
masm->mov(loword, Operand::Zero());
masm->orr(hiword, scratch,
Operand(src, LSL, -mantissa_shift_for_hi_word));
}
// If the least significant bit of the biased exponent was not 1, it was
// corrupted by the most significant bit of the mantissa, so we should fix that.
if (!(biased_exponent & 1)) {
masm->bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
}
}
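
Since this helper disappears without a scalar replacement (the VFP path below uses vcvt_f64_u32 instead), a portable reconstruction makes the bit manipulation easier to follow: the biased exponent is the bias plus the number of meaningful bits, the source bits fill the two mantissa words, and the final mask undoes the implicit leading 1 landing on the exponent's least significant bit. A self-checking sketch with the constants inlined; as the comment above warns, src must be nonzero and leading_zeroes exact:

#include <cstdint>
#include <cstring>

// Reconstruction of the removed helper in plain C++. 'leading_zeroes'
// must be the exact leading-zero count of a nonzero 'src'.
double UInt2Double(uint32_t src, int leading_zeroes) {
  const int kExponentBias = 1023;          // IEEE 754 double bias
  const int kMantissaBitsInTopWord = 20;   // mantissa bits in the hi word
  const int meaningful_bits = 32 - leading_zeroes - 1;
  const int biased_exponent = kExponentBias + meaningful_bits;
  const int hi_shift = meaningful_bits - kMantissaBitsInTopWord;

  uint32_t hiword = static_cast<uint32_t>(biased_exponent) << 20;
  uint32_t loword;
  if (hi_shift > 0) {
    loword = src << (32 - hi_shift);
    hiword |= src >> hi_shift;   // leading 1 lands on the exponent LSB
  } else {
    loword = 0;
    hiword |= src << -hi_shift;  // everything fits in the hi word
  }
  // The or-ed leading 1 overlapped the exponent's least significant bit;
  // clear it again when that bit should have been 0 (the bic above).
  if ((biased_exponent & 1) == 0) hiword &= ~(1u << 20);

  uint64_t bits = (static_cast<uint64_t>(hiword) << 32) | loword;
  double result;
  std::memcpy(&result, &bits, sizeof(result));
  return result;  // e.g. UInt2Double(5, 29) == 5.0
}
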
void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
LOperand* value,
IntegerSignedness signedness) {
@ -4952,35 +4794,11 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
__ SmiUntag(src, dst);
__ eor(src, src, Operand(0x80000000));
}
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
__ vmov(flt_scratch, src);
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
} else {
FloatingPointHelper::Destination dest =
FloatingPointHelper::kCoreRegisters;
FloatingPointHelper::ConvertIntToDouble(masm(), src, dest, d0,
sfpd_lo, sfpd_hi,
scratch0(), s0);
}
__ vmov(flt_scratch, src);
__ vcvt_f64_s32(dbl_scratch, flt_scratch);
} else {
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
__ vmov(flt_scratch, src);
__ vcvt_f64_u32(dbl_scratch, flt_scratch);
} else {
Label no_leading_zero, convert_done;
__ tst(src, Operand(0x80000000));
__ b(ne, &no_leading_zero);
// Integer has one leading zero.
GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, r9, 1);
__ b(&convert_done);
__ bind(&no_leading_zero);
GenerateUInt2Double(masm(), src, sfpd_hi, sfpd_lo, r9, 0);
__ bind(&convert_done);
}
__ vmov(flt_scratch, src);
__ vcvt_f64_u32(dbl_scratch, flt_scratch);
}
if (FLAG_inline_new) {
@ -4996,30 +4814,16 @@ void LCodeGen::DoDeferredNumberTagI(LInstruction* instr,
// TODO(3095996): Put a valid pointer value in the stack slot where the result
// register is stored, as this register is in the pointer map, but contains an
// integer value.
if (!CpuFeatures::IsSupported(VFP2)) {
// Preserve sfpd_lo.
__ mov(r9, sfpd_lo);
}
__ mov(ip, Operand::Zero());
__ StoreToSafepointRegisterSlot(ip, dst);
CallRuntimeFromDeferred(Runtime::kAllocateHeapNumber, 0, instr);
__ Move(dst, r0);
if (!CpuFeatures::IsSupported(VFP2)) {
// Restore sfpd_lo.
__ mov(sfpd_lo, r9);
}
__ sub(dst, dst, Operand(kHeapObjectTag));
// Done. Put the value in dbl_scratch into the value of the allocated heap
// number.
__ bind(&done);
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
__ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
} else {
__ str(sfpd_lo, MemOperand(dst, HeapNumber::kMantissaOffset));
__ str(sfpd_hi, MemOperand(dst, HeapNumber::kExponentOffset));
}
__ vstr(dbl_scratch, dst, HeapNumber::kValueOffset);
__ add(dst, dst, Operand(kHeapObjectTag));
__ StoreToSafepointRegisterSlot(dst, dst);
}
@ -5052,45 +4856,16 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
Label no_special_nan_handling;
Label done;
if (convert_hole) {
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
DwVfpRegister input_reg = ToDoubleRegister(instr->value());
__ VFPCompareAndSetFlags(input_reg, input_reg);
__ b(vc, &no_special_nan_handling);
__ vmov(reg, scratch0(), input_reg);
__ cmp(scratch0(), Operand(kHoleNanUpper32));
Label canonicalize;
__ b(ne, &canonicalize);
__ Move(reg, factory()->the_hole_value());
__ b(&done);
__ bind(&canonicalize);
__ Vmov(input_reg,
FixedDoubleArray::canonical_not_the_hole_nan_as_double(),
no_reg);
} else {
Label not_hole;
__ cmp(sfpd_hi, Operand(kHoleNanUpper32));
__ b(ne, &not_hole);
__ Move(reg, factory()->the_hole_value());
__ b(&done);
__ bind(&not_hole);
__ and_(scratch, sfpd_hi, Operand(0x7ff00000));
__ cmp(scratch, Operand(0x7ff00000));
__ b(ne, &no_special_nan_handling);
Label special_nan_handling;
__ tst(sfpd_hi, Operand(0x000FFFFF));
__ b(ne, &special_nan_handling);
__ cmp(sfpd_lo, Operand(0));
__ b(eq, &no_special_nan_handling);
__ bind(&special_nan_handling);
double canonical_nan =
FixedDoubleArray::canonical_not_the_hole_nan_as_double();
uint64_t casted_nan = BitCast<uint64_t>(canonical_nan);
__ mov(sfpd_lo,
Operand(static_cast<uint32_t>(casted_nan & 0xFFFFFFFF)));
__ mov(sfpd_hi,
Operand(static_cast<uint32_t>(casted_nan >> 32)));
}
DwVfpRegister input_reg = ToDoubleRegister(instr->value());
__ VFPCompareAndSetFlags(input_reg, input_reg);
__ b(vc, &no_special_nan_handling);
__ vmov(scratch, input_reg.high());
__ cmp(scratch, Operand(kHoleNanUpper32));
// If not the hole NaN, force the NaN to be canonical.
__ VFPCanonicalizeNaN(input_reg, ne);
__ b(ne, &no_special_nan_handling);
__ Move(reg, factory()->the_hole_value());
__ b(&done);
}
__ bind(&no_special_nan_handling);
@ -5104,13 +4879,7 @@ void LCodeGen::DoNumberTagD(LNumberTagD* instr) {
__ jmp(deferred->entry());
}
__ bind(deferred->exit());
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(masm(), VFP2);
__ vstr(input_reg, reg, HeapNumber::kValueOffset);
} else {
__ str(sfpd_lo, MemOperand(reg, HeapNumber::kValueOffset));
__ str(sfpd_hi, MemOperand(reg, HeapNumber::kValueOffset + kPointerSize));
}
__ vstr(input_reg, reg, HeapNumber::kValueOffset);
// Now that we have finished with the object's real address, tag it.
__ add(reg, reg, Operand(kHeapObjectTag));
__ bind(&done);
@ -5160,7 +4929,6 @@ void LCodeGen::EmitNumberUntagD(Register input_reg,
Register scratch = scratch0();
SwVfpRegister flt_scratch = double_scratch0().low();
ASSERT(!result_reg.is(double_scratch0()));
CpuFeatureScope scope(masm(), VFP2);
Label load_smi, heap_number, done;
@ -5249,7 +5017,6 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ cmp(scratch1, Operand(ip));
if (instr->truncating()) {
CpuFeatureScope scope(masm(), VFP2);
Register scratch3 = ToRegister(instr->temp2());
ASSERT(!scratch3.is(input_reg) &&
!scratch3.is(scratch1) &&
@ -5270,11 +5037,10 @@ void LCodeGen::DoDeferredTaggedToI(LTaggedToI* instr) {
__ sub(scratch1, input_reg, Operand(kHeapObjectTag));
__ vldr(double_scratch2, scratch1, HeapNumber::kValueOffset);
__ ECMAToInt32VFP(input_reg, double_scratch2, double_scratch,
scratch1, scratch2, scratch3);
__ ECMAToInt32(input_reg, double_scratch2, double_scratch,
scratch1, scratch2, scratch3);
} else {
CpuFeatureScope scope(masm(), VFP3);
// Deoptimize if we don't have a heap number.
DeoptimizeIf(ne, instr->environment());
@ -5369,8 +5135,8 @@ void LCodeGen::DoDoubleToI(LDoubleToI* instr) {
if (instr->truncating()) {
Register scratch3 = ToRegister(instr->temp2());
__ ECMAToInt32VFP(result_reg, double_input, double_scratch,
scratch1, scratch2, scratch3);
__ ECMAToInt32(result_reg, double_input, double_scratch,
scratch1, scratch2, scratch3);
} else {
__ TryDoubleToInt32Exact(result_reg, double_input, double_scratch);
// Deoptimize if the input wasn't a int32 (inside a double).
@ -5486,7 +5252,6 @@ void LCodeGen::DoCheckMaps(LCheckMaps* instr) {
void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
CpuFeatureScope vfp_scope(masm(), VFP2);
DwVfpRegister value_reg = ToDoubleRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
DwVfpRegister temp_reg = ToDoubleRegister(instr->temp());
@ -5495,7 +5260,6 @@ void LCodeGen::DoClampDToUint8(LClampDToUint8* instr) {
void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
CpuFeatureScope scope(masm(), VFP2);
Register unclamped_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
__ ClampUint8(result_reg, unclamped_reg);
@ -5503,7 +5267,6 @@ void LCodeGen::DoClampIToUint8(LClampIToUint8* instr) {
void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
CpuFeatureScope scope(masm(), VFP2);
Register scratch = scratch0();
Register input_reg = ToRegister(instr->unclamped());
Register result_reg = ToRegister(instr->result());
@ -5541,7 +5304,6 @@ void LCodeGen::DoClampTToUint8(LClampTToUint8* instr) {
void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
ASSERT(instr->temp()->Equals(instr->result()));
Register prototype_reg = ToRegister(instr->temp());
Register map_reg = ToRegister(instr->temp2());
@ -5554,8 +5316,6 @@ void LCodeGen::DoCheckPrototypeMaps(LCheckPrototypeMaps* instr) {
for (int i = 0; i < maps->length(); i++) {
prototype_maps_.Add(maps->at(i), info()->zone());
}
__ LoadHeapObject(prototype_reg,
prototypes->at(prototypes->length() - 1));
} else {
for (int i = 0; i < prototypes->length(); i++) {
__ LoadHeapObject(prototype_reg, prototypes->at(i));
@ -5671,11 +5431,11 @@ void LCodeGen::DoAllocate(LAllocate* instr) {
if (instr->hydrogen()->MustAllocateDoubleAligned()) {
flags = static_cast<AllocationFlags>(flags | DOUBLE_ALIGNMENT);
}
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
}
if (instr->size()->IsConstantOperand()) {
int32_t size = ToInteger32(LConstantOperand::cast(instr->size()));
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
flags = static_cast<AllocationFlags>(flags | PRETENURE_OLD_POINTER_SPACE);
}
__ Allocate(size, result, scratch, scratch2, deferred->entry(), flags);
} else {
Register size = ToRegister(instr->size());
@ -5703,7 +5463,13 @@ void LCodeGen::DoDeferredAllocate(LAllocate* instr) {
PushSafepointRegistersScope scope(this, Safepoint::kWithRegisters);
__ SmiTag(size, size);
__ push(size);
CallRuntimeFromDeferred(Runtime::kAllocateInNewSpace, 1, instr);
if (instr->hydrogen()->CanAllocateInOldPointerSpace()) {
CallRuntimeFromDeferred(
Runtime::kAllocateInOldPointerSpace, 1, instr);
} else {
CallRuntimeFromDeferred(
Runtime::kAllocateInNewSpace, 1, instr);
}
__ StoreToSafepointRegisterSlot(r0, result);
}
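
The fix here is easy to miss: the fast path can request old-pointer-space pretenuring, so the deferred slow path must call the matching runtime function instead of unconditionally allocating in new space as before. The selection, as a tiny self-contained sketch where the enum stands in for the Runtime::FunctionId values named in the diff:

// Stand-ins for the Runtime::FunctionId values used above.
enum AllocationRuntime { kAllocateInNewSpace, kAllocateInOldPointerSpace };

// The deferred path must agree with the PRETENURE_OLD_POINTER_SPACE flag
// chosen for the inline Allocate call, or the object lands in the wrong
// space.
AllocationRuntime ChooseAllocationRuntime(bool pretenure_old_pointer_space) {
  return pretenure_old_pointer_space ? kAllocateInOldPointerSpace
                                     : kAllocateInNewSpace;
}
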
@ -5737,7 +5503,6 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
// Boilerplate already exists, constant elements are never accessed.
// Pass an empty fixed array.
__ mov(r1, Operand(isolate()->factory()->empty_fixed_array()));
__ Push(r3, r2, r1);
// Pick the right runtime function or stub to call.
int length = instr->hydrogen()->length();
@ -5748,8 +5513,10 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
FastCloneShallowArrayStub stub(mode, DONT_TRACK_ALLOCATION_SITE, length);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);
} else if (instr->hydrogen()->depth() > 1) {
__ Push(r3, r2, r1);
CallRuntime(Runtime::kCreateArrayLiteral, 3, instr);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ Push(r3, r2, r1);
CallRuntime(Runtime::kCreateArrayLiteralShallow, 3, instr);
} else {
FastCloneShallowArrayStub::Mode mode =
@ -5762,170 +5529,6 @@ void LCodeGen::DoArrayLiteral(LArrayLiteral* instr) {
}
void LCodeGen::EmitDeepCopy(Handle<JSObject> object,
Register result,
Register source,
int* offset,
AllocationSiteMode mode) {
ASSERT(!source.is(r2));
ASSERT(!result.is(r2));
bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
object->map()->CanTrackAllocationSite();
// Only elements backing stores for non-COW arrays need to be copied.
Handle<FixedArrayBase> elements(object->elements());
bool has_elements = elements->length() > 0 &&
elements->map() != isolate()->heap()->fixed_cow_array_map();
// Increase the offset so that subsequent objects end up right after
// this object and its backing store.
int object_offset = *offset;
int object_size = object->map()->instance_size();
int elements_size = has_elements ? elements->Size() : 0;
int elements_offset = *offset + object_size;
if (create_allocation_site_info) {
elements_offset += AllocationSiteInfo::kSize;
*offset += AllocationSiteInfo::kSize;
}
*offset += object_size + elements_size;
// Copy object header.
ASSERT(object->properties()->length() == 0);
int inobject_properties = object->map()->inobject_properties();
int header_size = object_size - inobject_properties * kPointerSize;
for (int i = 0; i < header_size; i += kPointerSize) {
if (has_elements && i == JSObject::kElementsOffset) {
__ add(r2, result, Operand(elements_offset));
} else {
__ ldr(r2, FieldMemOperand(source, i));
}
__ str(r2, FieldMemOperand(result, object_offset + i));
}
// Copy in-object properties.
for (int i = 0; i < inobject_properties; i++) {
int total_offset = object_offset + object->GetInObjectPropertyOffset(i);
Handle<Object> value = Handle<Object>(object->InObjectPropertyAt(i),
isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ add(r2, result, Operand(*offset));
__ str(r2, FieldMemOperand(result, total_offset));
__ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset,
DONT_TRACK_ALLOCATION_SITE);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
__ str(r2, FieldMemOperand(result, total_offset));
} else {
__ mov(r2, Operand(value));
__ str(r2, FieldMemOperand(result, total_offset));
}
}
// Build Allocation Site Info if desired
if (create_allocation_site_info) {
__ mov(r2, Operand(Handle<Map>(isolate()->heap()->
allocation_site_info_map())));
__ str(r2, FieldMemOperand(result, object_size));
__ str(source, FieldMemOperand(result, object_size + kPointerSize));
}
if (has_elements) {
// Copy elements backing store header.
__ LoadHeapObject(source, elements);
for (int i = 0; i < FixedArray::kHeaderSize; i += kPointerSize) {
__ ldr(r2, FieldMemOperand(source, i));
__ str(r2, FieldMemOperand(result, elements_offset + i));
}
// Copy elements backing store content.
int elements_length = has_elements ? elements->length() : 0;
if (elements->IsFixedDoubleArray()) {
Handle<FixedDoubleArray> double_array =
Handle<FixedDoubleArray>::cast(elements);
for (int i = 0; i < elements_length; i++) {
int64_t value = double_array->get_representation(i);
// We only support little endian mode...
int32_t value_low = static_cast<int32_t>(value & 0xFFFFFFFF);
int32_t value_high = static_cast<int32_t>(value >> 32);
int total_offset =
elements_offset + FixedDoubleArray::OffsetOfElementAt(i);
__ mov(r2, Operand(value_low));
__ str(r2, FieldMemOperand(result, total_offset));
__ mov(r2, Operand(value_high));
__ str(r2, FieldMemOperand(result, total_offset + 4));
}
} else if (elements->IsFixedArray()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
for (int i = 0; i < elements_length; i++) {
int total_offset = elements_offset + FixedArray::OffsetOfElementAt(i);
Handle<Object> value(fast_elements->get(i), isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
__ add(r2, result, Operand(*offset));
__ str(r2, FieldMemOperand(result, total_offset));
__ LoadHeapObject(source, value_object);
EmitDeepCopy(value_object, result, source, offset,
DONT_TRACK_ALLOCATION_SITE);
} else if (value->IsHeapObject()) {
__ LoadHeapObject(r2, Handle<HeapObject>::cast(value));
__ str(r2, FieldMemOperand(result, total_offset));
} else {
__ mov(r2, Operand(value));
__ str(r2, FieldMemOperand(result, total_offset));
}
}
} else {
UNREACHABLE();
}
}
}
void LCodeGen::DoFastLiteral(LFastLiteral* instr) {
int size = instr->hydrogen()->total_size();
ElementsKind boilerplate_elements_kind =
instr->hydrogen()->boilerplate()->GetElementsKind();
// Deopt if the array literal boilerplate ElementsKind is of a type different
// from the expected one. The check isn't necessary if the boilerplate has
// already been converted to TERMINAL_FAST_ELEMENTS_KIND.
if (CanTransitionToMoreGeneralFastElementsKind(
boilerplate_elements_kind, true)) {
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
// Load map into r2.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
// Load the map's "bit field 2".
__ ldrb(r2, FieldMemOperand(r2, Map::kBitField2Offset));
// Retrieve elements_kind from bit field 2.
__ ubfx(r2, r2, Map::kElementsKindShift, Map::kElementsKindBitCount);
__ cmp(r2, Operand(boilerplate_elements_kind));
DeoptimizeIf(ne, instr->environment());
}
// Allocate all objects that are part of the literal in one big
// allocation. This avoids multiple limit checks.
Label allocated, runtime_allocate;
__ Allocate(size, r0, r2, r3, &runtime_allocate, TAG_OBJECT);
__ jmp(&allocated);
__ bind(&runtime_allocate);
__ mov(r0, Operand(Smi::FromInt(size)));
__ push(r0);
CallRuntime(Runtime::kAllocateInNewSpace, 1, instr);
__ bind(&allocated);
int offset = 0;
__ LoadHeapObject(r1, instr->hydrogen()->boilerplate());
EmitDeepCopy(instr->hydrogen()->boilerplate(), r0, r1, &offset,
instr->hydrogen()->allocation_site_mode());
ASSERT_EQ(size, offset);
}
void LCodeGen::DoObjectLiteral(LObjectLiteral* instr) {
Handle<FixedArray> literals(instr->environment()->closure()->literals());
Handle<FixedArray> constant_properties =
@ -6002,17 +5605,8 @@ void LCodeGen::DoRegExpLiteral(LRegExpLiteral* instr) {
__ bind(&allocated);
// Copy the content into the newly allocated memory.
// (Unroll copy loop once for better throughput).
for (int i = 0; i < size - kPointerSize; i += 2 * kPointerSize) {
__ ldr(r3, FieldMemOperand(r1, i));
__ ldr(r2, FieldMemOperand(r1, i + kPointerSize));
__ str(r3, FieldMemOperand(r0, i));
__ str(r2, FieldMemOperand(r0, i + kPointerSize));
}
if ((size % (2 * kPointerSize)) != 0) {
__ ldr(r3, FieldMemOperand(r1, size - kPointerSize));
__ str(r3, FieldMemOperand(r0, size - kPointerSize));
}
__ CopyFields(r0, r1, double_scratch0(), double_scratch0().low(),
size / kPointerSize);
}
@ -6022,7 +5616,8 @@ void LCodeGen::DoFunctionLiteral(LFunctionLiteral* instr) {
Handle<SharedFunctionInfo> shared_info = instr->shared_info();
bool pretenure = instr->hydrogen()->pretenure();
if (!pretenure && shared_info->num_literals() == 0) {
FastNewClosureStub stub(shared_info->language_mode());
FastNewClosureStub stub(shared_info->language_mode(),
shared_info->is_generator());
__ mov(r1, Operand(shared_info));
__ push(r1);
CallCode(stub.GetCode(isolate()), RelocInfo::CODE_TARGET, instr);

14
deps/v8/src/arm/lithium-codegen-arm.h

@ -137,7 +137,7 @@ class LCodeGen BASE_EMBEDDED {
IntegerSignedness signedness);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredMathAbsTaggedHeapNumber(LMathAbs* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredRandom(LRandom* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
@ -294,17 +294,7 @@ class LCodeGen BASE_EMBEDDED {
Register ToRegister(int index) const;
DwVfpRegister ToDoubleRegister(int index) const;
// Specific math operations - used from DoUnaryMathOperation.
void EmitIntegerMathAbs(LUnaryMathOperation* instr);
void DoMathAbs(LUnaryMathOperation* instr);
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
void DoMathPowHalf(LUnaryMathOperation* instr);
void DoMathLog(LUnaryMathOperation* instr);
void DoMathTan(LUnaryMathOperation* instr);
void DoMathCos(LUnaryMathOperation* instr);
void DoMathSin(LUnaryMathOperation* instr);
void EmitIntegerMathAbs(LMathAbs* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,

11
deps/v8/src/arm/lithium-gap-resolver-arm.cc

@ -171,10 +171,8 @@ void LGapResolver::BreakCycle(int index) {
} else if (source->IsStackSlot()) {
__ ldr(kSavedValueRegister, cgen_->ToMemOperand(source));
} else if (source->IsDoubleRegister()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vmov(kScratchDoubleReg, cgen_->ToDoubleRegister(source));
} else if (source->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vldr(kScratchDoubleReg, cgen_->ToMemOperand(source));
} else {
UNREACHABLE();
@ -194,10 +192,8 @@ void LGapResolver::RestoreValue() {
} else if (saved_destination_->IsStackSlot()) {
__ str(kSavedValueRegister, cgen_->ToMemOperand(saved_destination_));
} else if (saved_destination_->IsDoubleRegister()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vmov(cgen_->ToDoubleRegister(saved_destination_), kScratchDoubleReg);
} else if (saved_destination_->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
__ vstr(kScratchDoubleReg, cgen_->ToMemOperand(saved_destination_));
} else {
UNREACHABLE();
@ -233,8 +229,7 @@ void LGapResolver::EmitMove(int index) {
MemOperand destination_operand = cgen_->ToMemOperand(destination);
if (in_cycle_) {
if (!destination_operand.OffsetIsUint12Encodable()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
// ip is overwritten while saving the value to the destination.
// Therefore we can't use ip. It is OK if the read from the source
// destroys ip, since that happens before the value is read.
__ vldr(kScratchDoubleReg.low(), source_operand);
@ -272,7 +267,6 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleRegister()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
DwVfpRegister source_register = cgen_->ToDoubleRegister(source);
if (destination->IsDoubleRegister()) {
__ vmov(cgen_->ToDoubleRegister(destination), source_register);
@ -282,8 +276,7 @@ void LGapResolver::EmitMove(int index) {
}
} else if (source->IsDoubleStackSlot()) {
CpuFeatureScope scope(cgen_->masm(), VFP2);
MemOperand source_operand = cgen_->ToMemOperand(source);
if (destination->IsDoubleRegister()) {
__ vldr(cgen_->ToDoubleRegister(destination), source_operand);
} else {

235
deps/v8/src/arm/macro-assembler-arm.cc

@ -291,8 +291,6 @@ void MacroAssembler::Move(Register dst, Register src, Condition cond) {
void MacroAssembler::Move(DwVfpRegister dst, DwVfpRegister src) {
ASSERT(CpuFeatures::IsSupported(VFP2));
CpuFeatureScope scope(this, VFP2);
if (!dst.is(src)) {
vmov(dst, src);
}
@ -775,6 +773,23 @@ void MacroAssembler::Strd(Register src1, Register src2,
}
void MacroAssembler::VFPEnsureFPSCRState(Register scratch) {
// If needed, restore wanted bits of FPSCR.
Label fpscr_done;
vmrs(scratch);
tst(scratch, Operand(kVFPDefaultNaNModeControlBit));
b(ne, &fpscr_done);
orr(scratch, scratch, Operand(kVFPDefaultNaNModeControlBit));
vmsr(scratch);
bind(&fpscr_done);
}
void MacroAssembler::VFPCanonicalizeNaN(const DwVfpRegister value,
const Condition cond) {
vsub(value, value, kDoubleRegZero, cond);
}
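
These two new helpers cooperate: VFPEnsureFPSCRState makes the default-NaN control bit sticky, writing FPSCR only when the bit is clear, and VFPCanonicalizeNaN then exploits that mode, since under default-NaN semantics a subtraction of zero returns its input for every number and the default NaN for any NaN input. A bit-level model of the ensure step:

#include <cstdint>

// Model of VFPEnsureFPSCRState: set the default-NaN control bit only if
// it is not already set; the early out mirrors the b(ne, &fpscr_done)
// branch that skips the vmsr in the common case.
uint32_t EnsureDefaultNaNMode(uint32_t fpscr, uint32_t default_nan_bit) {
  if ((fpscr & default_nan_bit) == 0) {
    fpscr |= default_nan_bit;  // the orr + vmsr path
  }
  return fpscr;
}
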
void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
const Condition cond) {
@ -811,7 +826,6 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm,
const Register scratch) {
ASSERT(IsEnabled(VFP2));
static const DoubleRepresentation minus_zero(-0.0);
static const DoubleRepresentation zero(0.0);
DoubleRepresentation value(imm);
@ -873,7 +887,6 @@ void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
// Optionally save all double registers.
if (save_doubles) {
CpuFeatureScope scope(this, VFP2);
// Check CPU flags for number of registers, setting the Z condition flag.
CheckFor32DRegs(ip);
@ -938,7 +951,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
Register argument_count) {
// Optionally restore all double registers.
if (save_doubles) {
CpuFeatureScope scope(this, VFP2);
// Calculate the stack location of the saved doubles and restore them.
const int offset = 2 * kPointerSize;
sub(r3, fp,
@ -975,7 +987,6 @@ void MacroAssembler::LeaveExitFrame(bool save_doubles,
}
void MacroAssembler::GetCFunctionDoubleResult(const DwVfpRegister dst) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(dst, d0);
} else {
@ -1402,7 +1413,6 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
// TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code, because ip is
// clobbered as part of cmp with an object Operand.
push(holder_reg); // Temporarily save holder on the stack.
@ -1421,7 +1431,6 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Check the context is a native context.
if (emit_debug_code()) {
// TODO(119): avoid push(holder_reg)/pop(holder_reg)
// Cannot use ip as a temporary in this verification code, because ip is
// clobbered as part of cmp with an object Operand.
push(holder_reg); // Temporarily save holder on the stack.
@ -1991,7 +2000,7 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
Register scratch4,
Label* fail,
int elements_offset) {
Label smi_value, maybe_nan, have_double_value, is_nan, done;
Label smi_value, store;
Register mantissa_reg = scratch2;
Register exponent_reg = scratch3;
@ -2005,73 +2014,28 @@ void MacroAssembler::StoreNumberToDoubleElements(Register value_reg,
fail,
DONT_DO_SMI_CHECK);
// Check for NaN: all NaN values have a value greater (signed) than 0x7ff00000
// in the exponent.
mov(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
ldr(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
cmp(exponent_reg, scratch1);
b(ge, &maybe_nan);
ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
bind(&have_double_value);
add(scratch1, elements_reg,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
str(mantissa_reg, FieldMemOperand(
scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
sizeof(kHoleNanLower32);
str(exponent_reg, FieldMemOperand(scratch1, offset));
jmp(&done);
bind(&maybe_nan);
// Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise
// it's an Infinity, and the non-NaN code path applies.
b(gt, &is_nan);
ldr(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
cmp(mantissa_reg, Operand::Zero());
b(eq, &have_double_value);
bind(&is_nan);
// Load canonical NaN for storing into the double array.
uint64_t nan_int64 = BitCast<uint64_t>(
FixedDoubleArray::canonical_not_the_hole_nan_as_double());
mov(mantissa_reg, Operand(static_cast<uint32_t>(nan_int64)));
mov(exponent_reg, Operand(static_cast<uint32_t>(nan_int64 >> 32)));
jmp(&have_double_value);
vldr(d0, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
// Force a canonical NaN.
if (emit_debug_code()) {
vmrs(ip);
tst(ip, Operand(kVFPDefaultNaNModeControlBit));
Assert(ne, "Default NaN mode not set");
}
VFPCanonicalizeNaN(d0);
b(&store);
bind(&smi_value);
Register untagged_value = scratch1;
SmiUntag(untagged_value, value_reg);
FloatingPointHelper::ConvertIntToDouble(
this, untagged_value, FloatingPointHelper::kVFPRegisters, d0,
mantissa_reg, exponent_reg, scratch4, s2);
bind(&store);
add(scratch1, elements_reg,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
elements_offset));
add(scratch1, scratch1,
Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize));
// scratch1 is now effective address of the double element
FloatingPointHelper::Destination destination;
if (CpuFeatures::IsSupported(VFP2)) {
destination = FloatingPointHelper::kVFPRegisters;
} else {
destination = FloatingPointHelper::kCoreRegisters;
}
Register untagged_value = elements_reg;
SmiUntag(untagged_value, value_reg);
FloatingPointHelper::ConvertIntToDouble(this,
untagged_value,
destination,
d0,
mantissa_reg,
exponent_reg,
scratch4,
s2);
if (destination == FloatingPointHelper::kVFPRegisters) {
CpuFeatureScope scope(this, VFP2);
vstr(d0, scratch1, 0);
} else {
str(mantissa_reg, MemOperand(scratch1, 0));
str(exponent_reg, MemOperand(scratch1, Register::kSizeInBytes));
}
bind(&done);
vstr(d0, FieldMemOperand(scratch1,
FixedDoubleArray::kHeaderSize - elements_offset));
}
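
The deleted slow path classified the heap number by hand, comparing the upper word against kNaNOrInfinityLowerBoundUpper32 and then inspecting the fraction; the new code simply loads into d0 and relies on canonicalization. For reference, the property the word-by-word comparison was probing, in portable form:

#include <cstdint>
#include <cstring>

// A double is NaN or infinity exactly when its 11 exponent bits are all
// ones; it is a NaN (rather than an infinity) when the 52 fraction bits
// are also nonzero. This is what the removed exponent and mantissa
// checks were computing one 32-bit word at a time.
bool IsNaNOrInfinity(double d, bool* is_nan) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  bool exponent_all_ones =
      (bits & 0x7FF0000000000000ULL) == 0x7FF0000000000000ULL;
  *is_nan = exponent_all_ones && (bits & 0x000FFFFFFFFFFFFFULL) != 0;
  return exponent_all_ones;
}
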
@ -2425,9 +2389,6 @@ void MacroAssembler::SmiToDoubleVFPRegister(Register smi,
void MacroAssembler::TestDoubleIsInt32(DwVfpRegister double_input,
DwVfpRegister double_scratch) {
ASSERT(!double_input.is(double_scratch));
ASSERT(CpuFeatures::IsSupported(VFP2));
CpuFeatureScope scope(this, VFP2);
vcvt_s32_f64(double_scratch.low(), double_input);
vcvt_f64_s32(double_scratch, double_scratch.low());
VFPCompareAndSetFlags(double_input, double_scratch);
@ -2438,9 +2399,6 @@ void MacroAssembler::TryDoubleToInt32Exact(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch) {
ASSERT(!double_input.is(double_scratch));
ASSERT(CpuFeatures::IsSupported(VFP2));
CpuFeatureScope scope(this, VFP2);
vcvt_s32_f64(double_scratch.low(), double_input);
vmov(result, double_scratch.low());
vcvt_f64_s32(double_scratch, double_scratch.low());
@ -2456,8 +2414,6 @@ void MacroAssembler::TryInt32Floor(Register result,
Label* exact) {
ASSERT(!result.is(input_high));
ASSERT(!double_input.is(double_scratch));
ASSERT(CpuFeatures::IsSupported(VFP2));
CpuFeatureScope scope(this, VFP2);
Label negative, exception;
// Test for NaN and infinities.
@ -2502,26 +2458,18 @@ void MacroAssembler::ECMAConvertNumberToInt32(Register source,
Register scratch,
DwVfpRegister double_scratch1,
DwVfpRegister double_scratch2) {
if (CpuFeatures::IsSupported(VFP2)) {
CpuFeatureScope scope(this, VFP2);
vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset));
ECMAToInt32VFP(result, double_scratch1, double_scratch2,
scratch, input_high, input_low);
} else {
Ldrd(input_low, input_high,
FieldMemOperand(source, HeapNumber::kValueOffset));
ECMAToInt32NoVFP(result, scratch, input_high, input_low);
}
vldr(double_scratch1, FieldMemOperand(source, HeapNumber::kValueOffset));
ECMAToInt32(result, double_scratch1, double_scratch2,
scratch, input_high, input_low);
}
void MacroAssembler::ECMAToInt32VFP(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch,
Register scratch,
Register input_high,
Register input_low) {
CpuFeatureScope scope(this, VFP2);
void MacroAssembler::ECMAToInt32(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch,
Register scratch,
Register input_high,
Register input_low) {
ASSERT(!input_high.is(result));
ASSERT(!input_low.is(result));
ASSERT(!input_low.is(input_high));
@ -2561,58 +2509,6 @@ void MacroAssembler::ECMAToInt32VFP(Register result,
}
void MacroAssembler::ECMAToInt32NoVFP(Register result,
Register scratch,
Register input_high,
Register input_low) {
ASSERT(!result.is(scratch));
ASSERT(!result.is(input_high));
ASSERT(!result.is(input_low));
ASSERT(!scratch.is(input_high));
ASSERT(!scratch.is(input_low));
ASSERT(!input_high.is(input_low));
Label both, out_of_range, negate, done;
Ubfx(scratch, input_high,
HeapNumber::kExponentShift, HeapNumber::kExponentBits);
// Load scratch with exponent.
sub(scratch, scratch, Operand(HeapNumber::kExponentBias));
// If exponent is negative, 0 < input < 1, the result is 0.
// If the exponent is greater than or equal to 84, the 32 least significant
// bits are zeros (2^84 = 1 implicit bit, 52 mantissa bits, 32 uncoded bits),
// so the result is 0.
// This test also catches NaN and infinities, which also return 0.
cmp(scratch, Operand(84));
// We do an unsigned comparison, so negative exponents are treated as big
// positive numbers and the two tests above are done in one test.
b(hs, &out_of_range);
// Load scratch with 20 - exponent.
rsb(scratch, scratch, Operand(20), SetCC);
b(mi, &both);
// Test 0 and -0.
bic(result, input_high, Operand(HeapNumber::kSignMask));
orr(result, result, Operand(input_low), SetCC);
b(eq, &done);
// 0 <= exponent <= 20, shift only input_high.
// Scratch contains: 20 - exponent.
Ubfx(result, input_high,
0, HeapNumber::kMantissaBitsInTopWord);
// Set the implicit 1 before the mantissa part in input_high.
orr(result, result, Operand(1 << HeapNumber::kMantissaBitsInTopWord));
mov(result, Operand(result, LSR, scratch));
b(&negate);
bind(&both);
// Restore scratch to exponent - 1 to be consistent with ECMAToInt32VFP.
rsb(scratch, scratch, Operand(19));
ECMAToInt32Tail(result, scratch, input_high, input_low,
&out_of_range, &negate, &done);
}
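
The removed soft-float routine implemented ECMA-262 ToInt32 by decoding the exponent directly, as its comments describe. The same semantics in portable C++, useful as a cross-check of the cases above; out-of-range and non-finite inputs become 0, everything else is truncated and wrapped modulo 2^32:

#include <cmath>
#include <cstdint>

// ECMA-262 ToInt32: truncate toward zero, wrap modulo 2^32, reinterpret
// as signed. NaN, infinities, and values whose low 32 integer bits are
// all zero (exponent >= 84 in the deleted code) all yield 0.
int32_t ECMAToInt32Model(double value) {
  if (!std::isfinite(value)) return 0;
  double truncated = std::trunc(value);                // toward zero
  double modulo = std::fmod(truncated, 4294967296.0);  // 2^32
  if (modulo < 0) modulo += 4294967296.0;
  return static_cast<int32_t>(static_cast<uint32_t>(modulo));
}
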
void MacroAssembler::ECMAToInt32Tail(Register result,
Register scratch,
Register input_high,
@ -2715,10 +2611,7 @@ void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) {
const Runtime::Function* function = Runtime::FunctionForId(id);
mov(r0, Operand(function->nargs));
mov(r1, Operand(ExternalReference(function, isolate())));
SaveFPRegsMode mode = CpuFeatures::IsSupported(VFP2)
? kSaveFPRegs
: kDontSaveFPRegs;
CEntryStub stub(1, mode);
CEntryStub stub(1, kSaveFPRegs);
CallStub(&stub);
}
@ -3244,27 +3137,24 @@ void MacroAssembler::AllocateHeapNumberWithValue(Register result,
// Copies a fixed number of fields of heap objects from src to dst.
void MacroAssembler::CopyFields(Register dst,
Register src,
RegList temps,
DwVfpRegister double_scratch,
SwVfpRegister single_scratch,
int field_count) {
// At least one bit set in the first 15 registers.
ASSERT((temps & ((1 << 15) - 1)) != 0);
ASSERT((temps & dst.bit()) == 0);
ASSERT((temps & src.bit()) == 0);
// Primitive implementation using only one temporary register.
Register tmp = no_reg;
// Find a temp register in temps list.
for (int i = 0; i < 15; i++) {
if ((temps & (1 << i)) != 0) {
tmp.set_code(i);
break;
}
int double_count = field_count / (DwVfpRegister::kSizeInBytes / kPointerSize);
for (int i = 0; i < double_count; i++) {
vldr(double_scratch, FieldMemOperand(src, i * DwVfpRegister::kSizeInBytes));
vstr(double_scratch, FieldMemOperand(dst, i * DwVfpRegister::kSizeInBytes));
}
ASSERT(!tmp.is(no_reg));
for (int i = 0; i < field_count; i++) {
ldr(tmp, FieldMemOperand(src, i * kPointerSize));
str(tmp, FieldMemOperand(dst, i * kPointerSize));
STATIC_ASSERT(SwVfpRegister::kSizeInBytes == kPointerSize);
STATIC_ASSERT(2 * SwVfpRegister::kSizeInBytes == DwVfpRegister::kSizeInBytes);
int remain = field_count % (DwVfpRegister::kSizeInBytes / kPointerSize);
if (remain != 0) {
vldr(single_scratch,
FieldMemOperand(src, (field_count - 1) * kPointerSize));
vstr(single_scratch,
FieldMemOperand(dst, (field_count - 1) * kPointerSize));
}
}
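
The new CopyFields trades the borrowed core register for VFP scratch registers: fields are streamed two at a time through a D register, with a single S-register transfer for an odd trailing field, and the STATIC_ASSERTs pin down that one field is exactly one S register and two fields are one D register. A behavioral model of the copy pattern, which is what the DoRegExpLiteral hunk earlier in this commit now calls instead of its unrolled loop:

#include <cstdint>
#include <cstring>

// Copy field_count pointer-sized (4-byte) fields: 8-byte chunks first
// (one vldr/vstr of a D register each), then the odd field, if any,
// through a 4-byte transfer (the S-register path).
void CopyFieldsModel(uint32_t* dst, const uint32_t* src, int field_count) {
  int double_count = field_count / 2;
  for (int i = 0; i < double_count; i++) {
    std::memcpy(dst + 2 * i, src + 2 * i, 8);
  }
  if ((field_count % 2) != 0) {
    dst[field_count - 1] = src[field_count - 1];
  }
}
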
@ -3463,7 +3353,6 @@ void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
} else {
@ -3474,7 +3363,6 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg) {
void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
DwVfpRegister dreg2) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
if (dreg2.is(d0)) {
ASSERT(!dreg1.is(d1));
@ -3493,7 +3381,6 @@ void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg1,
void MacroAssembler::SetCallCDoubleArguments(DwVfpRegister dreg,
Register reg) {
ASSERT(CpuFeatures::IsSupported(VFP2));
if (use_eabi_hardfloat()) {
Move(d0, dreg);
Move(r0, reg);

43
deps/v8/src/arm/macro-assembler-arm.h

@ -460,6 +460,19 @@ class MacroAssembler: public Assembler {
const MemOperand& dst,
Condition cond = al);
// Ensure that FPSCR contains values needed by JavaScript.
// We need the NaNModeControlBit to be sure that operations like
// vadd and vsub generate the Canonical NaN (if a NaN must be generated).
// In VFP3 it will always be the Canonical NaN.
// In VFP2 it will be either the Canonical NaN or the negative version
// of the Canonical NaN. It doesn't matter if we have two values; the aim
// is to be sure we never generate the hole NaN.
void VFPEnsureFPSCRState(Register scratch);
// If the value is a NaN, canonicalize the value; otherwise, do nothing.
void VFPCanonicalizeNaN(const DwVfpRegister value,
const Condition cond = al);
// Compare double values and move the result to the normal condition flags.
void VFPCompareAndSetFlags(const DwVfpRegister src1,
const DwVfpRegister src2,
@ -743,7 +756,11 @@ class MacroAssembler: public Assembler {
Label* gc_required);
// Copies a fixed number of fields of heap objects from src to dst.
void CopyFields(Register dst, Register src, RegList temps, int field_count);
void CopyFields(Register dst,
Register src,
DwVfpRegister double_scratch,
SwVfpRegister single_scratch,
int field_count);
// Copies a number of bytes from src to dst. All registers are clobbered. On
// exit src and dst will point to the place just after where the last byte was
@ -969,20 +986,12 @@ class MacroAssembler: public Assembler {
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer and all other registers clobbered.
void ECMAToInt32VFP(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch,
Register scratch,
Register input_high,
Register input_low);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer.
void ECMAToInt32NoVFP(Register result,
Register scratch,
Register input_high,
Register input_low);
void ECMAToInt32(Register result,
DwVfpRegister double_input,
DwVfpRegister double_scratch,
Register scratch,
Register input_high,
Register input_low);
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
// instruction. On pre-ARM5 hardware this routine gives the wrong answer
@ -1140,7 +1149,9 @@ class MacroAssembler: public Assembler {
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
#if USE_EABI_HARDFLOAT
#ifdef __arm__
return OS::ArmUsingHardFloat();
#elif USE_EABI_HARDFLOAT
return true;
#else
return false;
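
The predicate changes meaning here: instead of trusting the compile-time USE_EABI_HARDFLOAT flag everywhere, a build running on real ARM hardware now asks the OS which float ABI is in effect, and only non-ARM (simulator) builds keep the compile-time answer. The shape of the new logic, with a hypothetical stand-in declared for OS::ArmUsingHardFloat():

// Hypothetical stand-in for OS::ArmUsingHardFloat(), which probes the
// runtime environment on real ARM hardware.
bool DetectHardFloatAtRuntime();

bool UseEabiHardfloat() {
#ifdef __arm__
  return DetectHardFloatAtRuntime();   // native build: ask the OS
#elif USE_EABI_HARDFLOAT
  return true;                         // simulator: compile-time choice
#else
  return false;
#endif
}
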

211
deps/v8/src/arm/simulator-arm.cc

@ -721,7 +721,7 @@ void Simulator::CheckICache(v8::internal::HashMap* i_cache,
Instruction::kInstrSize) == 0);
} else {
// Cache miss. Load memory into the cache.
memcpy(cached_line, line, CachePage::kLineLength);
OS::MemCopy(cached_line, line, CachePage::kLineLength);
*cache_valid_byte = CachePage::LINE_VALID;
}
}
@ -773,6 +773,7 @@ Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
c_flag_FPSCR_ = false;
v_flag_FPSCR_ = false;
FPSCR_rounding_mode_ = RZ;
FPSCR_default_NaN_mode_ = true;
inv_op_vfp_flag_ = false;
div_zero_vfp_flag_ = false;
@ -902,8 +903,8 @@ double Simulator::get_double_from_register_pair(int reg) {
// Read the bits from the unsigned integer register_[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(vfp_registers_[0])];
memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
OS::MemCopy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
OS::MemCopy(&dm_val, buffer, 2 * sizeof(registers_[0]));
return(dm_val);
}
@ -953,9 +954,9 @@ void Simulator::SetVFPRegister(int reg_index, const InputType& value) {
if (register_size == 2) ASSERT(reg_index < DwVfpRegister::NumRegisters());
char buffer[register_size * sizeof(vfp_registers_[0])];
memcpy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
memcpy(&vfp_registers_[reg_index * register_size], buffer,
register_size * sizeof(vfp_registers_[0]));
OS::MemCopy(buffer, &value, register_size * sizeof(vfp_registers_[0]));
OS::MemCopy(&vfp_registers_[reg_index * register_size], buffer,
register_size * sizeof(vfp_registers_[0]));
}
@ -967,64 +968,34 @@ ReturnType Simulator::GetFromVFPRegister(int reg_index) {
ReturnType value = 0;
char buffer[register_size * sizeof(vfp_registers_[0])];
memcpy(buffer, &vfp_registers_[register_size * reg_index],
register_size * sizeof(vfp_registers_[0]));
memcpy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
OS::MemCopy(buffer, &vfp_registers_[register_size * reg_index],
register_size * sizeof(vfp_registers_[0]));
OS::MemCopy(&value, buffer, register_size * sizeof(vfp_registers_[0]));
return value;
}
// For use in calls that take two double values, constructed either
// Runtime FP routines take up to two double arguments and zero
// or one integer argument. All are constructed here,
// from r0-r3 or d0 and d1.
void Simulator::GetFpArgs(double* x, double* y) {
void Simulator::GetFpArgs(double* x, double* y, int32_t* z) {
if (use_eabi_hardfloat()) {
*x = vfp_registers_[0];
*y = vfp_registers_[1];
*z = registers_[1];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
memcpy(buffer, registers_, sizeof(*x));
memcpy(x, buffer, sizeof(*x));
OS::MemCopy(buffer, registers_, sizeof(*x));
OS::MemCopy(x, buffer, sizeof(*x));
// Registers 2 and 3 -> y.
memcpy(buffer, registers_ + 2, sizeof(*y));
memcpy(y, buffer, sizeof(*y));
}
}
// For use in calls that take one double value, constructed either
// from r0 and r1 or d0.
void Simulator::GetFpArgs(double* x) {
if (use_eabi_hardfloat()) {
*x = vfp_registers_[0];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
memcpy(buffer, registers_, sizeof(*x));
memcpy(x, buffer, sizeof(*x));
}
}
// For use in calls that take one double value constructed either
// from r0 and r1 or d0 and one integer value.
void Simulator::GetFpArgs(double* x, int32_t* y) {
if (use_eabi_hardfloat()) {
*x = vfp_registers_[0];
*y = registers_[1];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
memcpy(buffer, registers_, sizeof(*x));
memcpy(x, buffer, sizeof(*x));
// Register 2 -> y.
memcpy(buffer, registers_ + 2, sizeof(*y));
memcpy(y, buffer, sizeof(*y));
OS::MemCopy(buffer, registers_ + 2, sizeof(*y));
OS::MemCopy(y, buffer, sizeof(*y));
// Register 2 -> z.
OS::MemCopy(buffer, registers_ + 2, sizeof(*z));
OS::MemCopy(z, buffer, sizeof(*z));
}
}
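
Under the soft-float calling convention a double argument arrives in a core register pair (r0:r1 for the first, r2:r3 for the second), and the integer argument shares r2 with the second double's low word, which is why the merged GetFpArgs can read both unconditionally: only one of them is meaningful for any given call type. The char-buffer copies exist to sidestep strict aliasing; the same reassembly in portable form:

#include <cstdint>
#include <cstring>

// Reassemble a soft-float double argument from its two 32-bit core
// registers. memcpy through plain bytes avoids the strict-aliasing trap
// of casting int32_t* to double* (the reason for the char buffer above).
double DoubleFromRegisterPair(int32_t lo, int32_t hi) {
  int32_t pair[2] = { lo, hi };  // r0:r1 or r2:r3 on little-endian ARM
  double d;
  std::memcpy(&d, pair, sizeof(d));
  return d;
}
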
@ -1033,14 +1004,14 @@ void Simulator::GetFpArgs(double* x, int32_t* y) {
void Simulator::SetFpResult(const double& result) {
if (use_eabi_hardfloat()) {
char buffer[2 * sizeof(vfp_registers_[0])];
memcpy(buffer, &result, sizeof(buffer));
OS::MemCopy(buffer, &result, sizeof(buffer));
// Copy result to d0.
memcpy(vfp_registers_, buffer, sizeof(buffer));
OS::MemCopy(vfp_registers_, buffer, sizeof(buffer));
} else {
char buffer[2 * sizeof(registers_[0])];
memcpy(buffer, &result, sizeof(buffer));
OS::MemCopy(buffer, &result, sizeof(buffer));
// Copy result to r0 and r1.
memcpy(registers_, buffer, sizeof(buffer));
OS::MemCopy(registers_, buffer, sizeof(buffer));
}
}
@ -1619,12 +1590,12 @@ void Simulator::HandleVList(Instruction* instr) {
ReadW(reinterpret_cast<int32_t>(address + 1), instr)
};
double d;
memcpy(&d, data, 8);
OS::MemCopy(&d, data, 8);
set_d_register_from_double(reg, d);
} else {
int32_t data[2];
double d = get_double_from_d_register(reg);
memcpy(data, &d, 8);
OS::MemCopy(data, &d, 8);
WriteW(reinterpret_cast<int32_t>(address), data[0], instr);
WriteW(reinterpret_cast<int32_t>(address + 1), data[1], instr);
}
@ -1647,10 +1618,12 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg3,
int32_t arg4,
int32_t arg5);
typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
int32_t arg1,
int32_t arg2,
int32_t arg3);
// These prototypes handle the four types of FP calls.
typedef int64_t (*SimulatorRuntimeCompareCall)(double darg0, double darg1);
typedef double (*SimulatorRuntimeFPFPCall)(double darg0, double darg1);
typedef double (*SimulatorRuntimeFPCall)(double darg0);
typedef double (*SimulatorRuntimeFPIntCall)(double darg0, int32_t arg0);
// This signature supports direct calls into the API function native callback
// (refer to InvocationCallback in v8.h).
@ -1716,27 +1689,27 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
intptr_t external =
reinterpret_cast<intptr_t>(redirection->external_function());
if (fp_call) {
double dval0, dval1; // one or two double parameters
int32_t ival; // zero or one integer parameters
int64_t iresult = 0; // integer return value
double dresult = 0; // double return value
GetFpArgs(&dval0, &dval1, &ival);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
SimulatorRuntimeFPCall target =
reinterpret_cast<SimulatorRuntimeFPCall>(external);
double dval0, dval1;
int32_t ival;
SimulatorRuntimeCall generic_target =
reinterpret_cast<SimulatorRuntimeCall>(external);
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
GetFpArgs(&dval0, &dval1);
PrintF("Call to host function at %p with args %f, %f",
FUNCTION_ADDR(generic_target), dval0, dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
PrintF("Call to host function at %p with arg %f",
FUNCTION_ADDR(generic_target), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
PrintF("Call to host function at %p with args %f, %d",
FUNCTION_ADDR(generic_target), dval0, ival);
break;
default:
UNREACHABLE();
@ -1748,22 +1721,54 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
PrintF("\n");
}
CHECK(stack_aligned);
switch (redirection->type()) {
case ExternalReference::BUILTIN_COMPARE_CALL: {
SimulatorRuntimeCompareCall target =
reinterpret_cast<SimulatorRuntimeCompareCall>(external);
iresult = target(dval0, dval1);
set_register(r0, static_cast<int32_t>(iresult));
set_register(r1, static_cast<int32_t>(iresult >> 32));
break;
}
case ExternalReference::BUILTIN_FP_FP_CALL: {
SimulatorRuntimeFPFPCall target =
reinterpret_cast<SimulatorRuntimeFPFPCall>(external);
dresult = target(dval0, dval1);
SetFpResult(dresult);
break;
}
case ExternalReference::BUILTIN_FP_CALL: {
SimulatorRuntimeFPCall target =
reinterpret_cast<SimulatorRuntimeFPCall>(external);
dresult = target(dval0);
SetFpResult(dresult);
break;
}
case ExternalReference::BUILTIN_FP_INT_CALL: {
SimulatorRuntimeFPIntCall target =
reinterpret_cast<SimulatorRuntimeFPIntCall>(external);
dresult = target(dval0, ival);
SetFpResult(dresult);
break;
}
default:
UNREACHABLE();
break;
}
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
switch (redirection->type()) {
case ExternalReference::BUILTIN_COMPARE_CALL:
PrintF("Returned %08x\n", static_cast<int32_t>(iresult));
break;
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_FP_CALL:
case ExternalReference::BUILTIN_FP_INT_CALL:
PrintF("Returned %f\n", dresult);
break;
default:
UNREACHABLE();
break;
}
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
SimulatorRuntimeDirectApiCall target =
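One detail worth spelling out from the BUILTIN_COMPARE_CALL case above: on 32-bit ARM the EABI returns an int64_t in the r0/r1 pair, so the simulator writes the low and high words separately. A standalone sketch of that round trip:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int64_t iresult = -2;  // e.g. a "less than" outcome from a compare call

      // Split across two 32-bit registers: low word in r0, high word in r1.
      int32_t r0 = static_cast<int32_t>(iresult);
      int32_t r1 = static_cast<int32_t>(iresult >> 32);

      // Generated code reassembles the pair into the 64-bit value.
      int64_t roundtrip =
          (static_cast<int64_t>(r1) << 32) | static_cast<uint32_t>(r0);
      printf("%lld\n", static_cast<long long>(roundtrip));  // -2
    }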
@ -1864,6 +1869,11 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
}
double Simulator::canonicalizeNaN(double value) {
return (FPSCR_default_NaN_mode_ && isnan(value)) ?
FixedDoubleArray::canonical_not_the_hole_nan_as_double() : value;
}
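canonicalizeNaN exists because IEEE-754 allows many NaN bit patterns, while V8's FixedDoubleArray reserves one specific pattern to mark holes; with the FPSCR default-NaN mode set, every NaN the simulated VFP unit produces is rewritten to a single canonical encoding. A rough standalone illustration (the canonical pattern below is the generic quiet NaN, not necessarily V8's constant):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    const uint64_t kCanonicalNaN = 0x7FF8000000000000ULL;  // assumed pattern

    double CanonicalizeNaN(double value) {
      if (!std::isnan(value)) return value;
      double canonical;
      memcpy(&canonical, &kCanonicalNaN, sizeof(canonical));
      return canonical;
    }

    int main() {
      // Build a NaN with a nonzero payload, then canonicalize it.
      uint64_t bits = 0x7FF800000000BEEFULL;
      double payload_nan;
      memcpy(&payload_nan, &bits, sizeof(payload_nan));

      double c = CanonicalizeNaN(payload_nan);
      uint64_t out;
      memcpy(&out, &c, sizeof(out));
      printf("%llx\n", static_cast<unsigned long long>(out));  // 7ff8000000000000
    }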
// Stop helper functions.
bool Simulator::isStopInstruction(Instruction* instr) {
return (instr->Bits(27, 24) == 0xF) && (instr->SvcValue() >= kStopCode);
@ -2724,11 +2734,13 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
// vabs
double dm_value = get_double_from_d_register(vm);
double dd_value = fabs(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
// vneg
double dm_value = get_double_from_d_register(vm);
double dd_value = -dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
@ -2744,6 +2756,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
// vsqrt
double dm_value = get_double_from_d_register(vm);
double dd_value = sqrt(dm_value);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if (instr->Opc3Value() == 0x0) {
// vmov immediate.
@ -2765,12 +2778,14 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value - dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
// vadd
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value + dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
}
} else if ((instr->Opc1Value() == 0x2) && !(instr->Opc3Value() & 0x1)) {
@ -2782,6 +2797,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dn_value = get_double_from_d_register(vn);
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value * dm_value;
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc1Value() == 0x0)) {
// vmla, vmls
@ -2799,9 +2815,13 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
// result with too high precision.
set_d_register_from_double(vd, dn_val * dm_val);
if (is_vmls) {
set_d_register_from_double(
vd,
canonicalizeNaN(dd_val - get_double_from_d_register(vd)));
} else {
set_d_register_from_double(
vd,
canonicalizeNaN(dd_val + get_double_from_d_register(vd)));
}
} else if ((instr->Opc1Value() == 0x4) && !(instr->Opc3Value() & 0x1)) {
// vdiv
@ -2813,6 +2833,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dm_value = get_double_from_d_register(vm);
double dd_value = dn_value / dm_value;
div_zero_vfp_flag_ = (dm_value == 0);
dd_value = canonicalizeNaN(dd_value);
set_d_register_from_double(vd, dd_value);
} else {
UNIMPLEMENTED(); // Not used by V8.
@ -2828,9 +2849,9 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
int vd = instr->Bits(19, 16) | (instr->Bit(7) << 4);
double dd_value = get_double_from_d_register(vd);
int32_t data[2];
OS::MemCopy(data, &dd_value, 8);
data[instr->Bit(21)] = get_register(instr->RtValue());
OS::MemCopy(&dd_value, data, 8);
set_d_register_from_double(vd, dd_value);
} else if ((instr->VLValue() == 0x1) &&
(instr->VCValue() == 0x0) &&
@ -2846,6 +2867,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
(z_flag_FPSCR_ << 30) |
(c_flag_FPSCR_ << 29) |
(v_flag_FPSCR_ << 28) |
(FPSCR_default_NaN_mode_ << 25) |
(inexact_vfp_flag_ << 4) |
(underflow_vfp_flag_ << 3) |
(overflow_vfp_flag_ << 2) |
@ -2868,6 +2890,7 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
z_flag_FPSCR_ = (rt_value >> 30) & 1;
c_flag_FPSCR_ = (rt_value >> 29) & 1;
v_flag_FPSCR_ = (rt_value >> 28) & 1;
FPSCR_default_NaN_mode_ = (rt_value >> 25) & 1;
inexact_vfp_flag_ = (rt_value >> 4) & 1;
underflow_vfp_flag_ = (rt_value >> 3) & 1;
overflow_vfp_flag_ = (rt_value >> 2) & 1;
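Both FPSCR hunks are plain bit packing: each status flag occupies an architecturally fixed bit, with bit 25 being the ARM default-NaN (DN) control that the new code now models. The same encode/decode in isolation:

    #include <cstdint>
    #include <cstdio>

    int main() {
      bool n = true, z = false, c = true, v = false, default_nan = true;

      // Pack flags into their FPSCR positions (a subset of the bits above).
      uint32_t fpscr = (static_cast<uint32_t>(n) << 31) |
                       (static_cast<uint32_t>(z) << 30) |
                       (static_cast<uint32_t>(c) << 29) |
                       (static_cast<uint32_t>(v) << 28) |
                       (static_cast<uint32_t>(default_nan) << 25);

      // Unpack a flag exactly as the VMRS/VMSR emulation does.
      bool dn = (fpscr >> 25) & 1;
      printf("fpscr=%08x dn=%d\n", static_cast<unsigned>(fpscr),
             static_cast<int>(dn));
    }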
@ -3179,13 +3202,13 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
if (instr->HasL()) {
int32_t data[2];
double d = get_double_from_d_register(vm);
OS::MemCopy(data, &d, 8);
set_register(rt, data[0]);
set_register(rn, data[1]);
} else {
int32_t data[] = { get_register(rt), get_register(rn) };
double d;
OS::MemCopy(&d, data, 8);
set_d_register_from_double(vm, d);
}
}
@ -3208,13 +3231,13 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
ReadW(address + 4, instr)
};
double val;
OS::MemCopy(&val, data, 8);
set_d_register_from_double(vd, val);
} else {
// Store double to memory: vstr.
int32_t data[2];
double val = get_double_from_d_register(vd);
OS::MemCopy(data, &val, 8);
WriteW(address, data[0], instr);
WriteW(address + 4, data[1], instr);
}
@ -3437,9 +3460,9 @@ double Simulator::CallFP(byte* entry, double d0, double d1) {
} else {
int buffer[2];
ASSERT(sizeof(buffer[0]) * 2 == sizeof(d0));
OS::MemCopy(buffer, &d0, sizeof(d0));
set_dw_register(0, buffer);
OS::MemCopy(buffer, &d1, sizeof(d1));
set_dw_register(2, buffer);
}
CallInternal(entry);

8
deps/v8/src/arm/simulator-arm.h

@ -274,6 +274,7 @@ class Simulator {
// Support for VFP.
void Compute_FPSCR_Flags(double val1, double val2);
void Copy_FPSCR_to_APSR();
inline double canonicalizeNaN(double value);
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instruction* instr, bool* carry_out);
@ -347,10 +348,8 @@ class Simulator {
void* external_function,
v8::internal::ExternalReference::Type type);
// Handle arguments and return value for runtime FP functions.
void GetFpArgs(double* x, double* y, int32_t* z);
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
@ -381,6 +380,7 @@ class Simulator {
// VFP rounding mode. See ARM DDI 0406B Page A2-29.
VFPRoundingMode FPSCR_rounding_mode_;
bool FPSCR_default_NaN_mode_;
// VFP FP exception flags architecture state.
bool inv_op_vfp_flag_;

611
deps/v8/src/arm/stub-cache-arm.cc

@ -417,30 +417,48 @@ void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
}
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
static void GenerateCheckPropertyCell(MacroAssembler* masm,
Handle<GlobalObject> global,
Handle<Name> name,
Register scratch,
Label* miss) {
Handle<JSGlobalPropertyCell> cell =
GlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
__ mov(scratch, Operand(cell));
__ ldr(scratch,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
__ b(ne, miss);
}
// Generate StoreTransition code, value is passed in r0 register.
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
void StubCompiler::GenerateStoreTransition(MacroAssembler* masm,
Handle<JSObject> object,
LookupResult* lookup,
Handle<Map> transition,
Handle<Name> name,
Register receiver_reg,
Register name_reg,
Register value_reg,
Register scratch1,
Register scratch2,
Label* miss_label,
Label* miss_restore_name) {
// r0 : value
Label exit;
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
DO_SMI_CHECK, REQUIRE_EXACT_MAP);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
@ -448,7 +466,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
}
// Check that we are allowed to write this.
if (object->GetPrototype()->IsJSObject()) {
JSObject* holder;
// holder == object indicates that no property was found.
if (lookup->holder() != *object) {
@ -466,12 +484,18 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
// If no property was found, and the holder (the last object in the
// prototype chain) is in slow mode, we need to do a negative lookup on the
// holder.
if (lookup->holder() == *object) {
if (holder->IsJSGlobalObject()) {
GenerateCheckPropertyCell(
masm,
Handle<GlobalObject>(GlobalObject::cast(holder)),
name,
scratch1,
miss_restore_name);
} else if (!holder->HasFastProperties() && !holder->IsJSGlobalProxy()) {
GenerateDictionaryNegativeLookup(
masm, miss_restore_name, holder_reg, name, scratch1, scratch2);
}
}
}
@ -480,7 +504,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
// Perform map transition for the receiver if necessary.
if (object->map()->unused_property_fields() == 0) {
// The properties must be extended before we can store the value.
// We jump to a runtime call that extends the properties array.
__ push(receiver_reg);
@ -494,33 +518,113 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
return;
}
// Update the map of the object.
__ mov(scratch1, Operand(transition));
__ str(scratch1, FieldMemOperand(receiver_reg, HeapObject::kMapOffset));
// Update the write barrier for the map field and pass the now unused
// name_reg as scratch register.
__ RecordWriteField(receiver_reg,
HeapObject::kMapOffset,
scratch1,
name_reg,
kLRHasNotBeenSaved,
kDontSaveFPRegs,
OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
int index = transition->instance_descriptors()->GetFieldIndex(
transition->LastAdded());
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
__ str(value_reg, FieldMemOperand(receiver_reg, offset));
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
// Update the write barrier for the array address.
// Pass the now unused name_reg as a scratch register.
__ mov(name_reg, value_reg);
__ RecordWriteField(receiver_reg,
offset,
name_reg,
scratch1,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
} else {
// Write to the properties array.
int offset = index * kPointerSize + FixedArray::kHeaderSize;
// Get the properties array
__ ldr(scratch1,
FieldMemOperand(receiver_reg, JSObject::kPropertiesOffset));
__ str(value_reg, FieldMemOperand(scratch1, offset));
// Skip updating write barrier if storing a smi.
__ JumpIfSmi(value_reg, &exit);
// Update the write barrier for the array address.
// Ok to clobber receiver_reg and name_reg, since we return.
__ mov(name_reg, value_reg);
__ RecordWriteField(scratch1,
offset,
name_reg,
receiver_reg,
kLRHasNotBeenSaved,
kDontSaveFPRegs);
}
// Return the value (register r0).
ASSERT(value_reg.is(r0));
__ bind(&exit);
__ Ret();
}
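The index arithmetic above encodes V8's two property backing stores: after subtracting inobject_properties(), a negative index means the field lives inside the object body, and a non-negative one means it lives in the external properties FixedArray. A hedged C++ restatement of just the offset computation (constants invented for the example):

    #include <cstdio>

    const int kPointerSize = 4;  // 32-bit ARM

    // Returns the byte offset for a property and whether it is in-object.
    int FieldOffset(int descriptor_index, int inobject_properties,
                    int instance_size, int header_size, bool* in_object) {
      int index = descriptor_index - inobject_properties;
      if (index < 0) {
        *in_object = true;   // stored inside the object itself
        return instance_size + index * kPointerSize;
      }
      *in_object = false;    // stored in the properties array
      return index * kPointerSize + header_size;
    }

    int main() {
      bool in_object;
      // e.g. descriptor 1 of an object with 2 in-object slots.
      int off = FieldOffset(1, 2, 16, 8, &in_object);
      printf("offset=%d in_object=%d\n", off, static_cast<int>(in_object));
    }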
// Generate StoreField code, value is passed in r0 register.
// When leaving generated code after success, the receiver_reg and name_reg
// may be clobbered. Upon branch to miss_label, the receiver and name
// registers have their original values.
void StubCompiler::GenerateStoreField(MacroAssembler* masm,
Handle<JSObject> object,
LookupResult* lookup,
Register receiver_reg,
Register name_reg,
Register value_reg,
Register scratch1,
Register scratch2,
Label* miss_label) {
// r0 : value
Label exit;
// Check that the map of the object hasn't changed.
__ CheckMap(receiver_reg, scratch1, Handle<Map>(object->map()), miss_label,
DO_SMI_CHECK, ALLOW_ELEMENT_TRANSITION_MAPS);
// Perform global security token check if needed.
if (object->IsJSGlobalProxy()) {
__ CheckAccessGlobalProxy(receiver_reg, scratch1, miss_label);
}
// Stub never generated for non-global objects that require access
// checks.
ASSERT(object->IsJSGlobalProxy() || !object->IsAccessCheckNeeded());
int index = lookup->GetFieldIndex().field_index();
// Adjust for the number of properties stored in the object. Even in the
// face of a transition we can use the old map here because the size of the
// object and the number of in-object properties is not going to change.
index -= object->map()->inobject_properties();
// TODO(verwaest): Share this code as a code stub.
if (index < 0) {
// Set the property straight into the object.
int offset = object->map()->instance_size() + (index * kPointerSize);
@ -926,26 +1030,6 @@ class CallInterceptorCompiler BASE_EMBEDDED {
};
// Generate code to check that a global property cell is empty. Create
// the property cell at compilation time if no cell exists for the
// property.
static void GenerateCheckPropertyCell(MacroAssembler* masm,
Handle<GlobalObject> global,
Handle<Name> name,
Register scratch,
Label* miss) {
Handle<JSGlobalPropertyCell> cell =
GlobalObject::EnsurePropertyCell(global, name);
ASSERT(cell->value()->IsTheHole());
__ mov(scratch, Operand(cell));
__ ldr(scratch,
FieldMemOperand(scratch, JSGlobalPropertyCell::kValueOffset));
__ LoadRoot(ip, Heap::kTheHoleValueRootIndex);
__ cmp(scratch, ip);
__ b(ne, miss);
}
// Calls GenerateCheckPropertyCell for each global object in the prototype chain
// from object to (but not including) holder.
static void GenerateCheckPropertyCells(MacroAssembler* masm,
@ -975,66 +1059,11 @@ static void StoreIntAsFloat(MacroAssembler* masm,
Register dst,
Register wordoffset,
Register ival,
Register scratch1) {
__ vmov(s0, ival);
__ add(scratch1, dst, Operand(wordoffset, LSL, 2));
__ vcvt_f32_s32(s0, s0);
__ vstr(s0, scratch1, 0);
}
@ -1225,7 +1254,7 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
Handle<GlobalObject> global) {
Label miss;
HandlerFrontendHeader(object, receiver(), last, name, &miss);
// If the last object in the prototype chain is a global object,
// check that the global property cell is empty.
@ -1233,13 +1262,6 @@ void BaseLoadStubCompiler::NonexistentHandlerFrontend(
GenerateCheckPropertyCell(masm(), global, name, scratch2(), &miss);
}
if (!last->HasFastProperties()) {
__ ldr(scratch2(), FieldMemOperand(reg, HeapObject::kMapOffset));
__ ldr(scratch2(), FieldMemOperand(scratch2(), Map::kPrototypeOffset));
__ cmp(scratch2(), Operand(isolate()->factory()->null_value()));
__ b(ne, &miss);
}
HandlerFrontendFooter(success, &miss);
}
@ -1599,7 +1621,7 @@ Handle<Code> CallStubCompiler::CompileArrayPushCall(
__ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize));
__ StoreNumberToDoubleElements(
r4, r0, elements, r5, r2, r3, r9,
&call_builtin, argc * kDoubleSize);
// Save new length.
@ -2089,11 +2111,6 @@ Handle<Code> CallStubCompiler::CompileMathFloorCall(
// -- sp[argc * 4] : receiver
// -----------------------------------
if (!CpuFeatures::IsSupported(VFP2)) {
return Handle<Code>::null();
}
CpuFeatureScope scope_vfp2(masm(), VFP2);
const int argc = arguments().immediate();
// If the object is not a JSObject or we got an unexpected number of
// arguments, bail out to the regular call.
@ -3133,36 +3150,6 @@ void KeyedLoadStubCompiler::GenerateLoadDictionaryElement(
}
static bool IsElementTypeSigned(ElementsKind elements_kind) {
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_INT_ELEMENTS:
return true;
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
case EXTERNAL_PIXEL_ELEMENTS:
return false;
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
return false;
}
return false;
}
static void GenerateSmiKeyCheck(MacroAssembler* masm,
Register key,
Register scratch0,
@ -3170,29 +3157,23 @@ static void GenerateSmiKeyCheck(MacroAssembler* masm,
DwVfpRegister double_scratch0,
DwVfpRegister double_scratch1,
Label* fail) {
Label key_ok;
// Check for smi or a smi inside a heap number. We convert the heap
// number and check if the conversion is exact and fits into the smi
// range.
__ JumpIfSmi(key, &key_ok);
__ CheckMap(key,
scratch0,
Heap::kHeapNumberMapRootIndex,
fail,
DONT_DO_SMI_CHECK);
__ sub(ip, key, Operand(kHeapObjectTag));
__ vldr(double_scratch0, ip, HeapNumber::kValueOffset);
__ TryDoubleToInt32Exact(scratch0, double_scratch0, double_scratch1);
__ b(ne, fail);
__ TrySmiTag(scratch0, fail, scratch1);
__ mov(key, scratch0);
__ bind(&key_ok);
}
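GenerateSmiKeyCheck now always accepts a heap number whose value converts exactly to an int32, rather than bailing out on non-smi keys when VFP2 was missing. The test TryDoubleToInt32Exact performs is essentially a convert-and-round-trip check, sketched here in plain C++ (the real code additionally rejects -0 and uses VFP instructions):

    #include <cstdint>
    #include <cstdio>

    bool DoubleToInt32Exact(double value, int32_t* out) {
      // The range test also rejects NaN, since NaN fails every comparison.
      if (!(value >= -2147483648.0 && value <= 2147483647.0)) return false;
      int32_t truncated = static_cast<int32_t>(value);
      if (static_cast<double>(truncated) != value) return false;
      *out = truncated;
      return true;
    }

    int main() {
      int32_t key;
      printf("%d\n", DoubleToInt32Exact(3.0, &key));  // 1 (key == 3)
      printf("%d\n", DoubleToInt32Exact(3.5, &key));  // 0
    }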
@ -3262,28 +3243,18 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
case EXTERNAL_FLOAT_ELEMENTS:
// Perform int-to-float conversion and store to memory.
__ SmiUntag(r4, key);
StoreIntAsFloat(masm, r3, r4, r5, r7);
break;
case EXTERNAL_DOUBLE_ELEMENTS:
__ add(r3, r3, Operand(key, LSL, 2));
// r3: effective address of the double element
FloatingPointHelper::Destination destination;
destination = FloatingPointHelper::kVFPRegisters;
FloatingPointHelper::ConvertIntToDouble(
masm, r5, destination,
d0, r6, r7, // These are: double_dst, dst_mantissa, dst_exponent.
r4, s2); // These are: scratch2, single_scratch.
__ vstr(d0, r3, 0);
break;
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
@ -3313,201 +3284,59 @@ void KeyedStoreStubCompiler::GenerateStoreExternalArray(
// The WebGL specification leaves the behavior of storing NaN and
// +/-Infinity into integer arrays basically undefined. For more
// reproducible behavior, convert these to zero.
if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) {
// vldr requires offset to be a multiple of 4 so we can not
// include -kHeapObjectTag into it.
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(key, LSL, 1));
__ vcvt_f32_f64(s0, d0);
__ vstr(s0, r5, 0);
} else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) {
__ sub(r5, r0, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ add(r5, r3, Operand(key, LSL, 2));
__ vstr(d0, r5, 0);
} else {
// Hoisted load. vldr requires offset to be a multiple of 4 so we can
// not include -kHeapObjectTag into it.
__ sub(r5, value, Operand(kHeapObjectTag));
__ vldr(d0, r5, HeapNumber::kValueOffset);
__ ECMAToInt32(r5, d0, d1, r6, r7, r9);
switch (elements_kind) {
case EXTERNAL_BYTE_ELEMENTS:
case EXTERNAL_UNSIGNED_BYTE_ELEMENTS:
__ strb(r5, MemOperand(r3, key, LSR, 1));
break;
case EXTERNAL_SHORT_ELEMENTS:
case EXTERNAL_UNSIGNED_SHORT_ELEMENTS:
__ strh(r5, MemOperand(r3, key, LSL, 0));
break;
case EXTERNAL_INT_ELEMENTS:
case EXTERNAL_UNSIGNED_INT_ELEMENTS:
__ str(r5, MemOperand(r3, key, LSL, 1));
break;
case EXTERNAL_PIXEL_ELEMENTS:
case EXTERNAL_FLOAT_ELEMENTS:
case EXTERNAL_DOUBLE_ELEMENTS:
case FAST_ELEMENTS:
case FAST_SMI_ELEMENTS:
case FAST_DOUBLE_ELEMENTS:
case FAST_HOLEY_ELEMENTS:
case FAST_HOLEY_SMI_ELEMENTS:
case FAST_HOLEY_DOUBLE_ELEMENTS:
case DICTIONARY_ELEMENTS:
case NON_STRICT_ARGUMENTS_ELEMENTS:
UNREACHABLE();
break;
}
}
// Entry registers are intact, r0 holds the value which is the return
// value.
__ Ret();
}
// Slow case, key and receiver still in r0 and r1.
@ -3757,9 +3586,9 @@ void KeyedStoreStubCompiler::GenerateStoreFastDoubleElement(
// All registers after this are overwritten.
elements_reg,
scratch1,
scratch3,
scratch4,
scratch2,
&transition_elements_kind);
__ Ret();

13
deps/v8/src/array.js

@ -1017,7 +1017,7 @@ function ArrayFilter(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
receiver = ToObject(receiver);
}
@ -1068,9 +1068,10 @@ function ArrayForEach(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
receiver = ToObject(receiver);
}
if (%DebugCallbackSupportsStepping(f)) {
for (var i = 0; i < length; i++) {
if (i in array) {
@ -1111,7 +1112,7 @@ function ArraySome(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
receiver = ToObject(receiver);
}
@ -1154,7 +1155,7 @@ function ArrayEvery(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
receiver = ToObject(receiver);
}
@ -1196,7 +1197,7 @@ function ArrayMap(f, receiver) {
}
if (IS_NULL_OR_UNDEFINED(receiver)) {
receiver = %GetDefaultReceiver(f) || receiver;
} else if (!IS_SPEC_OBJECT(receiver) && %IsClassicModeFunction(f)) {
receiver = ToObject(receiver);
}
@ -1453,8 +1454,10 @@ function ArrayIsArray(obj) {
// -------------------------------------------------------------------
function SetUpArray() {
%CheckIsBootstrapping();
// Set up non-enumerable constructor property on the Array.prototype
// object.
%SetProperty($Array.prototype, "constructor", $Array, DONT_ENUM);

20
deps/v8/src/assembler.cc

@ -191,11 +191,9 @@ CpuFeatureScope::CpuFeatureScope(AssemblerBase* assembler, CpuFeature f)
uint64_t mask = static_cast<uint64_t>(1) << f;
// TODO(svenpanne) This special case below doesn't belong here!
#if V8_TARGET_ARCH_ARM
// ARMv7 is implied by VFP3.
if (f == VFP3) {
mask |= static_cast<uint64_t>(1) << ARMv7;
}
#endif
assembler_->set_enabled_cpu_features(old_enabled_ | mask);
@ -1191,6 +1189,20 @@ ExternalReference ExternalReference::old_pointer_space_allocation_limit_address(
}
ExternalReference ExternalReference::old_data_space_allocation_top_address(
Isolate* isolate) {
return ExternalReference(
isolate->heap()->OldDataSpaceAllocationTopAddress());
}
ExternalReference ExternalReference::old_data_space_allocation_limit_address(
Isolate* isolate) {
return ExternalReference(
isolate->heap()->OldDataSpaceAllocationLimitAddress());
}
ExternalReference ExternalReference::handle_scope_level_address(
Isolate* isolate) {
return ExternalReference(HandleScope::current_level_address(isolate));

4
deps/v8/src/assembler.h

@ -753,6 +753,10 @@ class ExternalReference BASE_EMBEDDED {
Isolate* isolate);
static ExternalReference old_pointer_space_allocation_limit_address(
Isolate* isolate);
static ExternalReference old_data_space_allocation_top_address(
Isolate* isolate);
static ExternalReference old_data_space_allocation_limit_address(
Isolate* isolate);
static ExternalReference double_fp_operation(Token::Value operation,
Isolate* isolate);

6
deps/v8/src/ast.cc

@ -509,6 +509,11 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
}
LookupResult lookup(type->GetIsolate());
while (true) {
// If a dictionary map is found in the prototype chain before the actual
// target, a new target can always appear. In that case, bail out.
// TODO(verwaest): Alternatively a runtime negative lookup on the normal
// receiver or prototype could be added.
if (type->is_dictionary_map()) return false;
type->LookupDescriptor(NULL, *name, &lookup);
if (lookup.IsFound()) {
switch (lookup.type()) {
@ -534,7 +539,6 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
if (!type->prototype()->IsJSObject()) return false;
// Go up the prototype chain, recording where we are currently.
holder_ = Handle<JSObject>(JSObject::cast(type->prototype()));
if (!holder_->HasFastProperties()) return false;
type = Handle<Map>(holder()->map());
}
}

19
deps/v8/src/ast.h

@ -1811,7 +1811,8 @@ class CountOperation: public Expression {
Token::Value op_;
bool is_prefix_ : 1;
bool is_monomorphic_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
Expression* expression_;
int pos_;
const BailoutId assignment_id_;
@ -1953,7 +1954,8 @@ class Assignment: public Expression {
const BailoutId assignment_id_;
bool is_monomorphic_ : 1;
KeyedAccessStoreMode store_mode_ : 5; // Windows treats as signed,
// must have extra bit.
SmallMapList receiver_types_;
};
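The widened bitfields above work around a real portability trap: MSVC gives enum bitfields a signed underlying type, so a field just wide enough for the value range reads back negative whenever the top bit is set, and comparisons against the enum constants silently fail. A small sketch of the failure mode (enum values invented):

    #include <cstdio>

    enum StoreMode { STANDARD_STORE = 0, GROW_STORE = 3 };

    struct Packed {
      StoreMode narrow_ : 2;  // top bit is the sign bit if treated as signed
      StoreMode wide_   : 3;  // the spare bit keeps values non-negative
    };

    int main() {
      Packed p;
      p.narrow_ = GROW_STORE;
      p.wide_ = GROW_STORE;
      // On compilers with signed enum bitfields (notably MSVC), narrow_
      // reads back as -1 here and narrow_ == GROW_STORE is false.
      printf("narrow=%d wide=%d\n",
             static_cast<int>(p.narrow_), static_cast<int>(p.wide_));
    }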
@ -1962,21 +1964,25 @@ class Yield: public Expression {
public:
DECLARE_NODE_TYPE(Yield)
Expression* generator_object() const { return generator_object_; }
Expression* expression() const { return expression_; }
bool is_delegating_yield() const { return is_delegating_yield_; }
virtual int position() const { return pos_; }
protected:
Yield(Isolate* isolate,
Expression* generator_object,
Expression* expression,
bool is_delegating_yield,
int pos)
: Expression(isolate),
generator_object_(generator_object),
expression_(expression),
is_delegating_yield_(is_delegating_yield),
pos_(pos) { }
private:
Expression* generator_object_;
Expression* expression_;
bool is_delegating_yield_;
int pos_;
@ -2958,9 +2964,12 @@ class AstNodeFactory BASE_EMBEDDED {
VISIT_AND_RETURN(Assignment, assign)
}
Yield* NewYield(Expression *generator_object,
Expression* expression,
bool is_delegating_yield,
int pos) {
Yield* yield = new(zone_) Yield(
isolate_, generator_object, expression, is_delegating_yield, pos);
VISIT_AND_RETURN(Yield, yield)
}

7
deps/v8/src/atomicops_internals_x86_gcc.cc

@ -31,6 +31,7 @@
#include <string.h>
#include "atomicops.h"
#include "platform.h"
// This file only makes sense with atomicops_internals_x86_gcc.h -- it
// depends on structs that are defined in that file. If atomicops.h
@ -84,9 +85,9 @@ void AtomicOps_Internalx86CPUFeaturesInit() {
// Get vendor string (issue CPUID with eax = 0)
cpuid(eax, ebx, ecx, edx, 0);
char vendor[13];
v8::internal::OS::MemCopy(vendor, &ebx, 4);
v8::internal::OS::MemCopy(vendor + 4, &edx, 4);
v8::internal::OS::MemCopy(vendor + 8, &ecx, 4);
vendor[12] = 0;
// get feature flags in ecx/edx, and family/model in eax

2
deps/v8/src/atomicops_internals_x86_gcc.h

@ -168,7 +168,7 @@ inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
return *ptr;
}
#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)
// 64-bit low-level operations on 64-bit platform.

9
deps/v8/src/bignum.cc

@ -735,6 +735,13 @@ void Bignum::BigitsShiftLeft(int shift_amount) {
void Bignum::SubtractTimes(const Bignum& other, int factor) {
#ifdef DEBUG
Bignum a, b;
a.AssignBignum(*this);
b.AssignBignum(other);
b.MultiplyByUInt32(factor);
a.SubtractBignum(b);
#endif
ASSERT(exponent_ <= other.exponent_);
if (factor < 3) {
for (int i = 0; i < factor; ++i) {
@ -758,9 +765,9 @@ void Bignum::SubtractTimes(const Bignum& other, int factor) {
Chunk difference = bigits_[i] - borrow;
bigits_[i] = difference & kBigitMask;
borrow = difference >> (kChunkSize - 1);
++i;
}
Clamp();
ASSERT(Bignum::Equal(a, *this));
}
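The new #ifdef DEBUG block is a shadow computation: before the optimized subtract-with-borrow loop runs, debug builds derive the expected result the slow, obvious way, and the trailing ASSERT(Bignum::Equal(a, *this)) verifies the fast path against it. The same pattern in miniature (compile with -DDEBUG to enable the check):

    #include <cassert>
    #include <cstdint>

    // Subtract other * factor from value; the loop stands in for the
    // optimized bigit arithmetic being cross-checked.
    uint64_t SubtractTimes(uint64_t value, uint64_t other, uint32_t factor) {
    #ifdef DEBUG
      const uint64_t expected = value - other * factor;  // reference result
    #endif
      for (uint32_t i = 0; i < factor; ++i) {
        value -= other;  // the "clever" path under test
      }
    #ifdef DEBUG
      assert(value == expected);
    #endif
      return value;
    }

    int main() { return static_cast<int>(SubtractTimes(100, 7, 3) != 79); }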

120
deps/v8/src/bootstrapper.cc

@ -199,6 +199,8 @@ class Genesis BASE_EMBEDDED {
const char* name,
ElementsKind elements_kind);
bool InstallNatives();
void InstallTypedArray(const char* name);
bool InstallExperimentalNatives();
void InstallBuiltinFunctionIds();
void InstallJSFunctionResultCaches();
@ -303,14 +305,11 @@ Handle<Context> Bootstrapper::CreateEnvironment(
v8::ExtensionConfiguration* extensions) {
HandleScope scope(isolate_);
Genesis genesis(isolate_, global_object, global_template, extensions);
Handle<Context> env = genesis.result();
if (env.is_null() || !InstallExtensions(env, extensions)) {
return Handle<Context>();
}
return scope.CloseAndEscape(env);
}
@ -477,25 +476,10 @@ Handle<JSFunction> Genesis::CreateEmptyFunction(Isolate* isolate) {
native_context()->set_object_function(*object_fun);
// Allocate a new prototype for the object function.
Handle<JSObject> prototype = factory->NewJSObject(
isolate->object_function(),
TENURED);
native_context()->set_initial_object_prototype(*prototype);
SetPrototype(object_fun, prototype);
}
@ -1276,6 +1260,14 @@ bool Genesis::InitializeGlobal(Handle<GlobalObject> inner_global,
}
void Genesis::InstallTypedArray(const char* name) {
Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
InstallFunction(global, name, JS_TYPED_ARRAY_TYPE,
JSTypedArray::kSize, isolate()->initial_object_prototype(),
Builtins::kIllegal, true);
}
void Genesis::InitializeExperimentalGlobal() {
Handle<JSObject> global = Handle<JSObject>(native_context()->global_object());
@ -1293,33 +1285,80 @@ void Genesis::InitializeExperimentalGlobal() {
if (FLAG_harmony_collections) {
{ // -- S e t
InstallFunction(global, "Set", JS_SET_TYPE, JSSet::kSize,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true);
}
{ // -- M a p
InstallFunction(global, "Map", JS_MAP_TYPE, JSMap::kSize,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true);
}
{ // -- W e a k M a p
InstallFunction(global, "WeakMap", JS_WEAK_MAP_TYPE, JSWeakMap::kSize,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true);
}
}
if (FLAG_harmony_typed_arrays) {
{ // -- A r r a y B u f f e r
InstallFunction(global, "__ArrayBuffer", JS_ARRAY_BUFFER_TYPE,
JSArrayBuffer::kSize,
isolate()->initial_object_prototype(),
Builtins::kIllegal, true);
}
{
// -- T y p e d A r r a y s
InstallTypedArray("__Int8Array");
InstallTypedArray("__Uint8Array");
InstallTypedArray("__Int16Array");
InstallTypedArray("__Uint16Array");
InstallTypedArray("__Int32Array");
InstallTypedArray("__Uint32Array");
InstallTypedArray("__Float32Array");
InstallTypedArray("__Float64Array");
}
}
if (FLAG_harmony_generators) {
// Create generator meta-objects and install them on the builtins object.
Handle<JSObject> builtins(native_context()->builtins());
Handle<JSObject> generator_object_prototype =
factory()->NewJSObject(isolate()->object_function(), TENURED);
Handle<JSFunction> generator_function_prototype =
InstallFunction(builtins, "GeneratorFunctionPrototype",
JS_FUNCTION_TYPE, JSFunction::kHeaderSize,
generator_object_prototype, Builtins::kIllegal,
false);
InstallFunction(builtins, "GeneratorFunction",
JS_FUNCTION_TYPE, JSFunction::kSize,
generator_function_prototype, Builtins::kIllegal,
false);
// Create maps for generator functions and their prototypes. Store those
// maps in the native context.
Handle<Map> function_map(native_context()->function_map());
Handle<Map> generator_function_map = factory()->CopyMap(function_map);
generator_function_map->set_prototype(*generator_function_prototype);
native_context()->set_generator_function_map(*generator_function_map);
Handle<Map> strict_mode_function_map(
native_context()->strict_mode_function_map());
Handle<Map> strict_mode_generator_function_map = factory()->CopyMap(
strict_mode_function_map);
strict_mode_generator_function_map->set_prototype(
*generator_function_prototype);
native_context()->set_strict_mode_generator_function_map(
*strict_mode_generator_function_map);
Handle<Map> object_map(native_context()->object_function()->initial_map());
Handle<Map> generator_object_prototype_map = factory()->CopyMap(
object_map, 0);
generator_object_prototype_map->set_prototype(
*generator_object_prototype);
native_context()->set_generator_object_prototype_map(
*generator_object_prototype_map);
}
}
@ -1933,6 +1972,11 @@ bool Genesis::InstallExperimentalNatives() {
"native typedarray.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
if (FLAG_harmony_generators &&
strcmp(ExperimentalNatives::GetScriptName(i).start(),
"native generator.js") == 0) {
if (!CompileExperimentalBuiltin(isolate(), i)) return false;
}
}
InstallExperimentalNativeFunctions();

40
deps/v8/src/builtins-decls.h

@ -0,0 +1,40 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_BUILTINS_DECLS_H_
#define V8_BUILTINS_DECLS_H_
#include "arguments.h"
namespace v8 {
namespace internal {
DECLARE_RUNTIME_FUNCTION(MaybeObject*, ArrayConstructor_StubFailure);
} } // namespace v8::internal
#endif // V8_BUILTINS_DECLS_H_

40
deps/v8/src/builtins.cc

@ -125,23 +125,31 @@ BUILTIN_LIST_C(DEF_ARG_TYPE)
#ifdef DEBUG
#define BUILTIN(name) \
MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
name##ArgumentsType args, Isolate* isolate); \
MUST_USE_RESULT static MaybeObject* Builtin_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
name##ArgumentsType args(args_length, args_object); \
ASSERT(isolate == Isolate::Current()); \
args.Verify(); \
return Builtin_Impl_##name(args, isolate); \
} \
MUST_USE_RESULT static MaybeObject* Builtin_Impl_##name( \
name##ArgumentsType args, Isolate* isolate)
#else // For release mode.
#define BUILTIN(name) \
static MaybeObject* Builtin_impl##name( \
name##ArgumentsType args, Isolate* isolate); \
static MaybeObject* Builtin_##name( \
int args_length, Object** args_object, Isolate* isolate) { \
name##ArgumentsType args(args_length, args_object); \
return Builtin_impl##name(args, isolate); \
} \
static MaybeObject* Builtin_impl##name( \
name##ArgumentsType args, Isolate* isolate)
#endif
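Both versions of the BUILTIN macro now emit a trampoline: the externally visible Builtin_<name> takes a flat (int, Object**, Isolate*) argument list that is easy to call from generated code, rebuilds the typed Arguments wrapper, and forwards to the real implementation. A compilable sketch of the pattern with stand-in types:

    #include <cstdio>

    struct Isolate;                                // opaque, as in the sketch
    struct Arguments { int length; void** slots; };

    #define BUILTIN(name)                                                 \
      static int Builtin_Impl_##name(Arguments args, Isolate* isolate);   \
      static int Builtin_##name(int args_length, void** args_object,      \
                                Isolate* isolate) {                       \
        Arguments args = { args_length, args_object };                    \
        return Builtin_Impl_##name(args, isolate);                        \
      }                                                                   \
      static int Builtin_Impl_##name(Arguments args, Isolate* isolate)

    BUILTIN(Example) {
      (void)isolate;
      printf("called with %d args\n", args.length);
      return 0;
    }

    int main() { return Builtin_Example(0, 0, 0); }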
@ -323,9 +331,9 @@ static void MoveDoubleElements(FixedDoubleArray* dst,
int src_index,
int len) {
if (len == 0) return;
OS::MemMove(dst->data_start() + dst_index,
src->data_start() + src_index,
len * kDoubleSize);
}

2
deps/v8/src/builtins.h

@ -274,8 +274,6 @@ enum BuiltinExtraArguments {
V(APPLY_PREPARE, 1) \
V(APPLY_OVERFLOW, 1)
MaybeObject* ArrayConstructor_StubFailure(Arguments args, Isolate* isolate);
class BuiltinFunctionTable;
class ObjectVisitor;

72
deps/v8/src/code-stubs-hydrogen.cc

@ -147,6 +147,8 @@ bool CodeStubGraphBuilderBase::BuildGraph() {
AddSimulate(BailoutId::StubEntry());
NoObservableSideEffectsScope no_effects(this);
HValue* return_value = BuildCodeStub();
// We might have extra expressions to pop from the stack in addition to the
@ -188,6 +190,70 @@ static Handle<Code> DoGenerateCode(Stub* stub) {
}
template <>
HValue* CodeStubGraphBuilder<FastCloneShallowArrayStub>::BuildCodeStub() {
Zone* zone = this->zone();
Factory* factory = isolate()->factory();
AllocationSiteMode alloc_site_mode = casted_stub()->allocation_site_mode();
FastCloneShallowArrayStub::Mode mode = casted_stub()->mode();
int length = casted_stub()->length();
HInstruction* boilerplate =
AddInstruction(new(zone) HLoadKeyed(GetParameter(0),
GetParameter(1),
NULL,
FAST_ELEMENTS));
CheckBuilder builder(this);
builder.CheckNotUndefined(boilerplate);
if (mode == FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS) {
HValue* elements =
AddInstruction(new(zone) HLoadElements(boilerplate, NULL));
IfBuilder if_fixed_cow(this);
if_fixed_cow.BeginIfMapEquals(elements, factory->fixed_cow_array_map());
environment()->Push(BuildCloneShallowArray(context(),
boilerplate,
alloc_site_mode,
FAST_ELEMENTS,
0/*copy-on-write*/));
if_fixed_cow.BeginElse();
IfBuilder if_fixed(this);
if_fixed.BeginIfMapEquals(elements, factory->fixed_array_map());
environment()->Push(BuildCloneShallowArray(context(),
boilerplate,
alloc_site_mode,
FAST_ELEMENTS,
length));
if_fixed.BeginElse();
environment()->Push(BuildCloneShallowArray(context(),
boilerplate,
alloc_site_mode,
FAST_DOUBLE_ELEMENTS,
length));
} else {
ElementsKind elements_kind = casted_stub()->ComputeElementsKind();
environment()->Push(BuildCloneShallowArray(context(),
boilerplate,
alloc_site_mode,
elements_kind,
length));
}
return environment()->Pop();
}
Handle<Code> FastCloneShallowArrayStub::GenerateCode() {
CodeStubGraphBuilder<FastCloneShallowArrayStub> builder(this);
LChunk* chunk = OptimizeGraph(builder.CreateGraph());
return chunk->Codegen(Code::COMPILED_STUB);
}
template <>
HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
Zone* zone = this->zone();
@ -230,7 +296,6 @@ HValue* CodeStubGraphBuilder<FastCloneShallowObjectStub>::BuildCodeStub() {
factory->empty_string(),
value,
true, i));
AddSimulate(BailoutId::StubEntry());
}
builder.End();
@ -264,7 +329,6 @@ HValue* CodeStubGraphBuilder<KeyedStoreFastElementStub>::BuildCodeStub() {
GetParameter(0), GetParameter(1), GetParameter(2), NULL,
casted_stub()->is_js_array(), casted_stub()->elements_kind(),
true, casted_stub()->store_mode(), Representation::Tagged());
AddSimulate(BailoutId::StubEntry(), REMOVABLE_SIMULATE);
return GetParameter(2);
}
@ -308,7 +372,7 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
AddInstruction(new(zone) HFixedArrayBaseLength(elements));
HValue* new_elements =
BuildAllocateAndInitializeElements(context(), to_kind, elements_length);
BuildCopyElements(context(), elements,
casted_stub()->from_kind(), new_elements,
@ -320,13 +384,11 @@ HValue* CodeStubGraphBuilder<TransitionElementsKindStub>::BuildCodeStub() {
factory->elements_field_string(),
new_elements, true,
JSArray::kElementsOffset));
AddSimulate(BailoutId::StubEntry());
if_builder.End();
AddInstruction(new(zone) HStoreNamedField(js_array, factory->length_string(),
map, true, JSArray::kMapOffset));
AddSimulate(BailoutId::StubEntry());
return js_array;
}

6
deps/v8/src/code-stubs.cc

@ -619,8 +619,10 @@ void ElementsTransitionAndStoreStub::Generate(MacroAssembler* masm) {
void StubFailureTrampolineStub::GenerateAheadOfTime(Isolate* isolate) {
StubFailureTrampolineStub stub1(NOT_JS_FUNCTION_STUB_MODE);
StubFailureTrampolineStub stub2(JS_FUNCTION_STUB_MODE);
stub1.GetCode(isolate)->set_is_pregenerated(true);
stub2.GetCode(isolate)->set_is_pregenerated(true);
}

56
deps/v8/src/code-stubs.h

@ -393,17 +393,24 @@ class ToNumberStub: public PlatformCodeStub {
class FastNewClosureStub : public PlatformCodeStub {
public:
explicit FastNewClosureStub(LanguageMode language_mode, bool is_generator)
: language_mode_(language_mode),
is_generator_(is_generator) { }
void Generate(MacroAssembler* masm);
private:
class StrictModeBits: public BitField<bool, 0, 1> {};
class IsGeneratorBits: public BitField<bool, 1, 1> {};
Major MajorKey() { return FastNewClosure; }
int MinorKey() {
return StrictModeBits::encode(language_mode_ != CLASSIC_MODE) |
IsGeneratorBits::encode(is_generator_);
}
LanguageMode language_mode_;
bool is_generator_;
};
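The MinorKey rewrite swaps an ad-hoc ternary for V8's BitField utility, which packs independent fields into disjoint bit ranges of one integer key. A hedged re-implementation of the encode/decode idiom (the real template lives in v8's utils.h):

    #include <cstdint>
    #include <cstdio>

    template <class T, int kShift, int kSize>
    struct BitField {
      static uint32_t encode(T value) {
        return static_cast<uint32_t>(value) << kShift;
      }
      static T decode(uint32_t packed) {
        return static_cast<T>((packed >> kShift) & ((1u << kSize) - 1));
      }
    };

    typedef BitField<bool, 0, 1> StrictModeBits;
    typedef BitField<bool, 1, 1> IsGeneratorBits;

    int main() {
      // Pack two independent flags into one stub key, as MinorKey() does.
      uint32_t key = StrictModeBits::encode(true) | IsGeneratorBits::encode(false);
      printf("strict=%d generator=%d\n",
             static_cast<int>(StrictModeBits::decode(key)),
             static_cast<int>(IsGeneratorBits::decode(key)));
    }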
@ -443,7 +450,7 @@ class FastNewBlockContextStub : public PlatformCodeStub {
};
class FastCloneShallowArrayStub : public HydrogenCodeStub {
public:
// Maximum length of copied elements array.
static const int kMaximumClonedLength = 8;
@ -467,7 +474,31 @@ class FastCloneShallowArrayStub : public PlatformCodeStub {
ASSERT_LE(length_, kMaximumClonedLength);
}
Mode mode() const { return mode_; }
int length() const { return length_; }
AllocationSiteMode allocation_site_mode() const {
return allocation_site_mode_;
}
ElementsKind ComputeElementsKind() const {
switch (mode()) {
case CLONE_ELEMENTS:
case COPY_ON_WRITE_ELEMENTS:
return FAST_ELEMENTS;
case CLONE_DOUBLE_ELEMENTS:
return FAST_DOUBLE_ELEMENTS;
case CLONE_ANY_ELEMENTS:
/*fall-through*/;
}
UNREACHABLE();
return LAST_ELEMENTS_KIND;
}
virtual Handle<Code> GenerateCode();
virtual void InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor);
private:
Mode mode_;
@ -746,7 +777,7 @@ class BinaryOpStub: public PlatformCodeStub {
private:
Token::Value op_;
OverwriteMode mode_;
bool platform_specific_bit_; // Indicates SSE3 on IA32, VFP2 on ARM.
bool platform_specific_bit_; // Indicates SSE3 on IA32.
// Operand type information determined at runtime.
BinaryOpIC::TypeInfo left_type_;
@ -1604,18 +1635,25 @@ class StoreArrayLiteralElementStub : public PlatformCodeStub {
class StubFailureTrampolineStub : public PlatformCodeStub {
public:
explicit StubFailureTrampolineStub(StubFunctionMode function_mode)
: function_mode_(function_mode) {}
: fp_registers_(CanUseFPRegisters()), function_mode_(function_mode) {}
virtual bool IsPregenerated() { return true; }
static void GenerateAheadOfTime(Isolate* isolate);
private:
class FPRegisters: public BitField<bool, 0, 1> {};
class FunctionModeField: public BitField<StubFunctionMode, 1, 1> {};
Major MajorKey() { return StubFailureTrampoline; }
int MinorKey() { return static_cast<int>(function_mode_); }
int MinorKey() {
return FPRegisters::encode(fp_registers_) |
FunctionModeField::encode(function_mode_);
}
void Generate(MacroAssembler* masm);
bool fp_registers_;
StubFunctionMode function_mode_;
DISALLOW_COPY_AND_ASSIGN(StubFailureTrampolineStub);

97
deps/v8/src/collection.js

@ -27,16 +27,20 @@
"use strict";
// This file relies on the fact that the following declaration has been made
// in runtime.js:
// var $Array = global.Array;
var $Set = global.Set;
var $Map = global.Map;
var $WeakMap = global.WeakMap;
//-------------------------------------------------------------------
// Global sentinel to be used instead of undefined keys, which are not
// supported internally but required for Harmony sets and maps.
var undefined_sentinel = {};
// -------------------------------------------------------------------
// Harmony Set
function SetConstructor() {
if (%_IsConstructCall()) {
@ -107,6 +111,31 @@ function SetClear() {
}
// -------------------------------------------------------------------
function SetUpSet() {
%CheckIsBootstrapping();
%SetCode($Set, SetConstructor);
%FunctionSetPrototype($Set, new $Object());
%SetProperty($Set.prototype, "constructor", $Set, DONT_ENUM);
// Set up the non-enumerable functions on the Set prototype object.
InstallGetter($Set.prototype, "size", SetGetSize);
InstallFunctions($Set.prototype, DONT_ENUM, $Array(
"add", SetAdd,
"has", SetHas,
"delete", SetDelete,
"clear", SetClear
));
}
SetUpSet();
// -------------------------------------------------------------------
// Harmony Map
function MapConstructor() {
if (%_IsConstructCall()) {
%MapInitialize(this);
@ -183,6 +212,32 @@ function MapClear() {
}
// -------------------------------------------------------------------
function SetUpMap() {
%CheckIsBootstrapping();
%SetCode($Map, MapConstructor);
%FunctionSetPrototype($Map, new $Object());
%SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
// Set up the non-enumerable functions on the Map prototype object.
InstallGetter($Map.prototype, "size", MapGetSize);
InstallFunctions($Map.prototype, DONT_ENUM, $Array(
"get", MapGet,
"set", MapSet,
"has", MapHas,
"delete", MapDelete,
"clear", MapClear
));
}
SetUpMap();
// -------------------------------------------------------------------
// Harmony WeakMap
function WeakMapConstructor() {
if (%_IsConstructCall()) {
%WeakMapInitialize(this);
@ -239,42 +294,14 @@ function WeakMapDelete(key) {
return %WeakMapDelete(this, key);
}
// -------------------------------------------------------------------
(function () {
function SetUpWeakMap() {
%CheckIsBootstrapping();
// Set up the Set and Map constructor function.
%SetCode($Set, SetConstructor);
%SetCode($Map, MapConstructor);
// Set up the constructor property on the Set and Map prototype object.
%SetProperty($Set.prototype, "constructor", $Set, DONT_ENUM);
%SetProperty($Map.prototype, "constructor", $Map, DONT_ENUM);
// Set up the non-enumerable functions on the Set prototype object.
InstallGetter($Set.prototype, "size", SetGetSize);
InstallFunctions($Set.prototype, DONT_ENUM, $Array(
"add", SetAdd,
"has", SetHas,
"delete", SetDelete,
"clear", SetClear
));
// Set up the non-enumerable functions on the Map prototype object.
InstallGetter($Map.prototype, "size", MapGetSize);
InstallFunctions($Map.prototype, DONT_ENUM, $Array(
"get", MapGet,
"set", MapSet,
"has", MapHas,
"delete", MapDelete,
"clear", MapClear
));
// Set up the WeakMap constructor function.
%SetCode($WeakMap, WeakMapConstructor);
// Set up the constructor property on the WeakMap prototype object.
%FunctionSetPrototype($WeakMap, new $Object());
%SetProperty($WeakMap.prototype, "constructor", $WeakMap, DONT_ENUM);
// Set up the non-enumerable functions on the WeakMap prototype object.
@ -284,4 +311,6 @@ function WeakMapDelete(key) {
"has", WeakMapHas,
"delete", WeakMapDelete
));
})();
}
SetUpWeakMap();

31
deps/v8/src/compiler.cc

@ -520,14 +520,15 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
// Only allow non-global compiles for eval.
ASSERT(info->is_eval() || info->is_global());
ParsingFlags flags = kNoParsingFlags;
if ((info->pre_parse_data() != NULL ||
String::cast(script->source())->length() > FLAG_min_preparse_length) &&
!DebuggerWantsEagerCompilation(info)) {
flags = kAllowLazy;
}
if (!ParserApi::Parse(info, flags)) {
return Handle<SharedFunctionInfo>::null();
{
Parser parser(info);
if ((info->pre_parse_data() != NULL ||
String::cast(script->source())->length() > FLAG_min_preparse_length) &&
!DebuggerWantsEagerCompilation(info))
parser.set_allow_lazy(true);
if (!parser.Parse()) {
return Handle<SharedFunctionInfo>::null();
}
}
// Measure how long it takes to do the compilation; only take the
@ -864,7 +865,7 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
if (InstallCodeFromOptimizedCodeMap(info)) return true;
// Generate the AST for the lazily compiled function.
if (ParserApi::Parse(info, kNoParsingFlags)) {
if (Parser::Parse(info)) {
// Measure how long it takes to do the lazy compilation; only take the
// rest of the function into account to avoid overlap with the lazy
// parsing statistics.
@ -932,7 +933,7 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
return;
}
if (ParserApi::Parse(*info, kNoParsingFlags)) {
if (Parser::Parse(*info)) {
LanguageMode language_mode = info->function()->language_mode();
info->SetLanguageMode(language_mode);
shared->set_language_mode(language_mode);
@ -957,18 +958,18 @@ void Compiler::RecompileParallel(Handle<JSFunction> closure) {
}
}
if (shared->code()->stack_check_patched_for_osr()) {
if (shared->code()->back_edges_patched_for_osr()) {
// At this point we either put the function on the recompilation queue or
// aborted optimization. In either case we want to continue executing
// the unoptimized code without running into OSR. If the unoptimized
// code has been patched for OSR, unpatch it.
InterruptStub interrupt_stub;
Handle<Code> check_code = interrupt_stub.GetCode(isolate);
Handle<Code> interrupt_code = interrupt_stub.GetCode(isolate);
Handle<Code> replacement_code =
isolate->builtins()->OnStackReplacement();
Deoptimizer::RevertStackCheckCode(shared->code(),
*check_code,
*replacement_code);
Deoptimizer::RevertInterruptCode(shared->code(),
*interrupt_code,
*replacement_code);
}
if (isolate->has_pending_exception()) isolate->clear_pending_exception();

18
deps/v8/src/contexts.h

@ -165,6 +165,11 @@ enum BindingFlags {
V(PROXY_ENUMERATE_INDEX, JSFunction, proxy_enumerate) \
V(OBSERVERS_NOTIFY_CHANGE_INDEX, JSFunction, observers_notify_change) \
V(OBSERVERS_DELIVER_CHANGES_INDEX, JSFunction, observers_deliver_changes) \
V(GENERATOR_FUNCTION_MAP_INDEX, Map, generator_function_map) \
V(STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX, Map, \
strict_mode_generator_function_map) \
V(GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX, Map, \
generator_object_prototype_map) \
V(RANDOM_SEED_INDEX, ByteArray, random_seed)
// JSFunctions are pairs (context, function code), sometimes also called
@ -295,6 +300,9 @@ class Context: public FixedArray {
PROXY_ENUMERATE_INDEX,
OBSERVERS_NOTIFY_CHANGE_INDEX,
OBSERVERS_DELIVER_CHANGES_INDEX,
GENERATOR_FUNCTION_MAP_INDEX,
STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX,
GENERATOR_OBJECT_PROTOTYPE_MAP_INDEX,
RANDOM_SEED_INDEX,
// Properties from here are treated as weak references by the full GC.
@ -439,6 +447,16 @@ class Context: public FixedArray {
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}
static int FunctionMapIndex(LanguageMode language_mode, bool is_generator) {
return is_generator
? (language_mode == CLASSIC_MODE
? GENERATOR_FUNCTION_MAP_INDEX
: STRICT_MODE_GENERATOR_FUNCTION_MAP_INDEX)
: (language_mode == CLASSIC_MODE
? FUNCTION_MAP_INDEX
: STRICT_MODE_FUNCTION_MAP_INDEX);
}
static const int kSize = kHeaderSize + NATIVE_CONTEXT_SLOTS * kPointerSize;
// GC support.

2
deps/v8/src/conversions-inl.h

@ -77,7 +77,7 @@ inline unsigned int FastD2UI(double x) {
uint32_t result;
Address mantissa_ptr = reinterpret_cast<Address>(&x);
// Copy least significant 32 bits of mantissa.
memcpy(&result, mantissa_ptr, sizeof(result));
OS::MemCopy(&result, mantissa_ptr, sizeof(result));
return negative ? ~result + 1 : result;
}
// Large number (outside uint32 range), Infinity or NaN.
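
The memcpy-to-OS::MemCopy switch here is mechanical, but the trick it sits inside is worth spelling out: in the unchanged part of FastD2UI, 2^52 is first added to the double, which pins the integer value into the low mantissa bits so they can be read straight out of memory. A stand-alone demonstration, assuming a little-endian host as the V8 code does:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  const double k2Pow52 = 4503599627370496.0;  // 2^52
  double x = 4000000000.0;  // above INT32_MAX, below 2^32
  x += k2Pow52;             // integer value now sits in the low mantissa bits
  uint32_t result;
  std::memcpy(&result, &x, sizeof(result));  // least significant 32 bits
  std::printf("%u\n", result);               // prints 4000000000
  return 0;
}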

4
deps/v8/src/cpu-profiler.cc

@ -445,7 +445,7 @@ void CpuProfiler::StartProcessorIfNotStarted() {
generator_ = new ProfileGenerator(profiles_);
processor_ = new ProfilerEventsProcessor(generator_);
is_profiling_ = true;
processor_->Start();
processor_->StartSynchronously();
// Enumerate stuff we already have in the heap.
if (isolate_->heap()->HasBeenSetUp()) {
if (!FLAG_prof_browser_mode) {
@ -459,11 +459,11 @@ void CpuProfiler::StartProcessorIfNotStarted() {
}
// Enable stack sampling.
Sampler* sampler = reinterpret_cast<Sampler*>(isolate_->logger()->ticker_);
sampler->IncreaseProfilingDepth();
if (!sampler->IsActive()) {
sampler->Start();
need_to_stop_sampler_ = true;
}
sampler->IncreaseProfilingDepth();
}
}

1
deps/v8/src/cpu-profiler.h

@ -31,6 +31,7 @@
#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
#include "sampler.h"
#include "unbound-queue.h"
namespace v8 {

12
deps/v8/src/d8.cc

@ -45,6 +45,10 @@
#include "../include/v8-testing.h"
#endif // V8_SHARED
#ifdef ENABLE_VTUNE_JIT_INTERFACE
#include "third_party/vtune/v8-vtune.h"
#endif
#include "d8.h"
#ifndef V8_SHARED
@ -144,6 +148,11 @@ class DumbLineEditor: public LineEditor {
Handle<String> DumbLineEditor::Prompt(const char* prompt) {
printf("%s", prompt);
#if defined(__native_client__)
// Native Client libc is used to being embedded in Chrome and
// has trouble recognizing when to flush.
fflush(stdout);
#endif
return Shell::ReadFromStdin(isolate_);
}
@ -1921,6 +1930,9 @@ int Shell::Main(int argc, char* argv[]) {
DumbLineEditor dumb_line_editor(isolate);
{
Initialize(isolate);
#ifdef ENABLE_VTUNE_JIT_INTERFACE
vTune::InitilizeVtuneForV8();
#endif
Symbols symbols(isolate);
InitializeDebugger(isolate);

7
deps/v8/src/d8.gyp

@ -29,6 +29,8 @@
'includes': ['../build/common.gypi'],
'variables': {
'console%': '',
# Enable support for Intel VTune. Supported on ia32/x64 only
'v8_enable_vtunejit%': 0,
},
'targets': [
{
@ -70,6 +72,11 @@
}],
],
}],
['v8_enable_vtunejit==1', {
'dependencies': [
'../src/third_party/vtune/v8vtune.gyp:v8_vtune',
],
}],
],
},
{

19
deps/v8/src/date.js

@ -25,20 +25,16 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file relies on the fact that the following declarations have been made
// in v8natives.js:
// var $isFinite = GlobalIsFinite;
var $Date = global.Date;
// -------------------------------------------------------------------
// This file contains date support implemented in JavaScript.
// Keep reference to original values of some global properties. This
// has the added benefit that the code in this file is isolated from
// changes to these properties.
var $Date = global.Date;
// Helper function to throw error.
function ThrowDateTypeError() {
throw new $TypeError('this is not a Date object.');
@ -142,7 +138,7 @@ var Date_cache = {
};
%SetCode($Date, function(year, month, date, hours, minutes, seconds, ms) {
function DateConstructor(year, month, date, hours, minutes, seconds, ms) {
if (!%_IsConstructCall()) {
// ECMA 262 - 15.9.2
return (new $Date()).toString();
@ -199,10 +195,7 @@ var Date_cache = {
value = MakeDate(day, time);
SET_LOCAL_DATE_VALUE(this, value);
}
});
%FunctionSetPrototype($Date, new $Date($NaN));
}
var WeekDays = ['Sun', 'Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat'];
@ -767,6 +760,10 @@ function ResetDateCache() {
function SetUpDate() {
%CheckIsBootstrapping();
%SetCode($Date, DateConstructor);
%FunctionSetPrototype($Date, new $Date($NaN));
// Set up non-enumerable properties of the Date object itself.
InstallFunctions($Date, DONT_ENUM, $Array(
"UTC", DateUTC,

16
deps/v8/src/debug.cc

@ -551,9 +551,9 @@ void Debug::ThreadInit() {
char* Debug::ArchiveDebug(char* storage) {
char* to = storage;
memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
OS::MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
to += sizeof(ThreadLocal);
memcpy(to, reinterpret_cast<char*>(&registers_), sizeof(registers_));
OS::MemCopy(to, reinterpret_cast<char*>(&registers_), sizeof(registers_));
ThreadInit();
ASSERT(to <= storage + ArchiveSpacePerThread());
return storage + ArchiveSpacePerThread();
@ -562,9 +562,10 @@ char* Debug::ArchiveDebug(char* storage) {
char* Debug::RestoreDebug(char* storage) {
char* from = storage;
memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
OS::MemCopy(
reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
from += sizeof(ThreadLocal);
memcpy(reinterpret_cast<char*>(&registers_), from, sizeof(registers_));
OS::MemCopy(reinterpret_cast<char*>(&registers_), from, sizeof(registers_));
ASSERT(from <= storage + ArchiveSpacePerThread());
return storage + ArchiveSpacePerThread();
}
@ -874,8 +875,9 @@ bool Debug::Load() {
// Check for caught exceptions.
if (caught_exception) return false;
// Debugger loaded.
debug_context_ = context;
// Debugger loaded, create debugger context global handle.
debug_context_ = Handle<Context>::cast(
isolate_->global_handles()->Create(*context));
return true;
}
@ -891,7 +893,7 @@ void Debug::Unload() {
DestroyScriptCache();
// Clear debugger context global handle.
Isolate::Current()->global_handles()->Destroy(
isolate_->global_handles()->Destroy(
reinterpret_cast<Object**>(debug_context_.location()));
debug_context_ = Handle<Context>();
}

116
deps/v8/src/deoptimizer.cc

@ -1204,6 +1204,7 @@ void Deoptimizer::DoComputeCompiledStubFrame(TranslationIterator* iterator,
// and the standard stack frame slots. Include space for an argument
// object to the callee and optionally the space to pass the argument
// object to the stub failure handler.
ASSERT(descriptor->register_param_count_ >= 0);
int height_in_bytes = kPointerSize * descriptor->register_param_count_ +
sizeof(Arguments) + kPointerSize;
int fixed_frame_size = StandardFrameConstants::kFixedFrameSize;
@ -2029,52 +2030,96 @@ bool Deoptimizer::DoOsrTranslateCommand(TranslationIterator* iterator,
}
void Deoptimizer::PatchStackCheckCode(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
// Iterate over the stack check table and patch every stack check
void Deoptimizer::PatchInterruptCode(Code* unoptimized_code,
Code* interrupt_code,
Code* replacement_code) {
// Iterate over the back edge table and patch every interrupt
// call to an unconditional call to the replacement code.
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
ASSERT(!unoptimized_code->stack_check_patched_for_osr());
Address stack_check_cursor = unoptimized_code->instruction_start() +
unoptimized_code->stack_check_table_offset();
uint32_t table_length = Memory::uint32_at(stack_check_cursor);
stack_check_cursor += kIntSize;
int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level();
Address back_edge_cursor = unoptimized_code->instruction_start() +
unoptimized_code->back_edge_table_offset();
uint32_t table_length = Memory::uint32_at(back_edge_cursor);
back_edge_cursor += kIntSize;
for (uint32_t i = 0; i < table_length; ++i) {
uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
PatchStackCheckCodeAt(unoptimized_code,
pc_after,
check_code,
replacement_code);
stack_check_cursor += 2 * kIntSize;
uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
if (loop_depth == loop_nesting_level) {
// Loop back edge has the loop depth that we want to patch.
uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
PatchInterruptCodeAt(unoptimized_code,
pc_after,
interrupt_code,
replacement_code);
}
back_edge_cursor += FullCodeGenerator::kBackEdgeEntrySize;
}
unoptimized_code->set_stack_check_patched_for_osr(true);
unoptimized_code->set_back_edges_patched_for_osr(true);
#ifdef DEBUG
Deoptimizer::VerifyInterruptCode(
unoptimized_code, interrupt_code, replacement_code, loop_nesting_level);
#endif // DEBUG
}
void Deoptimizer::RevertStackCheckCode(Code* unoptimized_code,
Code* check_code,
Code* replacement_code) {
// Iterate over the stack check table and revert the patched
// stack check calls.
void Deoptimizer::RevertInterruptCode(Code* unoptimized_code,
Code* interrupt_code,
Code* replacement_code) {
// Iterate over the back edge table and revert the patched interrupt calls.
ASSERT(unoptimized_code->kind() == Code::FUNCTION);
ASSERT(unoptimized_code->stack_check_patched_for_osr());
Address stack_check_cursor = unoptimized_code->instruction_start() +
unoptimized_code->stack_check_table_offset();
uint32_t table_length = Memory::uint32_at(stack_check_cursor);
stack_check_cursor += kIntSize;
ASSERT(unoptimized_code->back_edges_patched_for_osr());
int loop_nesting_level = unoptimized_code->allow_osr_at_loop_nesting_level();
Address back_edge_cursor = unoptimized_code->instruction_start() +
unoptimized_code->back_edge_table_offset();
uint32_t table_length = Memory::uint32_at(back_edge_cursor);
back_edge_cursor += kIntSize;
for (uint32_t i = 0; i < table_length; ++i) {
uint32_t pc_offset = Memory::uint32_at(stack_check_cursor + kIntSize);
uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
if (loop_depth <= loop_nesting_level) {
uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
RevertInterruptCodeAt(unoptimized_code,
pc_after,
interrupt_code,
replacement_code);
}
back_edge_cursor += FullCodeGenerator::kBackEdgeEntrySize;
}
unoptimized_code->set_back_edges_patched_for_osr(false);
#ifdef DEBUG
// Assert that none of the back edges are patched anymore.
Deoptimizer::VerifyInterruptCode(
unoptimized_code, interrupt_code, replacement_code, -1);
#endif // DEBUG
}
#ifdef DEBUG
void Deoptimizer::VerifyInterruptCode(Code* unoptimized_code,
Code* interrupt_code,
Code* replacement_code,
int loop_nesting_level) {
CHECK(unoptimized_code->kind() == Code::FUNCTION);
Address back_edge_cursor = unoptimized_code->instruction_start() +
unoptimized_code->back_edge_table_offset();
uint32_t table_length = Memory::uint32_at(back_edge_cursor);
back_edge_cursor += kIntSize;
for (uint32_t i = 0; i < table_length; ++i) {
uint8_t loop_depth = Memory::uint8_at(back_edge_cursor + 2 * kIntSize);
CHECK_LE(loop_depth, Code::kMaxLoopNestingMarker);
// Assert that all back edges for shallower loops (and only those)
// have already been patched.
uint32_t pc_offset = Memory::uint32_at(back_edge_cursor + kIntSize);
Address pc_after = unoptimized_code->instruction_start() + pc_offset;
RevertStackCheckCodeAt(unoptimized_code,
pc_after,
check_code,
replacement_code);
stack_check_cursor += 2 * kIntSize;
CHECK_EQ((loop_depth <= loop_nesting_level),
InterruptCodeIsPatched(unoptimized_code,
pc_after,
interrupt_code,
replacement_code));
back_edge_cursor += FullCodeGenerator::kBackEdgeEntrySize;
}
unoptimized_code->set_stack_check_patched_for_osr(false);
}
#endif // DEBUG
unsigned Deoptimizer::ComputeInputFrameSize() const {
@ -2327,7 +2372,8 @@ int32_t TranslationIterator::Next() {
Handle<ByteArray> TranslationBuffer::CreateByteArray(Factory* factory) {
int length = contents_.length();
Handle<ByteArray> result = factory->NewByteArray(length, TENURED);
memcpy(result->GetDataStartAddress(), contents_.ToVector().start(), length);
OS::MemCopy(
result->GetDataStartAddress(), contents_.ToVector().start(), length);
return result;
}
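
The back edge table that PatchInterruptCode and RevertInterruptCode above walk with raw cursor arithmetic has a simple layout: a uint32 entry count, then 9-byte entries of uint32 AST id, uint32 pc offset and uint8 loop depth (kBackEdgeEntrySize in the full-codegen.h hunk further down). A sketch that serializes and re-walks such a table with the same offsets; all names here are illustrative:

#include <cstdint>
#include <cstdio>
#include <cstring>
#include <vector>

const size_t kIntSize = sizeof(uint32_t);
const size_t kBackEdgeEntrySize = 2 * kIntSize + 1;  // id + pc offset + depth

// Append one entry the way EmitBackEdgeTable does: dd(id), dd(pc), db(depth).
static void EmitEntry(std::vector<uint8_t>* t, uint32_t id, uint32_t pc,
                      uint8_t depth) {
  size_t pos = t->size();
  t->resize(pos + kBackEdgeEntrySize);
  std::memcpy(&(*t)[pos], &id, kIntSize);
  std::memcpy(&(*t)[pos + kIntSize], &pc, kIntSize);
  (*t)[pos + 2 * kIntSize] = depth;
}

int main() {
  std::vector<uint8_t> table(kIntSize);  // leading uint32 length field
  uint32_t length = 2;
  std::memcpy(&table[0], &length, kIntSize);
  EmitEntry(&table, /*id=*/7, /*pc=*/0x40, /*depth=*/1);
  EmitEntry(&table, /*id=*/9, /*pc=*/0x88, /*depth=*/2);

  // Walk it with the same cursor arithmetic PatchInterruptCode uses.
  const uint8_t* cursor = &table[0];
  std::memcpy(&length, cursor, kIntSize);
  cursor += kIntSize;
  for (uint32_t i = 0; i < length; ++i) {
    uint32_t pc_offset;
    std::memcpy(&pc_offset, cursor + kIntSize, kIntSize);
    uint8_t loop_depth = cursor[2 * kIntSize];
    std::printf("entry %u: pc offset 0x%x, loop depth %u\n",
                static_cast<unsigned>(i), pc_offset,
                static_cast<unsigned>(loop_depth));
    cursor += kBackEdgeEntrySize;
  }
  return 0;
}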

47
deps/v8/src/deoptimizer.h

@ -210,32 +210,45 @@ class Deoptimizer : public Malloced {
// The size in bytes of the code required at a lazy deopt patch site.
static int patch_size();
// Patch all stack guard checks in the unoptimized code to
// Patch all interrupts with allowed loop depth in the unoptimized code to
// unconditionally call replacement_code.
static void PatchStackCheckCode(Code* unoptimized_code,
Code* check_code,
Code* replacement_code);
static void PatchInterruptCode(Code* unoptimized_code,
Code* interrupt_code,
Code* replacement_code);
// Patch stack guard check at instruction before pc_after in
// Patch the interrupt at the instruction before pc_after in
// the unoptimized code to unconditionally call replacement_code.
static void PatchStackCheckCodeAt(Code* unoptimized_code,
static void PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code,
Code* replacement_code);
// Change all patched interrupt calls in the unoptimized code
// back to normal interrupts.
static void RevertInterruptCode(Code* unoptimized_code,
Code* interrupt_code,
Code* replacement_code);
// Change the patched interrupt in the unoptimized code
// back to a normal interrupt.
static void RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
Code* interrupt_code,
Code* replacement_code);
// Change all patched stack guard checks in the unoptimized code
// back to a normal stack guard check.
static void RevertStackCheckCode(Code* unoptimized_code,
Code* check_code,
Code* replacement_code);
// Change all patched stack guard checks in the unoptimized code
// back to a normal stack guard check.
static void RevertStackCheckCodeAt(Code* unoptimized_code,
#ifdef DEBUG
static bool InterruptCodeIsPatched(Code* unoptimized_code,
Address pc_after,
Code* check_code,
Code* interrupt_code,
Code* replacement_code);
// Verify that all back edges of a certain loop depth are patched.
static void VerifyInterruptCode(Code* unoptimized_code,
Code* interrupt_code,
Code* replacement_code,
int loop_nesting_level);
#endif // DEBUG
~Deoptimizer();
void MaterializeHeapObjects(JavaScriptFrameIterator* it);

11
deps/v8/src/disassembler.cc

@ -50,8 +50,8 @@ void Disassembler::Dump(FILE* f, byte* begin, byte* end) {
pc - begin,
*pc);
} else {
fprintf(f, "%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
reinterpret_cast<uintptr_t>(pc), pc - begin, *pc);
PrintF(f, "%" V8PRIxPTR " %4" V8PRIdPTR " %02x\n",
reinterpret_cast<uintptr_t>(pc), pc - begin, *pc);
}
}
}
@ -101,13 +101,12 @@ static void DumpBuffer(FILE* f, StringBuilder* out) {
if (f == NULL) {
PrintF("%s\n", out->Finalize());
} else {
fprintf(f, "%s\n", out->Finalize());
PrintF(f, "%s\n", out->Finalize());
}
out->Reset();
}
static const int kOutBufferSize = 2048 + String::kMaxShortPrintLength;
static const int kRelocInfoPosition = 57;
@ -337,10 +336,10 @@ void Disassembler::Decode(FILE* f, Code* code) {
code->kind() == Code::COMPILED_STUB)
? static_cast<int>(code->safepoint_table_offset())
: code->instruction_size();
// If there might be a stack check table, stop before reaching it.
// If there might be a back edge table, stop before reaching it.
if (code->kind() == Code::FUNCTION) {
decode_size =
Min(decode_size, static_cast<int>(code->stack_check_table_offset()));
Min(decode_size, static_cast<int>(code->back_edge_table_offset()));
}
byte* begin = code->instruction_start();

4
deps/v8/src/elements.cc

@ -183,7 +183,7 @@ static void CopyObjectToObjectElements(FixedArrayBase* from_base,
Address from_address = from->address() + FixedArray::kHeaderSize;
CopyWords(reinterpret_cast<Object**>(to_address) + to_start,
reinterpret_cast<Object**>(from_address) + from_start,
copy_size);
static_cast<size_t>(copy_size));
if (IsFastObjectElementsKind(from_kind) &&
IsFastObjectElementsKind(to_kind)) {
Heap* heap = from->GetHeap();
@ -339,7 +339,7 @@ static void CopyDoubleToDoubleElements(FixedArrayBase* from_base,
int words_per_double = (kDoubleSize / kPointerSize);
CopyWords(reinterpret_cast<Object**>(to_address),
reinterpret_cast<Object**>(from_address),
words_per_double * copy_size);
static_cast<size_t>(words_per_double * copy_size));
}

24
deps/v8/src/execution.cc

@ -33,6 +33,7 @@
#include "bootstrapper.h"
#include "codegen.h"
#include "debug.h"
#include "deoptimizer.h"
#include "isolate-inl.h"
#include "runtime-profiler.h"
#include "simulator.h"
@ -448,6 +449,19 @@ void StackGuard::RequestGC() {
}
bool StackGuard::IsFullDeopt() {
ExecutionAccess access(isolate_);
return (thread_local_.interrupt_flags_ & FULL_DEOPT) != 0;
}
void StackGuard::FullDeopt() {
ExecutionAccess access(isolate_);
thread_local_.interrupt_flags_ |= FULL_DEOPT;
set_interrupt_limits(access);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
bool StackGuard::IsDebugBreak() {
ExecutionAccess access(isolate_);
@ -488,7 +502,7 @@ void StackGuard::Continue(InterruptFlag after_what) {
char* StackGuard::ArchiveStackGuard(char* to) {
ExecutionAccess access(isolate_);
memcpy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
OS::MemCopy(to, reinterpret_cast<char*>(&thread_local_), sizeof(ThreadLocal));
ThreadLocal blank;
// Set the stack limits using the old thread_local_.
@ -505,7 +519,8 @@ char* StackGuard::ArchiveStackGuard(char* to) {
char* StackGuard::RestoreStackGuard(char* from) {
ExecutionAccess access(isolate_);
memcpy(reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
OS::MemCopy(
reinterpret_cast<char*>(&thread_local_), from, sizeof(ThreadLocal));
isolate_->heap()->SetStackLimits();
return from + sizeof(ThreadLocal);
}
@ -880,7 +895,6 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(GC_REQUEST);
}
isolate->counters()->stack_interrupts()->Increment();
isolate->counters()->runtime_profiler_ticks()->Increment();
isolate->runtime_profiler()->OptimizeNow();
@ -898,6 +912,10 @@ MaybeObject* Execution::HandleStackGuardInterrupt(Isolate* isolate) {
stack_guard->Continue(INTERRUPT);
return isolate->StackOverflow();
}
if (stack_guard->IsFullDeopt()) {
stack_guard->Continue(FULL_DEOPT);
Deoptimizer::DeoptimizeAll(isolate);
}
return isolate->heap()->undefined_value();
}
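
The FULL_DEOPT plumbing follows the same request/acknowledge protocol as the other interrupt flags: a requester ORs a bit into a shared word, the handler tests it, clears it with Continue(), then acts. A compact sketch of just that protocol — the enum values mirror the execution.h hunk that follows, but the class is illustrative, not V8's:

#include <cassert>

enum InterruptFlag {
  INTERRUPT = 1 << 0,
  GC_REQUEST = 1 << 5,
  FULL_DEOPT = 1 << 6
};

class StackGuardSketch {
 public:
  StackGuardSketch() : interrupt_flags_(0) {}
  void FullDeopt() { interrupt_flags_ |= FULL_DEOPT; }  // request
  bool IsFullDeopt() const { return (interrupt_flags_ & FULL_DEOPT) != 0; }
  void Continue(InterruptFlag after_what) {             // acknowledge
    interrupt_flags_ &= ~static_cast<int>(after_what);
  }
 private:
  int interrupt_flags_;
};

int main() {
  StackGuardSketch guard;
  guard.FullDeopt();
  if (guard.IsFullDeopt()) {
    guard.Continue(FULL_DEOPT);
    // ... Deoptimizer::DeoptimizeAll(isolate) would run here ...
  }
  assert(!guard.IsFullDeopt());
  return 0;
}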

5
deps/v8/src/execution.h

@ -41,7 +41,8 @@ enum InterruptFlag {
DEBUGCOMMAND = 1 << 2,
PREEMPT = 1 << 3,
TERMINATE = 1 << 4,
GC_REQUEST = 1 << 5
GC_REQUEST = 1 << 5,
FULL_DEOPT = 1 << 6
};
@ -197,6 +198,8 @@ class StackGuard {
#endif
bool IsGCRequest();
void RequestGC();
bool IsFullDeopt();
void FullDeopt();
void Continue(InterruptFlag after_what);
// This provides an asynchronous read of the stack limits for the current

20
deps/v8/src/factory.cc

@ -578,15 +578,22 @@ Handle<JSFunction> Factory::BaseNewFunctionFromSharedFunctionInfo(
}
static Handle<Map> MapForNewFunction(Isolate *isolate,
Handle<SharedFunctionInfo> function_info) {
Context *context = isolate->context()->native_context();
int map_index = Context::FunctionMapIndex(function_info->language_mode(),
function_info->is_generator());
return Handle<Map>(Map::cast(context->get(map_index)));
}
Handle<JSFunction> Factory::NewFunctionFromSharedFunctionInfo(
Handle<SharedFunctionInfo> function_info,
Handle<Context> context,
PretenureFlag pretenure) {
Handle<JSFunction> result = BaseNewFunctionFromSharedFunctionInfo(
function_info,
function_info->is_classic_mode()
? isolate()->function_map()
: isolate()->strict_mode_function_map(),
MapForNewFunction(isolate(), function_info),
pretenure);
if (function_info->ic_age() != isolate()->heap()->global_ic_age()) {
@ -874,14 +881,7 @@ Handle<JSFunction> Factory::NewFunctionWithPrototype(Handle<String> name,
initial_map->set_constructor(*function);
}
// Set function.prototype and give the prototype a constructor
// property that refers to the function.
SetPrototypeProperty(function, prototype);
// Currently safe because it is only invoked from Genesis.
CHECK_NOT_EMPTY_HANDLE(isolate(),
JSObject::SetLocalPropertyIgnoreAttributes(
prototype, constructor_string(),
function, DONT_ENUM));
return function;
}

35
deps/v8/src/flag-definitions.h

@ -119,6 +119,22 @@ public:
};
#endif
#if (defined CAN_USE_VFP3_INSTRUCTIONS) || !(defined ARM_TEST)
# define ENABLE_VFP3_DEFAULT true
#else
# define ENABLE_VFP3_DEFAULT false
#endif
#if (defined CAN_USE_ARMV7_INSTRUCTIONS) || !(defined ARM_TEST)
# define ENABLE_ARMV7_DEFAULT true
#else
# define ENABLE_ARMV7_DEFAULT false
#endif
#if (defined CAN_USE_VFP32DREGS) || !(defined ARM_TEST)
# define ENABLE_32DREGS_DEFAULT true
#else
# define ENABLE_32DREGS_DEFAULT false
#endif
#define DEFINE_bool(nam, def, cmt) FLAG(BOOL, bool, nam, def, cmt)
#define DEFINE_int(nam, def, cmt) FLAG(INT, int, nam, def, cmt)
#define DEFINE_float(nam, def, cmt) FLAG(FLOAT, double, nam, def, cmt)
@ -168,12 +184,12 @@ DEFINE_bool(packed_arrays, true, "optimizes arrays that have no holes")
DEFINE_bool(smi_only_arrays, true, "tracks arrays with only smi values")
DEFINE_bool(compiled_transitions, false, "use optimizing compiler to "
"generate array elements transition stubs")
DEFINE_bool(compiled_keyed_stores, false, "use optimizing compiler to "
DEFINE_bool(compiled_keyed_stores, true, "use optimizing compiler to "
"generate keyed store stubs")
DEFINE_bool(clever_optimizations,
true,
"Optimize object size, Array shift, DOM strings and string +")
DEFINE_bool(pretenure_literals, false, "allocate literals in old space")
DEFINE_bool(pretenure_literals, true, "allocate literals in old space")
// Flags for data representation optimizations
DEFINE_bool(unbox_double_arrays, true, "automatically unbox arrays of doubles")
@ -308,12 +324,9 @@ DEFINE_bool(enable_rdtsc, true,
"enable use of RDTSC instruction if available")
DEFINE_bool(enable_sahf, true,
"enable use of SAHF instruction if available (X64 only)")
DEFINE_bool(enable_vfp3, true,
"enable use of VFP3 instructions if available - this implies "
"enabling ARMv7 and VFP2 instructions (ARM only)")
DEFINE_bool(enable_vfp2, true,
"enable use of VFP2 instructions if available")
DEFINE_bool(enable_armv7, true,
DEFINE_bool(enable_vfp3, ENABLE_VFP3_DEFAULT,
"enable use of VFP3 instructions if available")
DEFINE_bool(enable_armv7, ENABLE_ARMV7_DEFAULT,
"enable use of ARMv7 instructions if available (ARM only)")
DEFINE_bool(enable_sudiv, true,
"enable use of SDIV and UDIV instructions if available (ARM only)")
@ -322,10 +335,8 @@ DEFINE_bool(enable_movw_movt, false,
"instruction pairs (ARM only)")
DEFINE_bool(enable_unaligned_accesses, true,
"enable unaligned accesses for ARMv7 (ARM only)")
DEFINE_bool(enable_32dregs, true,
DEFINE_bool(enable_32dregs, ENABLE_32DREGS_DEFAULT,
"enable use of d16-d31 registers on ARM - this requires VFP3")
DEFINE_bool(enable_fpu, true,
"enable use of MIPS FPU instructions if available (MIPS only)")
DEFINE_bool(enable_vldr_imm, false,
"enable use of constant pools for double immediate (ARM only)")
@ -502,6 +513,8 @@ DEFINE_int(sim_stack_alignment, 8,
"Stack alingment in bytes in simulator (4 or 8, 8 is default)")
// isolate.cc
DEFINE_bool(abort_on_uncaught_exception, false,
"abort program (dump core) when an uncaught exception is thrown")
DEFINE_bool(trace_exception, false,
"print stack trace when throwing exceptions")
DEFINE_bool(preallocate_message_memory, false,

29
deps/v8/src/flags.cc

@ -34,6 +34,9 @@
#include "smart-pointers.h"
#include "string-stream.h"
#ifdef V8_TARGET_ARCH_ARM
#include "arm/assembler-arm-inl.h"
#endif
namespace v8 {
namespace internal {
@ -305,7 +308,7 @@ static void SplitArgument(const char* arg,
// make a copy so we can NUL-terminate flag name
size_t n = arg - *name;
CHECK(n < static_cast<size_t>(buffer_size)); // buffer is too small
memcpy(buffer, *name, n);
OS::MemCopy(buffer, *name, n);
buffer[n] = '\0';
*name = buffer;
// get the value
@ -367,8 +370,8 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
// sense there.
continue;
} else {
fprintf(stderr, "Error: unrecognized flag %s\n"
"Try --help for options\n", arg);
PrintF(stderr, "Error: unrecognized flag %s\n"
"Try --help for options\n", arg);
return_code = j;
break;
}
@ -381,9 +384,9 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
if (i < *argc) {
value = argv[i++];
} else {
fprintf(stderr, "Error: missing value for flag %s of type %s\n"
"Try --help for options\n",
arg, Type2String(flag->type()));
PrintF(stderr, "Error: missing value for flag %s of type %s\n"
"Try --help for options\n",
arg, Type2String(flag->type()));
return_code = j;
break;
}
@ -424,9 +427,9 @@ int FlagList::SetFlagsFromCommandLine(int* argc,
if ((flag->type() == Flag::TYPE_BOOL && value != NULL) ||
(flag->type() != Flag::TYPE_BOOL && is_bool) ||
*endp != '\0') {
fprintf(stderr, "Error: illegal value for flag %s of type %s\n"
"Try --help for options\n",
arg, Type2String(flag->type()));
PrintF(stderr, "Error: illegal value for flag %s of type %s\n"
"Try --help for options\n",
arg, Type2String(flag->type()));
return_code = j;
break;
}
@ -475,7 +478,7 @@ static char* SkipBlackSpace(char* p) {
int FlagList::SetFlagsFromString(const char* str, int len) {
// make a 0-terminated copy of str
ScopedVector<char> copy0(len + 1);
memcpy(copy0.start(), str, len);
OS::MemCopy(copy0.start(), str, len);
copy0[len] = '\0';
// strip leading white space
@ -517,6 +520,12 @@ void FlagList::ResetAllFlags() {
// static
void FlagList::PrintHelp() {
#ifdef V8_TARGET_ARCH_ARM
CpuFeatures::PrintTarget();
CpuFeatures::Probe();
CpuFeatures::PrintFeatures();
#endif // V8_TARGET_ARCH_ARM
printf("Usage:\n");
printf(" shell [options] -e string\n");
printf(" execute string in V8\n");

13
deps/v8/src/frames.h

@ -84,6 +84,18 @@ class InnerPointerToCodeCache {
};
class StackHandlerConstants : public AllStatic {
public:
static const int kNextOffset = 0 * kPointerSize;
static const int kCodeOffset = 1 * kPointerSize;
static const int kStateOffset = 2 * kPointerSize;
static const int kContextOffset = 3 * kPointerSize;
static const int kFPOffset = 4 * kPointerSize;
static const int kSize = kFPOffset + kPointerSize;
};
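
Worked numbers for the handler layout above, assuming a 64-bit target (kPointerSize == 8): five pointer-sized slots at offsets 0, 8, 16, 24 and 32, giving kSize == 40 bytes (20 bytes on a 32-bit target). As a checkable sketch:

#include <cstdio>

int main() {
  const int kPointerSize = 8;  // assumption: 64-bit build
  const int kFPOffset = 4 * kPointerSize;      // 32; slots 0..3 precede it
  const int kSize = kFPOffset + kPointerSize;  // 40
  std::printf("StackHandler: fp at %d, total %d bytes\n", kFPOffset, kSize);
  return 0;
}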
class StackHandler BASE_EMBEDDED {
public:
enum Kind {
@ -581,7 +593,6 @@ class JavaScriptFrame: public StandardFrame {
inline Object* function_slot_object() const;
friend class StackFrameIterator;
friend class StackTracer;
};

34
deps/v8/src/full-codegen.cc

@ -322,7 +322,7 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
ASSERT(!isolate->has_pending_exception());
return false;
}
unsigned table_offset = cgen.EmitStackCheckTable();
unsigned table_offset = cgen.EmitBackEdgeTable();
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION);
Handle<Code> code = CodeGenerator::MakeCodeEpilogue(&masm, flags, info);
@ -341,8 +341,8 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
#endif // ENABLE_DEBUGGER_SUPPORT
code->set_allow_osr_at_loop_nesting_level(0);
code->set_profiler_ticks(0);
code->set_stack_check_table_offset(table_offset);
code->set_stack_check_patched_for_osr(false);
code->set_back_edge_table_offset(table_offset);
code->set_back_edges_patched_for_osr(false);
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // May be an empty handle.
#ifdef ENABLE_GDB_JIT_INTERFACE
@ -362,17 +362,18 @@ bool FullCodeGenerator::MakeCode(CompilationInfo* info) {
}
unsigned FullCodeGenerator::EmitStackCheckTable() {
// The stack check table consists of a length (in number of entries)
unsigned FullCodeGenerator::EmitBackEdgeTable() {
// The back edge table consists of a length (in number of entries)
// field, and then a sequence of entries. Each entry is a triple of AST id,
// code-relative pc offset and loop depth.
masm()->Align(kIntSize);
unsigned offset = masm()->pc_offset();
unsigned length = stack_checks_.length();
unsigned length = back_edges_.length();
__ dd(length);
for (unsigned i = 0; i < length; ++i) {
__ dd(stack_checks_[i].id.ToInt());
__ dd(stack_checks_[i].pc_and_state);
__ dd(back_edges_[i].id.ToInt());
__ dd(back_edges_[i].pc);
__ db(back_edges_[i].loop_depth);
}
return offset;
}
@ -478,8 +479,11 @@ void FullCodeGenerator::RecordTypeFeedbackCell(
void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a state.
ASSERT(masm_->pc_offset() > 0);
BailoutEntry entry = { ast_id, static_cast<unsigned>(masm_->pc_offset()) };
stack_checks_.Add(entry, zone());
ASSERT(loop_depth() > 0);
uint8_t depth = Min(loop_depth(), Code::kMaxLoopNestingMarker);
BackEdgeEntry entry =
{ ast_id, static_cast<unsigned>(masm_->pc_offset()), depth };
back_edges_.Add(entry, zone());
}
@ -1251,7 +1255,7 @@ void FullCodeGenerator::VisitWithStatement(WithStatement* stmt) {
void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
Comment cmnt(masm_, "[ DoWhileStatement");
SetStatementPosition(stmt);
Label body, stack_check;
Label body, book_keeping;
Iteration loop_statement(this, stmt);
increment_loop_depth();
@ -1265,13 +1269,13 @@ void FullCodeGenerator::VisitDoWhileStatement(DoWhileStatement* stmt) {
PrepareForBailoutForId(stmt->ContinueId(), NO_REGISTERS);
SetExpressionPosition(stmt->cond(), stmt->condition_position());
VisitForControl(stmt->cond(),
&stack_check,
&book_keeping,
loop_statement.break_label(),
&stack_check);
&book_keeping);
// Check stack before looping.
PrepareForBailoutForId(stmt->BackEdgeId(), NO_REGISTERS);
__ bind(&stack_check);
__ bind(&book_keeping);
EmitBackEdgeBookkeeping(stmt, &body);
__ jmp(&body);
@ -1549,6 +1553,8 @@ void FullCodeGenerator::VisitYield(Yield* expr) {
UNIMPLEMENTED();
Comment cmnt(masm_, "[ Yield");
// TODO(wingo): Actually update the iterator state.
VisitForEffect(expr->generator_object());
VisitForAccumulatorValue(expr->expression());
// TODO(wingo): Assert that the operand stack depth is 0, at least while
// general yield expressions are unimplemented.

19
deps/v8/src/full-codegen.h

@ -92,7 +92,7 @@ class FullCodeGenerator: public AstVisitor {
bailout_entries_(info->HasDeoptimizationSupport()
? info->function()->ast_node_count() : 0,
info->zone()),
stack_checks_(2, info->zone()), // There's always at least one.
back_edges_(2, info->zone()),
type_feedback_cells_(info->HasDeoptimizationSupport()
? info->function()->ast_node_count() : 0,
info->zone()),
@ -135,6 +135,7 @@ class FullCodeGenerator: public AstVisitor {
#error Unsupported target architecture.
#endif
static const int kBackEdgeEntrySize = 2 * kIntSize + kOneByteSize;
private:
class Breakable;
@ -459,9 +460,9 @@ class FullCodeGenerator: public AstVisitor {
Label* back_edge_target);
// Record the OSR AST id corresponding to a back edge in the code.
void RecordBackEdge(BailoutId osr_ast_id);
// Emit a table of stack check ids and pcs into the code stream. Return
// the offset of the start of the table.
unsigned EmitStackCheckTable();
// Emit a table of back edge ids, pcs and loop depths into the code stream.
// Return the offset of the start of the table.
unsigned EmitBackEdgeTable();
void EmitProfilingCounterDecrement(int delta);
void EmitProfilingCounterReset();
@ -624,6 +625,12 @@ class FullCodeGenerator: public AstVisitor {
unsigned pc_and_state;
};
struct BackEdgeEntry {
BailoutId id;
unsigned pc;
uint8_t loop_depth;
};
struct TypeFeedbackCellEntry {
TypeFeedbackId ast_id;
Handle<JSGlobalPropertyCell> cell;
@ -818,9 +825,7 @@ class FullCodeGenerator: public AstVisitor {
const ExpressionContext* context_;
ZoneList<BailoutEntry> bailout_entries_;
GrowableBitVector prepared_bailout_ids_;
// TODO(svenpanne) Rename this to something like back_edges_ and rename
// related functions accordingly.
ZoneList<BailoutEntry> stack_checks_;
ZoneList<BackEdgeEntry> back_edges_;
ZoneList<TypeFeedbackCellEntry> type_feedback_cells_;
int ic_total_count_;
Handle<FixedArray> handler_table_;

39
deps/v8/src/gdb-jit.cc

@ -629,7 +629,7 @@ class MachO BASE_EMBEDDED {
#if defined(__ELF)
class ELF BASE_EMBEDDED {
public:
ELF(Zone* zone) : sections_(6, zone) {
explicit ELF(Zone* zone) : sections_(6, zone) {
sections_.Add(new(zone) ELFSection("", ELFSection::TYPE_NULL, 0), zone);
sections_.Add(new(zone) ELFStringTable(".shstrtab"), zone);
}
@ -681,7 +681,7 @@ class ELF BASE_EMBEDDED {
#else
#error Unsupported target architecture.
#endif
memcpy(header->ident, ident, 16);
OS::MemCopy(header->ident, ident, 16);
header->type = 1;
#if defined(V8_TARGET_ARCH_IA32)
header->machine = 3;
@ -1019,9 +1019,9 @@ class CodeDescription BASE_EMBEDDED {
#if defined(__ELF)
static void CreateSymbolsTable(CodeDescription* desc,
Zone* zone,
ELF* elf,
int text_section_index) {
Zone* zone = desc->info()->zone();
ELFSymbolTable* symtab = new(zone) ELFSymbolTable(".symtab", zone);
ELFStringTable* strtab = new(zone) ELFStringTable(".strtab");
@ -1213,8 +1213,11 @@ class DebugInfoSection : public DebugSection {
w->WriteSLEB128(StandardFrameConstants::kContextOffset);
block_size.set(static_cast<uint32_t>(w->position() - block_start));
}
w->WriteULEB128(0); // Terminate the sub program.
}
w->WriteULEB128(0); // Terminate the compile unit.
size.set(static_cast<uint32_t>(w->position() - start));
return true;
}
@ -1324,15 +1327,14 @@ class DebugAbbrevSection : public DebugSection {
// The real slot ID is internal_slots + context_slot_id.
int internal_slots = Context::MIN_CONTEXT_SLOTS;
int locals = scope->StackLocalCount();
int total_children =
params + slots + context_slots + internal_slots + locals + 2;
// Total children is params + slots + context_slots + internal_slots +
// locals + 2 (__function and __context).
// The extra duplication below seems to be necessary to keep
// gdb from getting upset on OSX.
w->WriteULEB128(current_abbreviation++); // Abbreviation code.
w->WriteULEB128(DW_TAG_SUBPROGRAM);
w->Write<uint8_t>(
total_children != 0 ? DW_CHILDREN_YES : DW_CHILDREN_NO);
w->Write<uint8_t>(DW_CHILDREN_YES);
w->WriteULEB128(DW_AT_NAME);
w->WriteULEB128(DW_FORM_STRING);
w->WriteULEB128(DW_AT_LOW_PC);
@ -1384,9 +1386,7 @@ class DebugAbbrevSection : public DebugSection {
// The context.
WriteVariableAbbreviation(w, current_abbreviation++, true, false);
if (total_children != 0) {
w->WriteULEB128(0); // Terminate the sibling list.
}
w->WriteULEB128(0); // Terminate the sibling list.
}
w->WriteULEB128(0); // Terminate the table.
@ -1789,8 +1789,9 @@ bool UnwindInfoSection::WriteBodyInternal(Writer* w) {
#endif // V8_TARGET_ARCH_X64
static void CreateDWARFSections(CodeDescription* desc, DebugObject* obj) {
Zone* zone = desc->info()->zone();
static void CreateDWARFSections(CodeDescription* desc,
Zone* zone,
DebugObject* obj) {
if (desc->IsLineInfoAvailable()) {
obj->AddSection(new(zone) DebugInfoSection(desc), zone);
obj->AddSection(new(zone) DebugAbbrevSection(desc), zone);
@ -1841,7 +1842,7 @@ extern "C" {
#ifdef OBJECT_PRINT
void __gdb_print_v8_object(MaybeObject* object) {
object->Print();
fprintf(stdout, "\n");
PrintF(stdout, "\n");
}
#endif
}
@ -1854,7 +1855,7 @@ static JITCodeEntry* CreateCodeEntry(Address symfile_addr,
entry->symfile_addr_ = reinterpret_cast<Address>(entry + 1);
entry->symfile_size_ = symfile_size;
memcpy(entry->symfile_addr_, symfile_addr, symfile_size);
OS::MemCopy(entry->symfile_addr_, symfile_addr, symfile_size);
entry->prev_ = entry->next_ = NULL;
@ -1915,8 +1916,7 @@ static void UnregisterCodeEntry(JITCodeEntry* entry) {
}
static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
Zone* zone = desc->info()->zone();
static JITCodeEntry* CreateELFObject(CodeDescription* desc, Zone* zone) {
ZoneScope zone_scope(zone, DELETE_ON_EXIT);
#ifdef __MACH_O
MachO mach_o;
@ -1944,9 +1944,9 @@ static JITCodeEntry* CreateELFObject(CodeDescription* desc) {
ELFSection::FLAG_ALLOC | ELFSection::FLAG_EXEC),
zone);
CreateSymbolsTable(desc, &elf, text_section_index);
CreateSymbolsTable(desc, zone, &elf, text_section_index);
CreateDWARFSections(desc, &elf);
CreateDWARFSections(desc, zone, &elf);
elf.Write(&w);
#endif
@ -2083,7 +2083,8 @@ void GDBJITInterface::AddCode(const char* name,
}
AddUnwindInfo(&code_desc);
JITCodeEntry* entry = CreateELFObject(&code_desc);
Zone* zone = code->GetIsolate()->runtime_zone();
JITCodeEntry* entry = CreateELFObject(&code_desc, zone);
ASSERT(!IsLineInfoTagged(entry));
delete lineinfo;

74
deps/v8/src/generator.js

@ -0,0 +1,74 @@
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"use strict";
// This file relies on the fact that the following declarations have been made
// in runtime.js:
// var $Function = global.Function;
// ----------------------------------------------------------------------------
// TODO(wingo): Give link to specification. For now, the following diagram is
// the spec:
// http://wiki.ecmascript.org/lib/exe/fetch.php?cache=cache&media=harmony:es6_generator_object_model_3-29-13.png
function GeneratorObjectNext() {
// TODO(wingo): Implement.
}
function GeneratorObjectSend(value) {
// TODO(wingo): Implement.
}
function GeneratorObjectThrow(exn) {
// TODO(wingo): Implement.
}
function GeneratorObjectClose() {
// TODO(wingo): Implement.
}
function SetUpGenerators() {
%CheckIsBootstrapping();
var GeneratorObjectPrototype = GeneratorFunctionPrototype.prototype;
InstallFunctions(GeneratorObjectPrototype,
DONT_ENUM | DONT_DELETE | READ_ONLY,
["next", GeneratorObjectNext,
"send", GeneratorObjectSend,
"throw", GeneratorObjectThrow,
"close", GeneratorObjectClose]);
%SetProperty(GeneratorObjectPrototype, "constructor",
GeneratorFunctionPrototype, DONT_ENUM | DONT_DELETE | READ_ONLY);
%SetPrototype(GeneratorFunctionPrototype, $Function.prototype);
%SetProperty(GeneratorFunctionPrototype, "constructor",
GeneratorFunction, DONT_ENUM | DONT_DELETE | READ_ONLY);
%SetPrototype(GeneratorFunction, $Function);
}
SetUpGenerators();

7
deps/v8/src/global-handles.h

@ -31,10 +31,15 @@
#include "../include/v8-profiler.h"
#include "list.h"
#include "v8utils.h"
namespace v8 {
namespace internal {
class GCTracer;
class HeapStats;
class ObjectVisitor;
// Structure for tracking global handles.
// A single list keeps all the allocated global handles.
// Destroyed handles stay in the list but is added to the free list.
@ -88,7 +93,7 @@ class ImplicitRefGroup {
malloc(OFFSET_OF(ImplicitRefGroup, children_[length])));
group->parent_ = parent;
group->length_ = length;
CopyWords(group->children_, children, static_cast<int>(length));
CopyWords(group->children_, children, length);
return group;
}

12
deps/v8/src/globals.h

@ -67,9 +67,21 @@ namespace internal {
// http://www.agner.org/optimize/calling_conventions.pdf
// or with gcc, run: "echo | gcc -E -dM -"
#if defined(_M_X64) || defined(__x86_64__)
#if defined(__native_client__)
// For Native Client builds of V8, use V8_TARGET_ARCH_ARM, so that V8
// generates ARM machine code, together with a portable ARM simulator
// compiled for the host architecture in question.
//
// Since Native Client is ILP-32 on all architectures we use
// V8_HOST_ARCH_IA32 on both 32- and 64-bit x86.
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#else
#define V8_HOST_ARCH_X64 1
#define V8_HOST_ARCH_64_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#endif // __native_client__
#elif defined(_M_IX86) || defined(__i386__)
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1

1
deps/v8/src/handles-inl.h

@ -59,7 +59,6 @@ inline bool Handle<T>::is_identical_to(const Handle<T> other) const {
if (FLAG_enable_slow_asserts) {
Isolate* isolate = Isolate::Current();
CHECK(isolate->AllowHandleDereference() ||
Heap::RelocationLock::IsLocked(isolate->heap()) ||
!isolate->optimizing_compiler_thread()->IsOptimizerThread());
}
#endif // DEBUG

27
deps/v8/src/handles.cc

@ -252,15 +252,32 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object,
}
Handle<Object> DeleteProperty(Handle<JSObject> object, Handle<Object> key) {
Isolate* isolate = object->GetIsolate();
CALL_HEAP_FUNCTION(isolate,
Runtime::DeleteObjectProperty(
isolate, object, key, JSReceiver::NORMAL_DELETION),
Object);
}
Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
Handle<Object> key) {
Isolate* isolate = object->GetIsolate();
CALL_HEAP_FUNCTION(isolate,
Runtime::ForceDeleteObjectProperty(isolate, object, key),
Runtime::DeleteObjectProperty(
isolate, object, key, JSReceiver::FORCE_DELETION),
Object);
}
Handle<Object> HasProperty(Handle<JSReceiver> obj, Handle<Object> key) {
Isolate* isolate = obj->GetIsolate();
CALL_HEAP_FUNCTION(isolate,
Runtime::HasObjectProperty(isolate, obj, key), Object);
}
Handle<Object> GetProperty(Handle<JSReceiver> obj,
const char* name) {
Isolate* isolate = obj->GetIsolate();
@ -308,6 +325,14 @@ Handle<JSObject> Copy(Handle<JSObject> obj) {
}
Handle<JSObject> DeepCopy(Handle<JSObject> obj) {
Isolate* isolate = obj->GetIsolate();
CALL_HEAP_FUNCTION(isolate,
obj->DeepCopy(isolate),
JSObject);
}
Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info) {
CALL_HEAP_FUNCTION(obj->GetIsolate(), obj->DefineAccessor(*info), Object);
}

12
deps/v8/src/handles.h

@ -223,11 +223,13 @@ Handle<Object> ForceSetProperty(Handle<JSObject> object,
Handle<Object> value,
PropertyAttributes attributes);
Handle<Object> ForceDeleteProperty(Handle<JSObject> object,
Handle<Object> key);
Handle<Object> DeleteProperty(Handle<JSObject> object, Handle<Object> key);
Handle<Object> GetProperty(Handle<JSReceiver> obj,
const char* name);
Handle<Object> ForceDeleteProperty(Handle<JSObject> object, Handle<Object> key);
Handle<Object> HasProperty(Handle<JSReceiver> obj, Handle<Object> key);
Handle<Object> GetProperty(Handle<JSReceiver> obj, const char* name);
Handle<Object> GetProperty(Isolate* isolate,
Handle<Object> obj,
@ -240,6 +242,8 @@ Handle<Object> LookupSingleCharacterStringFromCode(Isolate* isolate,
Handle<JSObject> Copy(Handle<JSObject> obj);
Handle<JSObject> DeepCopy(Handle<JSObject> obj);
Handle<Object> SetAccessor(Handle<JSObject> obj, Handle<AccessorInfo> info);
Handle<FixedArray> AddKeysFromJSArray(Handle<FixedArray>,

22
deps/v8/src/heap-inl.h

@ -159,8 +159,8 @@ MaybeObject* Heap::AllocateOneByteInternalizedString(Vector<const uint8_t> str,
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
memcpy(answer->address() + SeqOneByteString::kHeaderSize,
str.start(), str.length());
OS::MemCopy(answer->address() + SeqOneByteString::kHeaderSize,
str.start(), str.length());
return answer;
}
@ -192,8 +192,8 @@ MaybeObject* Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
ASSERT_EQ(size, answer->Size());
// Fill in the characters.
memcpy(answer->address() + SeqTwoByteString::kHeaderSize,
str.start(), str.length() * kUC16Size);
OS::MemCopy(answer->address() + SeqTwoByteString::kHeaderSize,
str.start(), str.length() * kUC16Size);
return answer;
}
@ -345,6 +345,16 @@ bool Heap::InOldPointerSpace(Object* object) {
}
bool Heap::InOldDataSpace(Address address) {
return old_data_space_->Contains(address);
}
bool Heap::InOldDataSpace(Object* object) {
return InOldDataSpace(reinterpret_cast<Address>(object));
}
bool Heap::OldGenerationAllocationLimitReached() {
if (!incremental_marking()->IsStopped()) return false;
return OldGenerationSpaceAvailable() < 0;
@ -417,7 +427,7 @@ AllocationSpace Heap::TargetSpaceId(InstanceType type) {
void Heap::CopyBlock(Address dst, Address src, int byte_size) {
CopyWords(reinterpret_cast<Object**>(dst),
reinterpret_cast<Object**>(src),
byte_size / kPointerSize);
static_cast<size_t>(byte_size / kPointerSize));
}
@ -435,7 +445,7 @@ void Heap::MoveBlock(Address dst, Address src, int byte_size) {
*dst_slot++ = *src_slot++;
}
} else {
memmove(dst, src, byte_size);
OS::MemMove(dst, src, static_cast<size_t>(byte_size));
}
}

2
deps/v8/src/heap-snapshot-generator.cc

@ -2319,7 +2319,7 @@ class OutputStreamWriter {
int s_chunk_size = Min(
chunk_size_ - chunk_pos_, static_cast<int>(s_end - s));
ASSERT(s_chunk_size > 0);
memcpy(chunk_.start() + chunk_pos_, s, s_chunk_size);
OS::MemCopy(chunk_.start() + chunk_pos_, s, s_chunk_size);
s += s_chunk_size;
chunk_pos_ += s_chunk_size;
MaybeWriteChunk();

124
deps/v8/src/heap.cc

@ -162,8 +162,7 @@ Heap::Heap()
#endif
promotion_queue_(this),
configured_(false),
chunks_queued_for_free_(NULL),
relocation_mutex_(NULL) {
chunks_queued_for_free_(NULL) {
// Allow build-time customization of the max semispace size. Building
// V8 with snapshots and a non-default max semispace size is much
// easier if you can define it as part of the build environment.
@ -688,9 +687,9 @@ void Heap::MoveElements(FixedArray* array,
ASSERT(array->map() != HEAP->fixed_cow_array_map());
Object** dst_objects = array->data_start() + dst_index;
memmove(dst_objects,
array->data_start() + src_index,
len * kPointerSize);
OS::MemMove(dst_objects,
array->data_start() + src_index,
len * kPointerSize);
if (!InNewSpace(array)) {
for (int i = 0; i < len; i++) {
// TODO(hpayer): check store buffer for entries
@ -952,6 +951,13 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
PrintPID("Limited new space size due to high promotion rate: %d MB\n",
new_space_.InitialCapacity() / MB);
}
// Support for global pre-tenuring uses the high promotion mode as a
// heuristic indicator of whether to pretenure or not, so we trigger
// deoptimization here to take advantage of pre-tenuring as soon as
// possible.
if (FLAG_pretenure_literals) {
isolate_->stack_guard()->FullDeopt();
}
} else if (new_space_high_promotion_mode_active_ &&
IsStableOrDecreasingSurvivalTrend() &&
IsLowSurvivalRate()) {
@ -963,6 +969,11 @@ bool Heap::PerformGarbageCollection(GarbageCollector collector,
PrintPID("Unlimited new space size due to low promotion rate: %d MB\n",
new_space_.MaximumCapacity() / MB);
}
// Trigger deoptimization here to turn off pre-tenuring as soon as
// possible.
if (FLAG_pretenure_literals) {
isolate_->stack_guard()->FullDeopt();
}
}
if (new_space_high_promotion_mode_active_ &&
@ -1282,8 +1293,6 @@ class ScavengeWeakObjectRetainer : public WeakObjectRetainer {
void Heap::Scavenge() {
RelocationLock relocation_lock(this);
#ifdef VERIFY_HEAP
if (FLAG_verify_heap) VerifyNonPointerSpacePointers();
#endif
@ -2836,13 +2845,6 @@ bool Heap::CreateInitialObjects() {
}
hidden_string_ = String::cast(obj);
// Allocate the foreign for __proto__.
{ MaybeObject* maybe_obj =
AllocateForeign((Address) &Accessors::ObjectPrototype);
if (!maybe_obj->ToObject(&obj)) return false;
}
set_prototype_accessors(Foreign::cast(obj));
// Allocate the code_stubs dictionary. The initial size is set to avoid
// expanding the dictionary during bootstrapping.
{ MaybeObject* maybe_obj = UnseededNumberDictionary::Allocate(this, 128);
@ -3963,30 +3965,36 @@ void Heap::InitializeFunction(JSFunction* function,
MaybeObject* Heap::AllocateFunctionPrototype(JSFunction* function) {
// Allocate the prototype. Make sure to use the object function
// from the function's context, since the function can be from a
// different context.
JSFunction* object_function =
function->context()->native_context()->object_function();
// Each function prototype gets a copy of the object function map.
// This avoid unwanted sharing of maps between prototypes of different
// constructors.
// Make sure to use globals from the function's context, since the function
// can be from a different context.
Context* native_context = function->context()->native_context();
bool needs_constructor_property;
Map* new_map;
ASSERT(object_function->has_initial_map());
MaybeObject* maybe_map = object_function->initial_map()->Copy();
if (!maybe_map->To(&new_map)) return maybe_map;
if (function->shared()->is_generator()) {
// Generator prototypes can share maps since they don't have "constructor"
// properties.
new_map = native_context->generator_object_prototype_map();
needs_constructor_property = false;
} else {
// Each function prototype gets a fresh map to avoid unwanted sharing of
// maps between prototypes of different constructors.
JSFunction* object_function = native_context->object_function();
ASSERT(object_function->has_initial_map());
MaybeObject* maybe_map = object_function->initial_map()->Copy();
if (!maybe_map->To(&new_map)) return maybe_map;
needs_constructor_property = true;
}
Object* prototype;
MaybeObject* maybe_prototype = AllocateJSObjectFromMap(new_map);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
// When creating the prototype for the function we must set its
// constructor to the function.
MaybeObject* maybe_failure =
JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
constructor_string(), function, DONT_ENUM);
if (maybe_failure->IsFailure()) return maybe_failure;
if (needs_constructor_property) {
MaybeObject* maybe_failure =
JSObject::cast(prototype)->SetLocalPropertyIgnoreAttributes(
constructor_string(), function, DONT_ENUM);
if (maybe_failure->IsFailure()) return maybe_failure;
}
return prototype;
}
@@ -4086,10 +4094,20 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
// First create a new map with the size and number of in-object properties
// suggested by the function.
int instance_size = fun->shared()->CalculateInstanceSize();
int in_object_properties = fun->shared()->CalculateInObjectProperties();
InstanceType instance_type;
int instance_size;
int in_object_properties;
if (fun->shared()->is_generator()) {
instance_type = JS_GENERATOR_OBJECT_TYPE;
instance_size = JSGeneratorObject::kSize;
in_object_properties = 0;
} else {
instance_type = JS_OBJECT_TYPE;
instance_size = fun->shared()->CalculateInstanceSize();
in_object_properties = fun->shared()->CalculateInObjectProperties();
}
Map* map;
MaybeObject* maybe_map = AllocateMap(JS_OBJECT_TYPE, instance_size);
MaybeObject* maybe_map = AllocateMap(instance_type, instance_size);
if (!maybe_map->To(&map)) return maybe_map;
// Fetch or allocate prototype.
@@ -4111,7 +4129,8 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
// the inline_new flag so we only change the map if we generate a
// specialized construct stub.
ASSERT(in_object_properties <= Map::kMaxPreAllocatedPropertyFields);
if (fun->shared()->CanGenerateInlineConstructor(prototype)) {
if (instance_type == JS_OBJECT_TYPE &&
fun->shared()->CanGenerateInlineConstructor(prototype)) {
int count = fun->shared()->this_property_assignments_count();
if (count > in_object_properties) {
// Inline constructor can only handle inobject properties.
@@ -4144,7 +4163,9 @@ MaybeObject* Heap::AllocateInitialMap(JSFunction* fun) {
}
}
fun->shared()->StartInobjectSlackTracking(map);
if (instance_type == JS_OBJECT_TYPE) {
fun->shared()->StartInobjectSlackTracking(map);
}
return map;
}
@@ -4327,6 +4348,22 @@ MaybeObject* Heap::AllocateJSObjectWithAllocationSite(JSFunction* constructor,
}
MaybeObject* Heap::AllocateJSGeneratorObject(JSFunction *function) {
ASSERT(function->shared()->is_generator());
Map *map;
if (function->has_initial_map()) {
map = function->initial_map();
} else {
// Allocate the initial map if absent.
MaybeObject* maybe_map = AllocateInitialMap(function);
if (!maybe_map->To(&map)) return maybe_map;
function->set_initial_map(map);
}
ASSERT(map->instance_type() == JS_GENERATOR_OBJECT_TYPE);
return AllocateJSObjectFromMap(map);
}
MaybeObject* Heap::AllocateJSModule(Context* context, ScopeInfo* scope_info) {
// Allocate a fresh map. Modules do not have a prototype.
Map* map;
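
AllocateJSGeneratorObject above follows the usual get-or-create discipline for a function's initial map: allocate on first use, cache it on the function, and let every later request fall through to a plain map-based allocation. The same shape in isolation, with invented stub types:

struct MapStub {};

struct FunctionStub {
  MapStub* initial_map = nullptr;   // cached after the first allocation
  bool has_initial_map() const { return initial_map != nullptr; }
};

MapStub* GetOrAllocateInitialMap(FunctionStub* fn) {
  if (!fn->has_initial_map()) {
    fn->initial_map = new MapStub();  // AllocateInitialMap in the diff
  }
  return fn->initial_map;             // reused by all later allocations
}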
@@ -4945,7 +4982,7 @@ static inline void WriteOneByteData(Vector<const char> vector,
int len) {
// Only works for ascii.
ASSERT(vector.length() == len);
memcpy(chars, vector.start(), len);
OS::MemCopy(chars, vector.start(), len);
}
static inline void WriteTwoByteData(Vector<const char> vector,
@@ -6588,11 +6625,6 @@ bool Heap::SetUp() {
store_buffer()->SetUp();
if (FLAG_parallel_recompilation) relocation_mutex_ = OS::CreateMutex();
#ifdef DEBUG
relocation_mutex_locked_ = false;
#endif // DEBUG
return true;
}
@@ -6695,8 +6727,6 @@ void Heap::TearDown() {
incremental_marking()->TearDown();
isolate_->memory_allocator()->TearDown();
delete relocation_mutex_;
}
@@ -7821,8 +7851,8 @@ void Heap::CheckpointObjectStats() {
FIXED_ARRAY_SUB_INSTANCE_TYPE_LIST(ADJUST_LAST_TIME_OBJECT_COUNT)
#undef ADJUST_LAST_TIME_OBJECT_COUNT
memcpy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
memcpy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
OS::MemCopy(object_counts_last_time_, object_counts_, sizeof(object_counts_));
OS::MemCopy(object_sizes_last_time_, object_sizes_, sizeof(object_sizes_));
ClearObjectStats();
}

60
deps/v8/src/heap.h

@@ -150,7 +150,6 @@ namespace internal {
V(HeapNumber, minus_zero_value, MinusZeroValue) \
V(Map, neander_map, NeanderMap) \
V(JSObject, message_listeners, MessageListeners) \
V(Foreign, prototype_accessors, PrototypeAccessors) \
V(UnseededNumberDictionary, code_stubs, CodeStubs) \
V(UnseededNumberDictionary, non_monomorphic_cache, NonMonomorphicCache) \
V(PolymorphicCodeCache, polymorphic_code_cache, PolymorphicCodeCache) \
@@ -210,9 +209,11 @@ namespace internal {
V(index_string, "index") \
V(last_index_string, "lastIndex") \
V(object_string, "object") \
V(payload_string, "payload") \
V(prototype_string, "prototype") \
V(string_string, "string") \
V(String_string, "String") \
V(unknown_field_string, "unknownField") \
V(symbol_string, "symbol") \
V(Symbol_string, "Symbol") \
V(Date_string, "Date") \
@@ -601,6 +602,13 @@ class Heap {
return old_pointer_space_->allocation_limit_address();
}
Address* OldDataSpaceAllocationTopAddress() {
return old_data_space_->allocation_top_address();
}
Address* OldDataSpaceAllocationLimitAddress() {
return old_data_space_->allocation_limit_address();
}
// Uncommit unused semi space.
bool UncommitFromSpace() { return new_space_.UncommitFromSpace(); }
@@ -617,6 +625,9 @@ class Heap {
JSFunction* constructor,
Handle<Object> allocation_site_info_payload);
MUST_USE_RESULT MaybeObject* AllocateJSGeneratorObject(
JSFunction* function);
MUST_USE_RESULT MaybeObject* AllocateJSModule(Context* context,
ScopeInfo* scope_info);
@@ -1329,6 +1340,10 @@ class Heap {
inline bool InOldPointerSpace(Address address);
inline bool InOldPointerSpace(Object* object);
// Returns whether the object resides in old data space.
inline bool InOldDataSpace(Address address);
inline bool InOldDataSpace(Object* object);
// Checks whether an address/object in the heap (including auxiliary
// area and unused area).
bool Contains(Address addr);
@@ -1497,6 +1512,12 @@ class Heap {
MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int length,
PretenureFlag pretenure);
// Predicate that governs global pre-tenuring decisions based on observed
// promotion rates of previous collections.
inline bool ShouldGloballyPretenure() {
return new_space_high_promotion_mode_active_;
}
inline intptr_t PromotedTotalSize() {
return PromotedSpaceSizeOfObjects() + PromotedExternalMemorySize();
}
@@ -1825,38 +1846,6 @@ class Heap {
void CheckpointObjectStats();
// We don't use a ScopedLock here since we want to lock the heap
// only when FLAG_parallel_recompilation is true.
class RelocationLock {
public:
explicit RelocationLock(Heap* heap) : heap_(heap) {
if (FLAG_parallel_recompilation) {
heap_->relocation_mutex_->Lock();
#ifdef DEBUG
heap_->relocation_mutex_locked_ = true;
#endif // DEBUG
}
}
~RelocationLock() {
if (FLAG_parallel_recompilation) {
#ifdef DEBUG
heap_->relocation_mutex_locked_ = false;
#endif // DEBUG
heap_->relocation_mutex_->Unlock();
}
}
#ifdef DEBUG
static bool IsLocked(Heap* heap) {
return heap->relocation_mutex_locked_;
}
#endif // DEBUG
private:
Heap* heap_;
};
private:
Heap();
@@ -2326,11 +2315,6 @@ class Heap {
MemoryChunk* chunks_queued_for_free_;
Mutex* relocation_mutex_;
#ifdef DEBUG
bool relocation_mutex_locked_;
#endif // DEBUG;
friend class Factory;
friend class GCTracer;
friend class DisallowAllocationFailure;

97
deps/v8/src/hydrogen-instructions.cc

@@ -685,7 +685,7 @@ void HValue::Kill() {
HValue* operand = OperandAt(i);
if (operand == NULL) continue;
HUseListNode* first = operand->use_list_;
if (first != NULL && first->value() == this && first->index() == i) {
if (first != NULL && first->value()->CheckFlag(kIsDead)) {
operand->use_list_ = first->tail();
}
}
@@ -806,6 +806,9 @@ void HInstruction::PrintTo(StringStream* stream) {
PrintRangeTo(stream);
PrintChangesTo(stream);
PrintTypeTo(stream);
if (CheckFlag(HValue::kHasNoObservableSideEffects)) {
stream->Add(" [noOSE]");
}
}
@@ -1581,10 +1584,10 @@ void HCheckMaps::SetSideEffectDominator(GVNFlag side_effect,
// for which the map is known.
if (HasNoUses() && dominator->IsStoreNamedField()) {
HStoreNamedField* store = HStoreNamedField::cast(dominator);
Handle<Map> map = store->transition();
if (map.is_null() || store->object() != value()) return;
UniqueValueId map_unique_id = store->transition_unique_id();
if (!map_unique_id.IsInitialized() || store->object() != value()) return;
for (int i = 0; i < map_set()->length(); i++) {
if (map.is_identical_to(map_set()->at(i))) {
if (map_unique_id == map_unique_ids_.at(i)) {
DeleteAndReplaceWith(NULL);
return;
}
@@ -1980,20 +1983,25 @@ void HPhi::AddIndirectUsesTo(int* dest) {
}
void HSimulate::MergeInto(HSimulate* other) {
for (int i = 0; i < values_.length(); ++i) {
HValue* value = values_[i];
if (HasAssignedIndexAt(i)) {
other->AddAssignedValue(GetAssignedIndexAt(i), value);
} else {
if (other->pop_count_ > 0) {
other->pop_count_--;
void HSimulate::MergeWith(ZoneList<HSimulate*>* list) {
while (!list->is_empty()) {
HSimulate* from = list->RemoveLast();
ZoneList<HValue*>* from_values = &from->values_;
for (int i = 0; i < from_values->length(); ++i) {
if (from->HasAssignedIndexAt(i)) {
AddAssignedValue(from->GetAssignedIndexAt(i),
from_values->at(i));
} else {
other->AddPushedValue(value);
if (pop_count_ > 0) {
pop_count_--;
} else {
AddPushedValue(from_values->at(i));
}
}
}
pop_count_ += from->pop_count_;
from->DeleteAndReplaceWith(NULL);
}
other->pop_count_ += pop_count();
}
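
The new MergeWith above replaces pairwise MergeInto with a drain of a whole accumulated list, and it flips the direction: values flow from the removable simulates into the surviving one. The core bookkeeping is push/pop cancellation. A toy model with STL containers (assigned-index slots and zone allocation are ignored here; see the diff for those):

#include <vector>

struct SimulateSketch {
  std::vector<int> pushed;   // values pushed onto the environment
  int pop_count = 0;

  // Fold an earlier, removable simulate into this surviving one.
  void MergeFrom(SimulateSketch* from) {
    for (int value : from->pushed) {
      if (pop_count > 0) {
        pop_count--;               // a pending pop consumes the push
      } else {
        pushed.push_back(value);
      }
    }
    pop_count += from->pop_count;  // leftover pops accumulate
  }
};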
@@ -2039,6 +2047,7 @@ static bool IsInteger32(double value) {
HConstant::HConstant(Handle<Object> handle, Representation r)
: handle_(handle),
unique_id_(),
has_int32_value_(false),
has_double_value_(false),
is_internalized_string_(false),
@@ -2067,11 +2076,13 @@ HConstant::HConstant(Handle<Object> handle, Representation r)
HConstant::HConstant(Handle<Object> handle,
UniqueValueId unique_id,
Representation r,
HType type,
bool is_internalize_string,
bool boolean_value)
: handle_(handle),
unique_id_(unique_id),
has_int32_value_(false),
has_double_value_(false),
is_internalized_string_(is_internalize_string),
@@ -2087,7 +2098,9 @@ HConstant::HConstant(Handle<Object> handle,
HConstant::HConstant(int32_t integer_value,
Representation r,
Handle<Object> optional_handle)
: has_int32_value_(true),
: handle_(optional_handle),
unique_id_(),
has_int32_value_(true),
has_double_value_(true),
is_internalized_string_(false),
boolean_value_(integer_value != 0),
@@ -2100,7 +2113,9 @@ HConstant::HConstant(int32_t integer_value,
HConstant::HConstant(double double_value,
Representation r,
Handle<Object> optional_handle)
: has_int32_value_(IsInteger32(double_value)),
: handle_(optional_handle),
unique_id_(),
has_int32_value_(IsInteger32(double_value)),
has_double_value_(true),
is_internalized_string_(false),
boolean_value_(double_value != 0 && !isnan(double_value)),
@@ -2125,8 +2140,12 @@ HConstant* HConstant::CopyToRepresentation(Representation r, Zone* zone) const {
if (has_int32_value_) return new(zone) HConstant(int32_value_, r, handle_);
if (has_double_value_) return new(zone) HConstant(double_value_, r, handle_);
ASSERT(!handle_.is_null());
return new(zone) HConstant(
handle_, r, type_from_value_, is_internalized_string_, boolean_value_);
return new(zone) HConstant(handle_,
unique_id_,
r,
type_from_value_,
is_internalized_string_,
boolean_value_);
}
@@ -2451,6 +2470,8 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
Zone* zone)
: types_(Min(types->length(), kMaxLoadPolymorphism), zone),
name_(name),
types_unique_ids_(0, zone),
name_unique_id_(),
need_generic_(false) {
SetOperandAt(0, context);
SetOperandAt(1, object);
@@ -2517,15 +2538,39 @@ HLoadNamedFieldPolymorphic::HLoadNamedFieldPolymorphic(HValue* context,
}
void HCheckMaps::FinalizeUniqueValueId() {
if (!map_unique_ids_.is_empty()) return;
Zone* zone = block()->zone();
map_unique_ids_.Initialize(map_set_.length(), zone);
for (int i = 0; i < map_set_.length(); i++) {
map_unique_ids_.Add(UniqueValueId(map_set_.at(i)), zone);
}
}
void HLoadNamedFieldPolymorphic::FinalizeUniqueValueId() {
if (!types_unique_ids_.is_empty()) return;
Zone* zone = block()->zone();
types_unique_ids_.Initialize(types_.length(), zone);
for (int i = 0; i < types_.length(); i++) {
types_unique_ids_.Add(UniqueValueId(types_.at(i)), zone);
}
name_unique_id_ = UniqueValueId(name_);
}
bool HLoadNamedFieldPolymorphic::DataEquals(HValue* value) {
ASSERT_EQ(types_.length(), types_unique_ids_.length());
HLoadNamedFieldPolymorphic* other = HLoadNamedFieldPolymorphic::cast(value);
if (types_.length() != other->types()->length()) return false;
if (!name_.is_identical_to(other->name())) return false;
if (name_unique_id_ != other->name_unique_id_) return false;
if (types_unique_ids_.length() != other->types_unique_ids_.length()) {
return false;
}
if (need_generic_ != other->need_generic_) return false;
for (int i = 0; i < types_.length(); i++) {
for (int i = 0; i < types_unique_ids_.length(); i++) {
bool found = false;
for (int j = 0; j < types_.length(); j++) {
if (types_.at(j).is_identical_to(other->types()->at(i))) {
for (int j = 0; j < types_unique_ids_.length(); j++) {
if (types_unique_ids_.at(j) == other->types_unique_ids_.at(i)) {
found = true;
break;
}
@@ -2916,12 +2961,6 @@ void HAllocate::PrintDataTo(StringStream* stream) {
}
HType HFastLiteral::CalculateInferredType() {
// TODO(mstarzinger): Be smarter, could also be JSArray here.
return HType::JSObject();
}
HType HArrayLiteral::CalculateInferredType() {
return HType::JSArray();
}

289
deps/v8/src/hydrogen-instructions.h

@@ -111,7 +111,6 @@ class LChunkBuilder;
V(DummyUse) \
V(ElementsKind) \
V(EnterInlined) \
V(FastLiteral) \
V(FixedArrayBaseLength) \
V(ForceRepresentation) \
V(FunctionLiteral) \
@@ -235,14 +234,6 @@ class LChunkBuilder;
virtual Opcode opcode() const { return HValue::k##type; }
#ifdef DEBUG
#define ASSERT_ALLOCATION_DISABLED \
ASSERT(isolate()->optimizing_compiler_thread()->IsOptimizerThread() || \
!isolate()->heap()->IsAllocationAllowed())
#else
#define ASSERT_ALLOCATION_DISABLED do {} while (0)
#endif
class Range: public ZoneObject {
public:
Range()
@@ -365,6 +356,48 @@ class Representation {
};
class UniqueValueId {
public:
UniqueValueId() : raw_address_(NULL) { }
explicit UniqueValueId(Object* object) {
raw_address_ = reinterpret_cast<Address>(object);
ASSERT(IsInitialized());
}
explicit UniqueValueId(Handle<Object> handle) {
static const Address kEmptyHandleSentinel = reinterpret_cast<Address>(1);
if (handle.is_null()) {
raw_address_ = kEmptyHandleSentinel;
} else {
raw_address_ = reinterpret_cast<Address>(*handle);
ASSERT_NE(kEmptyHandleSentinel, raw_address_);
}
ASSERT(IsInitialized());
}
bool IsInitialized() const { return raw_address_ != NULL; }
bool operator==(const UniqueValueId& other) const {
ASSERT(IsInitialized() && other.IsInitialized());
return raw_address_ == other.raw_address_;
}
bool operator!=(const UniqueValueId& other) const {
ASSERT(IsInitialized() && other.IsInitialized());
return raw_address_ != other.raw_address_;
}
intptr_t Hashcode() const {
ASSERT(IsInitialized());
return reinterpret_cast<intptr_t>(raw_address_);
}
private:
Address raw_address_;
};
class HType {
public:
HType() : type_(kUninitialized) { }
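
UniqueValueId above underpins most of the changes in this file: the handle's target address is snapshotted once, at a point where no GC can move objects, and all later GVN hashing and equality tests run against that snapshot instead of dereferencing the handle (which is forbidden on the parallel recompilation thread). Reduced to a standalone class:

#include <cassert>
#include <cstdint>

class UniqueIdSketch {
 public:
  UniqueIdSketch() : raw_(0) {}                      // uninitialized
  explicit UniqueIdSketch(const void* object)
      : raw_(reinterpret_cast<intptr_t>(object)) {
    assert(raw_ != 0);
  }
  bool IsInitialized() const { return raw_ != 0; }
  bool operator==(const UniqueIdSketch& other) const {
    assert(IsInitialized() && other.IsInitialized());
    return raw_ == other.raw_;
  }
  intptr_t Hashcode() const { return raw_; }         // stable snapshot

 private:
  intptr_t raw_;  // address captured while the object cannot move
};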
@@ -829,6 +862,7 @@ class HValue: public ZoneObject {
// This flag is set to true after the SetupInformativeDefinitions() pass
// has processed this instruction.
kIDefsProcessingDone,
kHasNoObservableSideEffects,
kLastFlag = kIDefsProcessingDone
};
@@ -1006,7 +1040,8 @@ class HValue: public ZoneObject {
return gvn_flags_.ContainsAnyOf(AllSideEffectsFlagSet());
}
bool HasObservableSideEffects() const {
return gvn_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
return !CheckFlag(kHasNoObservableSideEffects) &&
gvn_flags_.ContainsAnyOf(AllObservableSideEffectsFlagSet());
}
GVNFlagSet DependsOnFlags() const {
@@ -1056,6 +1091,9 @@ class HValue: public ZoneObject {
bool Equals(HValue* other);
virtual intptr_t Hashcode();
// Compute unique ids upfront that is safe wrt GC and parallel recompilation.
virtual void FinalizeUniqueValueId() { }
// Printing support.
virtual void PrintTo(StringStream* stream) = 0;
void PrintNameTo(StringStream* stream);
@@ -1830,7 +1868,7 @@ class HSimulate: public HInstruction {
return Representation::None();
}
void MergeInto(HSimulate* other);
void MergeWith(ZoneList<HSimulate*>* list);
bool is_candidate_for_removal() { return removable_ == REMOVABLE_SIMULATE; }
DECLARE_CONCRETE_INSTRUCTION(Simulate)
@@ -2642,7 +2680,8 @@ class HLoadExternalArrayPointer: public HUnaryOperation {
class HCheckMaps: public HTemplateInstruction<2> {
public:
HCheckMaps(HValue* value, Handle<Map> map, Zone* zone,
HValue* typecheck = NULL) {
HValue* typecheck = NULL)
: map_unique_ids_(0, zone) {
SetOperandAt(0, value);
// If callers don't depend on a typecheck, they can pass in NULL. In that
// case we use a copy of the |value| argument as a dummy value.
@@ -2654,7 +2693,8 @@ class HCheckMaps: public HTemplateInstruction<2> {
SetGVNFlag(kDependsOnElementsKind);
map_set()->Add(map, zone);
}
HCheckMaps(HValue* value, SmallMapList* maps, Zone* zone) {
HCheckMaps(HValue* value, SmallMapList* maps, Zone* zone)
: map_unique_ids_(0, zone) {
SetOperandAt(0, value);
SetOperandAt(1, value);
set_representation(Representation::Tagged());
@@ -2701,28 +2741,36 @@ class HCheckMaps: public HTemplateInstruction<2> {
HValue* value() { return OperandAt(0); }
SmallMapList* map_set() { return &map_set_; }
virtual void FinalizeUniqueValueId();
DECLARE_CONCRETE_INSTRUCTION(CheckMaps)
protected:
virtual bool DataEquals(HValue* other) {
ASSERT_EQ(map_set_.length(), map_unique_ids_.length());
HCheckMaps* b = HCheckMaps::cast(other);
// Relies on the fact that map_set has been sorted before.
if (map_set()->length() != b->map_set()->length()) return false;
for (int i = 0; i < map_set()->length(); i++) {
if (!map_set()->at(i).is_identical_to(b->map_set()->at(i))) return false;
if (map_unique_ids_.length() != b->map_unique_ids_.length()) {
return false;
}
for (int i = 0; i < map_unique_ids_.length(); i++) {
if (map_unique_ids_.at(i) != b->map_unique_ids_.at(i)) {
return false;
}
}
return true;
}
private:
SmallMapList map_set_;
ZoneList<UniqueValueId> map_unique_ids_;
};
class HCheckFunction: public HUnaryOperation {
public:
HCheckFunction(HValue* value, Handle<JSFunction> function)
: HUnaryOperation(value), target_(function) {
: HUnaryOperation(value), target_(function), target_unique_id_() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
target_in_new_space_ = Isolate::Current()->heap()->InNewSpace(*function);
@@ -2738,6 +2786,10 @@ class HCheckFunction: public HUnaryOperation {
virtual void Verify();
#endif
virtual void FinalizeUniqueValueId() {
target_unique_id_ = UniqueValueId(target_);
}
Handle<JSFunction> target() const { return target_; }
bool target_in_new_space() const { return target_in_new_space_; }
@@ -2746,11 +2798,12 @@ class HCheckFunction: public HUnaryOperation {
protected:
virtual bool DataEquals(HValue* other) {
HCheckFunction* b = HCheckFunction::cast(other);
return target_.is_identical_to(b->target());
return target_unique_id_ == b->target_unique_id_;
}
private:
Handle<JSFunction> target_;
UniqueValueId target_unique_id_;
bool target_in_new_space_;
};
@@ -2855,7 +2908,11 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
public:
HCheckPrototypeMaps(Handle<JSObject> prototype,
Handle<JSObject> holder,
Zone* zone) : prototypes_(2, zone), maps_(2, zone) {
Zone* zone)
: prototypes_(2, zone),
maps_(2, zone),
first_prototype_unique_id_(),
last_prototype_unique_id_() {
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnMaps);
// Keep a list of all objects on the prototype chain up to the holder
@@ -2881,18 +2938,13 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
virtual void PrintDataTo(StringStream* stream);
virtual intptr_t Hashcode() {
ASSERT_ALLOCATION_DISABLED;
// Dereferencing to use the object's raw address for hashing is safe.
HandleDereferenceGuard allow_handle_deref(isolate(),
HandleDereferenceGuard::ALLOW);
SLOW_ASSERT(Heap::RelocationLock::IsLocked(isolate()->heap()) ||
!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
intptr_t hash = 0;
for (int i = 0; i < prototypes_.length(); i++) {
hash = 17 * hash + reinterpret_cast<intptr_t>(*prototypes_[i]);
hash = 17 * hash + reinterpret_cast<intptr_t>(*maps_[i]);
}
return hash;
return first_prototype_unique_id_.Hashcode() * 17 +
last_prototype_unique_id_.Hashcode();
}
virtual void FinalizeUniqueValueId() {
first_prototype_unique_id_ = UniqueValueId(prototypes_.first());
last_prototype_unique_id_ = UniqueValueId(prototypes_.last());
}
bool CanOmitPrototypeChecks() {
@@ -2905,22 +2957,15 @@ class HCheckPrototypeMaps: public HTemplateInstruction<0> {
protected:
virtual bool DataEquals(HValue* other) {
HCheckPrototypeMaps* b = HCheckPrototypeMaps::cast(other);
#ifdef DEBUG
if (prototypes_.length() != b->prototypes()->length()) return false;
for (int i = 0; i < prototypes_.length(); i++) {
if (!prototypes_[i].is_identical_to(b->prototypes()->at(i))) return false;
if (!maps_[i].is_identical_to(b->maps()->at(i))) return false;
}
return true;
#else
return prototypes_.first().is_identical_to(b->prototypes()->first()) &&
prototypes_.last().is_identical_to(b->prototypes()->last());
#endif // DEBUG
return first_prototype_unique_id_ == b->first_prototype_unique_id_ &&
last_prototype_unique_id_ == b->last_prototype_unique_id_;
}
private:
ZoneList<Handle<JSObject> > prototypes_;
ZoneList<Handle<Map> > maps_;
UniqueValueId first_prototype_unique_id_;
UniqueValueId last_prototype_unique_id_;
};
@@ -3175,6 +3220,7 @@ class HConstant: public HTemplateInstruction<0> {
Representation r,
Handle<Object> optional_handle = Handle<Object>::null());
HConstant(Handle<Object> handle,
UniqueValueId unique_id,
Representation r,
HType type,
bool is_internalized_string,
@@ -3188,35 +3234,36 @@ class HConstant: public HTemplateInstruction<0> {
return handle_;
}
bool InOldSpace() const { return !HEAP->InNewSpace(*handle_); }
bool IsSpecialDouble() const {
return has_double_value_ &&
(BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
FixedDoubleArray::is_the_hole_nan(double_value_) ||
isnan(double_value_));
}
bool ImmortalImmovable() const {
if (has_int32_value_) {
return false;
}
if (has_double_value_) {
if (BitCast<int64_t>(double_value_) == BitCast<int64_t>(-0.0) ||
isnan(double_value_)) {
if (IsSpecialDouble()) {
return true;
}
return false;
}
ASSERT(!handle_.is_null());
HandleDereferenceGuard allow_dereference_for_immovable_check(
isolate(), HandleDereferenceGuard::ALLOW);
Heap* heap = isolate()->heap();
// We should have handled minus_zero_value and nan_value in the
// has_double_value_ clause above.
// Dereferencing is safe to compare against immovable singletons.
HandleDereferenceGuard allow_handle_deref(isolate(),
HandleDereferenceGuard::ALLOW);
ASSERT(*handle_ != heap->minus_zero_value());
ASSERT(*handle_ != heap->nan_value());
return *handle_ == heap->undefined_value() ||
*handle_ == heap->null_value() ||
*handle_ == heap->true_value() ||
*handle_ == heap->false_value() ||
*handle_ == heap->the_hole_value() ||
*handle_ == heap->empty_string();
ASSERT(unique_id_ != UniqueValueId(heap->minus_zero_value()));
ASSERT(unique_id_ != UniqueValueId(heap->nan_value()));
return unique_id_ == UniqueValueId(heap->undefined_value()) ||
unique_id_ == UniqueValueId(heap->null_value()) ||
unique_id_ == UniqueValueId(heap->true_value()) ||
unique_id_ == UniqueValueId(heap->false_value()) ||
unique_id_ == UniqueValueId(heap->the_hole_value()) ||
unique_id_ == UniqueValueId(heap->empty_string());
}
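
IsSpecialDouble above isolates exactly the doubles where bit-pattern identity and '==' disagree: minus zero compares equal to plus zero, and NaNs compare unequal to themselves. A self-contained version of the bit test (the hole-NaN case is V8-specific and omitted):

#include <cmath>
#include <cstdint>
#include <cstring>

inline int64_t DoubleBits(double value) {
  int64_t bits;
  std::memcpy(&bits, &value, sizeof bits);  // portable BitCast<int64_t>
  return bits;
}

inline bool IsSpecialDoubleSketch(double value) {
  return DoubleBits(value) == DoubleBits(-0.0) || std::isnan(value);
}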
virtual Representation RequiredInputRepresentation(int index) {
@ -3227,7 +3274,9 @@ class HConstant: public HTemplateInstruction<0> {
return has_int32_value_;
}
virtual bool EmitAtUses() { return !representation().IsDouble(); }
virtual bool EmitAtUses() {
return !representation().IsDouble() || IsSpecialDouble();
}
virtual void PrintDataTo(StringStream* stream);
virtual HType CalculateInferredType();
bool IsInteger() { return handle()->IsSmi(); }
@@ -3246,6 +3295,16 @@ class HConstant: public HTemplateInstruction<0> {
ASSERT(HasDoubleValue());
return double_value_;
}
bool IsTheHole() const {
if (HasDoubleValue() && FixedDoubleArray::is_the_hole_nan(double_value_)) {
return true;
}
Heap* heap = isolate()->heap();
if (!handle_.is_null() && *handle_ == heap->the_hole_value()) {
return true;
}
return false;
}
bool HasNumberValue() const { return has_double_value_; }
int32_t NumberValueAsInteger32() const {
ASSERT(HasNumberValue());
@@ -3274,24 +3333,21 @@ class HConstant: public HTemplateInstruction<0> {
}
virtual intptr_t Hashcode() {
ASSERT_ALLOCATION_DISABLED;
intptr_t hash;
if (has_int32_value_) {
hash = static_cast<intptr_t>(int32_value_);
return static_cast<intptr_t>(int32_value_);
} else if (has_double_value_) {
hash = static_cast<intptr_t>(BitCast<int64_t>(double_value_));
return static_cast<intptr_t>(BitCast<int64_t>(double_value_));
} else {
ASSERT(!handle_.is_null());
// Dereferencing to use the object's raw address for hashing is safe.
HandleDereferenceGuard allow_handle_deref(isolate(),
HandleDereferenceGuard::ALLOW);
SLOW_ASSERT(Heap::RelocationLock::IsLocked(isolate()->heap()) ||
!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
hash = reinterpret_cast<intptr_t>(*handle_);
return unique_id_.Hashcode();
}
}
return hash;
virtual void FinalizeUniqueValueId() {
if (!has_double_value_) {
ASSERT(!handle_.is_null());
unique_id_ = UniqueValueId(handle_);
}
}
#ifdef DEBUG
@ -3315,7 +3371,7 @@ class HConstant: public HTemplateInstruction<0> {
} else {
ASSERT(!handle_.is_null());
return !other_constant->handle_.is_null() &&
handle_.is_identical_to(other_constant->handle_);
unique_id_ == other_constant->unique_id_;
}
}
@@ -3329,6 +3385,7 @@ class HConstant: public HTemplateInstruction<0> {
// constant is non-numeric, handle_ always points to a valid
// constant HeapObject.
Handle<Object> handle_;
UniqueValueId unique_id_;
// We store the HConstant in the most specific form safely possible.
// The two flags, has_int32_value_ and has_double_value_ tell us if
@@ -4740,7 +4797,7 @@ class HUnknownOSRValue: public HTemplateInstruction<0> {
class HLoadGlobalCell: public HTemplateInstruction<0> {
public:
HLoadGlobalCell(Handle<JSGlobalPropertyCell> cell, PropertyDetails details)
: cell_(cell), details_(details) {
: cell_(cell), details_(details), unique_id_() {
set_representation(Representation::Tagged());
SetFlag(kUseGVN);
SetGVNFlag(kDependsOnGlobalVars);
@@ -4752,13 +4809,11 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
virtual void PrintDataTo(StringStream* stream);
virtual intptr_t Hashcode() {
ASSERT_ALLOCATION_DISABLED;
// Dereferencing to use the object's raw address for hashing is safe.
HandleDereferenceGuard allow_handle_deref(isolate(),
HandleDereferenceGuard::ALLOW);
SLOW_ASSERT(Heap::RelocationLock::IsLocked(isolate()->heap()) ||
!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
return reinterpret_cast<intptr_t>(*cell_);
return unique_id_.Hashcode();
}
virtual void FinalizeUniqueValueId() {
unique_id_ = UniqueValueId(cell_);
}
virtual Representation RequiredInputRepresentation(int index) {
@@ -4770,7 +4825,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
protected:
virtual bool DataEquals(HValue* other) {
HLoadGlobalCell* b = HLoadGlobalCell::cast(other);
return cell_.is_identical_to(b->cell());
return unique_id_ == b->unique_id_;
}
private:
@@ -4778,6 +4833,7 @@ class HLoadGlobalCell: public HTemplateInstruction<0> {
Handle<JSGlobalPropertyCell> cell_;
PropertyDetails details_;
UniqueValueId unique_id_;
};
@@ -4860,7 +4916,6 @@ class HAllocate: public HTemplateInstruction<2> {
HAllocate(HValue* context, HValue* size, HType type, Flags flags)
: type_(type),
flags_(flags) {
ASSERT((flags & CAN_ALLOCATE_IN_OLD_DATA_SPACE) == 0); // unimplemented
SetOperandAt(0, context);
SetOperandAt(1, size);
set_representation(Representation::Tagged());
@@ -4955,7 +5010,6 @@ inline bool ReceiverObjectNeedsWriteBarrier(HValue* object,
new_space_dominator);
}
if (object != new_space_dominator) return true;
if (object->IsFastLiteral()) return false;
if (object->IsAllocateObject()) return false;
if (object->IsAllocate()) {
return !HAllocate::cast(object)->GuaranteedInNewSpace();
@@ -5238,12 +5292,16 @@ class HLoadNamedFieldPolymorphic: public HTemplateInstruction<2> {
static const int kMaxLoadPolymorphism = 4;
virtual void FinalizeUniqueValueId();
protected:
virtual bool DataEquals(HValue* value);
private:
SmallMapList types_;
Handle<String> name_;
ZoneList<UniqueValueId> types_unique_ids_;
UniqueValueId name_unique_id_;
bool need_generic_;
};
@@ -5507,6 +5565,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
: name_(name),
is_in_object_(in_object),
offset_(offset),
transition_unique_id_(),
new_space_dominator_(NULL) {
SetOperandAt(0, obj);
SetOperandAt(1, val);
@@ -5537,6 +5596,7 @@ class HStoreNamedField: public HTemplateInstruction<2> {
bool is_in_object() const { return is_in_object_; }
int offset() const { return offset_; }
Handle<Map> transition() const { return transition_; }
UniqueValueId transition_unique_id() const { return transition_unique_id_; }
void set_transition(Handle<Map> map) { transition_ = map; }
HValue* new_space_dominator() const { return new_space_dominator_; }
@@ -5549,11 +5609,16 @@ class HStoreNamedField: public HTemplateInstruction<2> {
return ReceiverObjectNeedsWriteBarrier(object(), new_space_dominator());
}
virtual void FinalizeUniqueValueId() {
transition_unique_id_ = UniqueValueId(transition_);
}
private:
Handle<String> name_;
bool is_in_object_;
int offset_;
Handle<Map> transition_;
UniqueValueId transition_unique_id_;
HValue* new_space_dominator_;
};
@@ -5677,6 +5742,10 @@ class HStoreKeyed
bool IsDehoisted() { return is_dehoisted_; }
void SetDehoisted(bool is_dehoisted) { is_dehoisted_ = is_dehoisted; }
bool IsConstantHoleStore() {
return value()->IsConstant() && HConstant::cast(value())->IsTheHole();
}
virtual void SetSideEffectDominator(GVNFlag side_effect, HValue* dominator) {
ASSERT(side_effect == kChangesNewSpacePromotion);
new_space_dominator_ = dominator;
@@ -5750,6 +5819,8 @@ class HTransitionElementsKind: public HTemplateInstruction<2> {
Handle<Map> transitioned_map)
: original_map_(original_map),
transitioned_map_(transitioned_map),
original_map_unique_id_(),
transitioned_map_unique_id_(),
from_kind_(original_map->elements_kind()),
to_kind_(transitioned_map->elements_kind()) {
SetOperandAt(0, object);
@@ -5780,18 +5851,25 @@ class HTransitionElementsKind: public HTemplateInstruction<2> {
virtual void PrintDataTo(StringStream* stream);
virtual void FinalizeUniqueValueId() {
original_map_unique_id_ = UniqueValueId(original_map_);
transitioned_map_unique_id_ = UniqueValueId(transitioned_map_);
}
DECLARE_CONCRETE_INSTRUCTION(TransitionElementsKind)
protected:
virtual bool DataEquals(HValue* other) {
HTransitionElementsKind* instr = HTransitionElementsKind::cast(other);
return original_map_.is_identical_to(instr->original_map()) &&
transitioned_map_.is_identical_to(instr->transitioned_map());
return original_map_unique_id_ == instr->original_map_unique_id_ &&
transitioned_map_unique_id_ == instr->transitioned_map_unique_id_;
}
private:
Handle<Map> original_map_;
Handle<Map> transitioned_map_;
UniqueValueId original_map_unique_id_;
UniqueValueId transitioned_map_unique_id_;
ElementsKind from_kind_;
ElementsKind to_kind_;
};
@@ -5966,45 +6044,6 @@ class HMaterializedLiteral: public HTemplateInstruction<V> {
};
class HFastLiteral: public HMaterializedLiteral<1> {
public:
HFastLiteral(HValue* context,
Handle<JSObject> boilerplate,
int total_size,
int literal_index,
int depth,
AllocationSiteMode mode)
: HMaterializedLiteral<1>(literal_index, depth, mode),
boilerplate_(boilerplate),
total_size_(total_size) {
SetOperandAt(0, context);
SetGVNFlag(kChangesNewSpacePromotion);
}
// Maximum depth and total number of elements and properties for literal
// graphs to be considered for fast deep-copying.
static const int kMaxLiteralDepth = 3;
static const int kMaxLiteralProperties = 8;
HValue* context() { return OperandAt(0); }
Handle<JSObject> boilerplate() const { return boilerplate_; }
int total_size() const { return total_size_; }
virtual Representation RequiredInputRepresentation(int index) {
return Representation::Tagged();
}
virtual Handle<Map> GetMonomorphicJSObjectMap() {
return Handle<Map>(boilerplate()->map());
}
virtual HType CalculateInferredType();
DECLARE_CONCRETE_INSTRUCTION(FastLiteral)
private:
Handle<JSObject> boilerplate_;
int total_size_;
};
class HArrayLiteral: public HMaterializedLiteral<1> {
public:
HArrayLiteral(HValue* context,
@@ -6192,7 +6231,7 @@ class HToFastProperties: public HUnaryOperation {
// This instruction is not marked as having side effects, but
// changes the map of the input operand. Use it only when creating
// object literals.
ASSERT(value->IsObjectLiteral() || value->IsFastLiteral());
ASSERT(value->IsObjectLiteral());
set_representation(Representation::Tagged());
}

613
deps/v8/src/hydrogen.cc

@@ -517,7 +517,6 @@ class ReachabilityAnalyzer BASE_EMBEDDED {
void HGraph::Verify(bool do_full_verify) const {
// Allow dereferencing for debug mode verification.
Heap::RelocationLock(isolate()->heap());
HandleDereferenceGuard allow_handle_deref(isolate(),
HandleDereferenceGuard::ALLOW);
for (int i = 0; i < blocks_.length(); i++) {
@@ -619,6 +618,7 @@ HConstant* HGraph::GetConstant##Name() { \
if (!constant_##name##_.is_set()) { \
HConstant* constant = new(zone()) HConstant( \
isolate()->factory()->name##_value(), \
UniqueValueId(isolate()->heap()->name##_value()), \
Representation::Tagged(), \
htype, \
false, \
@@ -880,6 +880,7 @@ HGraph* HGraphBuilder::CreateGraph() {
HPhase phase("H_Block building", isolate());
set_current_block(graph()->entry_block());
if (!BuildGraph()) return NULL;
graph()->FinalizeUniqueValueIds();
return graph_;
}
@@ -887,6 +888,9 @@ HGraph* HGraphBuilder::CreateGraph() {
HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
ASSERT(current_block() != NULL);
current_block()->AddInstruction(instr);
if (no_side_effects_scope_count_ > 0) {
instr->SetFlag(HValue::kHasNoObservableSideEffects);
}
return instr;
}
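
AddInstruction now tags everything emitted inside a NoObservableSideEffectsScope, and HasObservableSideEffects() in hydrogen-instructions.h short-circuits on that flag. A sketch of the RAII counter this implies -- the real scope class is not shown in this diff, so the stub below only demonstrates the mechanism:

class BuilderSketch {
 public:
  class NoObservableSideEffectsScope {
   public:
    explicit NoObservableSideEffectsScope(BuilderSketch* builder)
        : builder_(builder) {
      ++builder_->no_side_effects_scope_count_;
    }
    ~NoObservableSideEffectsScope() {
      --builder_->no_side_effects_scope_count_;
    }
   private:
    BuilderSketch* builder_;
  };

  // Mirrors the check added to AddInstruction above.
  bool ShouldTagNoObservableSideEffects() const {
    return no_side_effects_scope_count_ > 0;
  }

 private:
  int no_side_effects_scope_count_ = 0;
};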
@@ -894,6 +898,7 @@ HInstruction* HGraphBuilder::AddInstruction(HInstruction* instr) {
void HGraphBuilder::AddSimulate(BailoutId id,
RemovableSimulate removable) {
ASSERT(current_block() != NULL);
ASSERT(no_side_effects_scope_count_ == 0);
current_block()->AddSimulate(id, removable);
environment()->set_previous_ast_id(id);
}
@@ -1041,7 +1046,6 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
HValue* length,
HValue* key,
bool is_js_array) {
BailoutId ast_id = environment()->previous_ast_id();
Zone* zone = this->zone();
IfBuilder length_checker(this);
@@ -1074,7 +1078,6 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
HAdd::New(zone, context, length, graph_->GetConstant1()));
new_length->ChangeRepresentation(Representation::Integer32());
new_length->ClearFlag(HValue::kCanOverflow);
AddSimulate(ast_id, REMOVABLE_SIMULATE);
Factory* factory = isolate()->factory();
HInstruction* length_store = AddInstruction(new(zone) HStoreNamedField(
@@ -1083,7 +1086,6 @@ HValue* HGraphBuilder::BuildCheckForCapacityGrow(HValue* object,
new_length, true,
JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
AddSimulate(ast_id, REMOVABLE_SIMULATE);
}
length_checker.BeginElse();
@@ -1210,6 +1212,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
}
if (IsGrowStoreMode(store_mode)) {
NoObservableSideEffectsScope no_effects(this);
elements = BuildCheckForCapacityGrow(object, elements, elements_kind,
length, key, is_js_array);
checked_key = key;
@ -1219,6 +1223,8 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
if (is_store && (fast_elements || fast_smi_only_elements)) {
if (store_mode == STORE_NO_TRANSITION_HANDLE_COW) {
NoObservableSideEffectsScope no_effects(this);
elements = BuildCopyElementsOnWrite(object, elements, elements_kind,
length);
} else {
@@ -1238,7 +1244,6 @@ HInstruction* HGraphBuilder::BuildUncheckedMonomorphicElementAccess(
HValue* HGraphBuilder::BuildAllocateElements(HValue* context,
ElementsKind kind,
HValue* capacity) {
BailoutId ast_id = current_block()->last_environment()->previous_ast_id();
Zone* zone = this->zone();
int elements_size = IsFastDoubleElementsKind(kind)
@@ -1260,10 +1265,14 @@ HValue* HGraphBuilder::BuildAllocateElements(HValue* context,
total_size->ClearFlag(HValue::kCanOverflow);
HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
// TODO(hpayer): add support for old data space
if (FLAG_pretenure_literals && !IsFastDoubleElementsKind(kind)) {
flags = static_cast<HAllocate::Flags>(
flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
if (FLAG_pretenure_literals) {
if (IsFastDoubleElementsKind(kind)) {
flags = static_cast<HAllocate::Flags>(
flags | HAllocate::CAN_ALLOCATE_IN_OLD_DATA_SPACE);
} else {
flags = static_cast<HAllocate::Flags>(
flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
}
}
if (IsFastDoubleElementsKind(kind)) {
flags = static_cast<HAllocate::Flags>(
@@ -1273,27 +1282,40 @@ HValue* HGraphBuilder::BuildAllocateElements(HValue* context,
HValue* elements =
AddInstruction(new(zone) HAllocate(context, total_size,
HType::JSArray(), flags));
return elements;
}
void HGraphBuilder::BuildInitializeElements(HValue* elements,
ElementsKind kind,
HValue* capacity) {
Zone* zone = this->zone();
Factory* factory = isolate()->factory();
Handle<Map> map = IsFastDoubleElementsKind(kind)
? factory->fixed_double_array_map()
: factory->fixed_array_map();
BuildStoreMap(elements, map, ast_id);
BuildStoreMap(elements, map);
Handle<String> fixed_array_length_field_name = factory->length_field_string();
HInstruction* store_length =
new(zone) HStoreNamedField(elements, fixed_array_length_field_name,
capacity, true, FixedArray::kLengthOffset);
AddInstruction(store_length);
AddSimulate(ast_id, REMOVABLE_SIMULATE);
}
return elements;
HValue* HGraphBuilder::BuildAllocateAndInitializeElements(HValue* context,
ElementsKind kind,
HValue* capacity) {
HValue* new_elements =
BuildAllocateElements(context, kind, capacity);
BuildInitializeElements(new_elements, kind, capacity);
return new_elements;
}
HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
HValue* map,
BailoutId id) {
HValue* map) {
Zone* zone = this->zone();
Factory* factory = isolate()->factory();
Handle<String> map_field_name = factory->map_field_string();
@@ -1302,18 +1324,16 @@ HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
true, JSObject::kMapOffset);
store_map->SetGVNFlag(kChangesMaps);
AddInstruction(store_map);
AddSimulate(id, REMOVABLE_SIMULATE);
return store_map;
}
HInstruction* HGraphBuilder::BuildStoreMap(HValue* object,
Handle<Map> map,
BailoutId id) {
Handle<Map> map) {
Zone* zone = this->zone();
HValue* map_constant =
AddInstruction(new(zone) HConstant(map, Representation::Tagged()));
return BuildStoreMap(object, map_constant, id);
return BuildStoreMap(object, map_constant);
}
@@ -1372,7 +1392,7 @@ HValue* HGraphBuilder::BuildGrowElementsCapacity(HValue* object,
BuildNewSpaceArrayCheck(new_capacity, kind);
HValue* new_elements =
BuildAllocateElements(context, kind, new_capacity);
BuildAllocateAndInitializeElements(context, kind, new_capacity);
BuildCopyElements(context, elements, kind,
new_elements, kind,
@@ -1395,7 +1415,6 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* context,
ElementsKind elements_kind,
HValue* from,
HValue* to) {
BailoutId ast_id = current_block()->last_environment()->previous_ast_id();
// Fast elements kinds need to be initialized in case statements below cause
// a garbage collection.
Factory* factory = isolate()->factory();
@ -1413,7 +1432,6 @@ void HGraphBuilder::BuildFillElementsWithHole(HValue* context,
HValue* key = builder.BeginBody(from, to, Token::LT);
AddInstruction(new(zone) HStoreKeyed(elements, key, hole, elements_kind));
AddSimulate(ast_id, REMOVABLE_SIMULATE);
builder.EndBody();
}
@@ -1426,7 +1444,6 @@ void HGraphBuilder::BuildCopyElements(HValue* context,
ElementsKind to_elements_kind,
HValue* length,
HValue* capacity) {
BailoutId ast_id = environment()->previous_ast_id();
bool pre_fill_with_holes =
IsFastDoubleElementsKind(from_elements_kind) &&
IsFastObjectElementsKind(to_elements_kind);
@@ -1450,7 +1467,6 @@ void HGraphBuilder::BuildCopyElements(HValue* context,
AddInstruction(new(zone()) HStoreKeyed(to_elements, key, element,
to_elements_kind));
AddSimulate(ast_id, REMOVABLE_SIMULATE);
builder.EndBody();
@@ -1462,6 +1478,119 @@ void HGraphBuilder::BuildCopyElements(HValue* context,
}
HValue* HGraphBuilder::BuildCloneShallowArray(HContext* context,
HValue* boilerplate,
AllocationSiteMode mode,
ElementsKind kind,
int length) {
Zone* zone = this->zone();
Factory* factory = isolate()->factory();
NoObservableSideEffectsScope no_effects(this);
// All sizes here are multiples of kPointerSize.
int size = JSArray::kSize;
if (mode == TRACK_ALLOCATION_SITE) {
size += AllocationSiteInfo::kSize;
}
int elems_offset = size;
if (length > 0) {
size += IsFastDoubleElementsKind(kind)
? FixedDoubleArray::SizeFor(length)
: FixedArray::SizeFor(length);
}
HAllocate::Flags allocate_flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
if (IsFastDoubleElementsKind(kind)) {
allocate_flags = static_cast<HAllocate::Flags>(
allocate_flags | HAllocate::ALLOCATE_DOUBLE_ALIGNED);
}
// Allocate both the JS array and the elements array in one big
// allocation. This avoids multiple limit checks.
HValue* size_in_bytes =
AddInstruction(new(zone) HConstant(size, Representation::Integer32()));
HInstruction* object =
AddInstruction(new(zone) HAllocate(context,
size_in_bytes,
HType::JSObject(),
allocate_flags));
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
if ((i != JSArray::kElementsOffset) || (length == 0)) {
HInstruction* value =
AddInstruction(new(zone) HLoadNamedField(boilerplate, true, i));
if (i != JSArray::kMapOffset) {
AddInstruction(new(zone) HStoreNamedField(object,
factory->empty_string(),
value,
true, i));
} else {
BuildStoreMap(object, value);
}
}
}
// Create an allocation site info if requested.
if (mode == TRACK_ALLOCATION_SITE) {
HValue* alloc_site =
AddInstruction(new(zone) HInnerAllocatedObject(object, JSArray::kSize));
Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_info_map());
BuildStoreMap(alloc_site, alloc_site_map);
int alloc_payload_offset = AllocationSiteInfo::kPayloadOffset;
AddInstruction(new(zone) HStoreNamedField(alloc_site,
factory->empty_string(),
boilerplate,
true, alloc_payload_offset));
}
if (length > 0) {
// Get hold of the elements array of the boilerplate and setup the
// elements pointer in the resulting object.
HValue* boilerplate_elements =
AddInstruction(new(zone) HLoadElements(boilerplate, NULL));
HValue* object_elements =
AddInstruction(new(zone) HInnerAllocatedObject(object, elems_offset));
AddInstruction(new(zone) HStoreNamedField(object,
factory->elements_field_string(),
object_elements,
true, JSObject::kElementsOffset));
// Copy the elements array header.
for (int i = 0; i < FixedArrayBase::kHeaderSize; i += kPointerSize) {
HInstruction* value =
AddInstruction(new(zone) HLoadNamedField(boilerplate_elements,
true, i));
AddInstruction(new(zone) HStoreNamedField(object_elements,
factory->empty_string(),
value,
true, i));
}
// Copy the elements array contents.
// TODO(mstarzinger): Teach HGraphBuilder::BuildCopyElements to unfold
// copying loops with constant length up to a given boundary and use this
// helper here instead.
for (int i = 0; i < length; i++) {
HValue* key_constant =
AddInstruction(new(zone) HConstant(i, Representation::Integer32()));
HInstruction* value =
AddInstruction(new(zone) HLoadKeyed(boilerplate_elements,
key_constant,
NULL,
kind));
AddInstruction(new(zone) HStoreKeyed(object_elements,
key_constant,
value,
kind));
}
}
return object;
}
HOptimizedGraphBuilder::HOptimizedGraphBuilder(CompilationInfo* info,
TypeFeedbackOracle* oracle)
: HGraphBuilder(info),
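
The single-allocation layout in BuildCloneShallowArray above reads most clearly as plain arithmetic: array header, then (optionally) the AllocationSiteInfo, then the elements store, all inside one HAllocate so only one allocation-limit check is emitted. A worked example with illustrative 32-bit constants (the exact kSize values are assumptions):

#include <cstdio>

int main() {
  const int kPointerSize = 4;                 // ia32, for illustration
  const int kJSArraySize = 4 * kPointerSize;  // map/properties/elements/length
  const int kAllocationSiteInfoSize = 2 * kPointerSize;
  const int kFixedArrayHeaderSize = 2 * kPointerSize;

  const bool track_allocation_site = true;
  const int length = 3;                       // boilerplate element count

  int size = kJSArraySize;
  if (track_allocation_site) size += kAllocationSiteInfoSize;
  int elems_offset = size;                    // elements follow the headers
  if (length > 0) size += kFixedArrayHeaderSize + length * kPointerSize;

  std::printf("allocate %d bytes; elements at offset %d\n",
              size, elems_offset);
  return 0;
}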
@@ -1568,6 +1697,19 @@ HBasicBlock* HGraph::CreateBasicBlock() {
}
void HGraph::FinalizeUniqueValueIds() {
AssertNoAllocation no_gc;
ASSERT(!isolate()->optimizing_compiler_thread()->IsOptimizerThread());
for (int i = 0; i < blocks()->length(); ++i) {
for (HInstruction* instr = blocks()->at(i)->first();
instr != NULL;
instr = instr->next()) {
instr->FinalizeUniqueValueId();
}
}
}
void HGraph::Canonicalize() {
if (!FLAG_use_canonicalizing) return;
HPhase phase("H_Canonicalize", this);
@@ -2358,8 +2500,10 @@ HValueMap::HValueMap(Zone* zone, const HValueMap* other)
array_(zone->NewArray<HValueMapListElement>(other->array_size_)),
lists_(zone->NewArray<HValueMapListElement>(other->lists_size_)),
free_list_head_(other->free_list_head_) {
memcpy(array_, other->array_, array_size_ * sizeof(HValueMapListElement));
memcpy(lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
OS::MemCopy(
array_, other->array_, array_size_ * sizeof(HValueMapListElement));
OS::MemCopy(
lists_, other->lists_, lists_size_ * sizeof(HValueMapListElement));
}
@@ -2485,7 +2629,7 @@ void HValueMap::ResizeLists(int new_size, Zone* zone) {
lists_ = new_lists;
if (old_lists != NULL) {
memcpy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
OS::MemCopy(lists_, old_lists, old_size * sizeof(HValueMapListElement));
}
for (int i = old_size; i < lists_size_; ++i) {
lists_[i].next = free_list_head_;
@@ -2531,7 +2675,7 @@ HSideEffectMap::HSideEffectMap(HSideEffectMap* other) : count_(other->count_) {
HSideEffectMap& HSideEffectMap::operator= (const HSideEffectMap& other) {
if (this != &other) {
memcpy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
OS::MemCopy(data_, other.data_, kNumberOfTrackedSideEffects * kPointerSize);
}
return *this;
}
@@ -2829,7 +2973,7 @@ GVN_UNTRACKED_FLAG_LIST(DECLARE_FLAG)
size_t string_len = strlen(underlying_buffer) + 1;
ASSERT(string_len <= sizeof(underlying_buffer));
char* result = new char[strlen(underlying_buffer) + 1];
memcpy(result, underlying_buffer, string_len);
OS::MemCopy(result, underlying_buffer, string_len);
return SmartArrayPointer<char>(result);
}
@@ -3295,10 +3439,11 @@ void HInferRepresentation::Analyze() {
void HGraph::MergeRemovableSimulates() {
ZoneList<HSimulate*> mergelist(2, zone());
for (int i = 0; i < blocks()->length(); ++i) {
HBasicBlock* block = blocks()->at(i);
// Always reset the folding candidate at the start of a block.
HSimulate* folding_candidate = NULL;
// Make sure the merge list is empty at the start of a block.
ASSERT(mergelist.is_empty());
// Nasty heuristic: Never remove the first simulate in a block. This
// just so happens to have a beneficial effect on register allocation.
bool first = true;
@@ -3309,33 +3454,38 @@ void HGraph::MergeRemovableSimulates() {
// in the outer environment.
// (Before each HEnterInlined, there is a non-foldable HSimulate
// anyway, so we get the barrier in the other direction for free.)
if (folding_candidate != NULL) {
folding_candidate->DeleteAndReplaceWith(NULL);
// Simply remove all accumulated simulates without merging. This
// is safe because simulates after instructions with side effects
// are never added to the merge list.
while (!mergelist.is_empty()) {
mergelist.RemoveLast()->DeleteAndReplaceWith(NULL);
}
folding_candidate = NULL;
continue;
}
// If we have an HSimulate and a candidate, perform the folding.
// Skip the non-simulates and the first simulate.
if (!current->IsSimulate()) continue;
if (first) {
first = false;
continue;
}
HSimulate* current_simulate = HSimulate::cast(current);
if (folding_candidate != NULL) {
folding_candidate->MergeInto(current_simulate);
folding_candidate->DeleteAndReplaceWith(NULL);
folding_candidate = NULL;
}
// Check if the current simulate is a candidate for folding.
if (current_simulate->previous()->HasObservableSideEffects() &&
!current_simulate->next()->IsSimulate()) {
continue;
}
if (!current_simulate->is_candidate_for_removal()) {
if ((current_simulate->previous()->HasObservableSideEffects() &&
!current_simulate->next()->IsSimulate()) ||
!current_simulate->is_candidate_for_removal()) {
// This simulate is not suitable for folding.
// Fold the ones accumulated so far.
current_simulate->MergeWith(&mergelist);
continue;
} else {
// Accumulate this simulate for folding later on.
mergelist.Add(current_simulate, zone());
}
folding_candidate = current_simulate;
}
if (!mergelist.is_empty()) {
// Merge the accumulated simulates at the end of the block.
HSimulate* last = mergelist.RemoveLast();
last->MergeWith(&mergelist);
}
}
}
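
The rewrite above changes MergeRemovableSimulates from "remember one folding candidate" to "accumulate a per-block list and fold it at barriers". Its shape in miniature, with stub instruction records -- this keeps only the list discipline and approximates the real suitability test, so treat it as a reading aid, not the algorithm itself:

#include <vector>

struct InstrSketch {
  bool is_simulate = false;
  bool removable = false;         // REMOVABLE_SIMULATE in the diff
  bool has_side_effects = false;  // an observable-side-effect barrier
};

// Returns indices of the simulates that would be merged away or dropped.
std::vector<size_t> FoldSimulates(const std::vector<InstrSketch>& block) {
  std::vector<size_t> deleted;
  std::vector<size_t> backlog;    // "mergelist" in the diff
  bool first = true;
  for (size_t i = 0; i < block.size(); ++i) {
    const InstrSketch& instr = block[i];
    if (instr.has_side_effects) {
      // Barrier: drop the backlog without merging; safe because simulates
      // after side-effecting instructions are never accumulated.
      deleted.insert(deleted.end(), backlog.begin(), backlog.end());
      backlog.clear();
      continue;
    }
    if (!instr.is_simulate) continue;
    if (first) { first = false; continue; }  // never remove the first one
    if (instr.removable) {
      backlog.push_back(i);       // fold later
    } else {
      // Unsuitable simulate: it survives and absorbs the backlog.
      deleted.insert(deleted.end(), backlog.begin(), backlog.end());
      backlog.clear();
    }
  }
  if (!backlog.empty()) {         // block end: the last one absorbs the rest
    backlog.pop_back();
    deleted.insert(deleted.end(), backlog.begin(), backlog.end());
  }
  return deleted;
}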
@@ -4193,8 +4343,6 @@ bool HOptimizedGraphBuilder::BuildGraph() {
void HGraph::GlobalValueNumbering() {
// Perform common subexpression elimination and loop-invariant code motion.
if (FLAG_use_gvn) {
// We use objects' raw addresses for identification, so they must not move.
Heap::RelocationLock relocation_lock(isolate()->heap());
HPhase phase("H_Global value numbering", this);
HGlobalValueNumberer gvn(this, info());
bool removed_side_effects = gvn.Analyze();
@@ -5977,7 +6125,8 @@ static bool LookupSetter(Handle<Map> map,
static bool IsFastLiteral(Handle<JSObject> boilerplate,
int max_depth,
int* max_properties,
int* total_size) {
int* data_size,
int* pointer_size) {
ASSERT(max_depth >= 0 && *max_properties >= 0);
if (max_depth == 0) return false;
@@ -5986,7 +6135,7 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
if (elements->length() > 0 &&
elements->map() != isolate->heap()->fixed_cow_array_map()) {
if (boilerplate->HasFastDoubleElements()) {
*total_size += FixedDoubleArray::SizeFor(elements->length());
*data_size += FixedDoubleArray::SizeFor(elements->length());
} else if (boilerplate->HasFastObjectElements()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
int length = elements->length();
@@ -5998,12 +6147,13 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
if (!IsFastLiteral(value_object,
max_depth - 1,
max_properties,
total_size)) {
data_size,
pointer_size)) {
return false;
}
}
}
*total_size += FixedArray::SizeFor(length);
*pointer_size += FixedArray::SizeFor(length);
} else {
return false;
}
@@ -6022,14 +6172,15 @@ static bool IsFastLiteral(Handle<JSObject> boilerplate,
if (!IsFastLiteral(value_object,
max_depth - 1,
max_properties,
total_size)) {
data_size,
pointer_size)) {
return false;
}
}
}
}
*total_size += boilerplate->map()->instance_size();
*pointer_size += boilerplate->map()->instance_size();
return true;
}
@@ -6043,34 +6194,41 @@ void HOptimizedGraphBuilder::VisitObjectLiteral(ObjectLiteral* expr) {
HInstruction* literal;
// Check whether to use fast or slow deep-copying for boilerplate.
int total_size = 0;
int max_properties = HFastLiteral::kMaxLiteralProperties;
Handle<Object> boilerplate(closure->literals()->get(expr->literal_index()),
isolate());
if (boilerplate->IsJSObject() &&
IsFastLiteral(Handle<JSObject>::cast(boilerplate),
HFastLiteral::kMaxLiteralDepth,
int data_size = 0;
int pointer_size = 0;
int max_properties = kMaxFastLiteralProperties;
Handle<Object> original_boilerplate(closure->literals()->get(
expr->literal_index()), isolate());
if (original_boilerplate->IsJSObject() &&
IsFastLiteral(Handle<JSObject>::cast(original_boilerplate),
kMaxFastLiteralDepth,
&max_properties,
&total_size)) {
Handle<JSObject> boilerplate_object = Handle<JSObject>::cast(boilerplate);
literal = new(zone()) HFastLiteral(context,
boilerplate_object,
total_size,
expr->literal_index(),
expr->depth(),
DONT_TRACK_ALLOCATION_SITE);
&data_size,
&pointer_size)) {
Handle<JSObject> original_boilerplate_object =
Handle<JSObject>::cast(original_boilerplate);
Handle<JSObject> boilerplate_object =
DeepCopy(original_boilerplate_object);
literal = BuildFastLiteral(context,
boilerplate_object,
original_boilerplate_object,
data_size,
pointer_size,
DONT_TRACK_ALLOCATION_SITE);
} else {
literal = new(zone()) HObjectLiteral(context,
expr->constant_properties(),
expr->fast_elements(),
expr->literal_index(),
expr->depth(),
expr->has_function());
literal = AddInstruction(
new(zone()) HObjectLiteral(context,
expr->constant_properties(),
expr->fast_elements(),
expr->literal_index(),
expr->depth(),
expr->has_function()));
}
// The object is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
PushAndAdd(literal);
Push(literal);
expr->CalculateEmitStore(zone());
@@ -6167,9 +6325,10 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
}
}
Handle<JSObject> boilerplate = Handle<JSObject>::cast(raw_boilerplate);
Handle<JSObject> original_boilerplate_object =
Handle<JSObject>::cast(raw_boilerplate);
ElementsKind boilerplate_elements_kind =
Handle<JSObject>::cast(boilerplate)->GetElementsKind();
Handle<JSObject>::cast(original_boilerplate_object)->GetElementsKind();
// TODO(mvstanton): This heuristic is only a temporary solution. In the
// end, we want to quit creating allocation site info after a certain number
@@ -6178,33 +6337,38 @@ void HOptimizedGraphBuilder::VisitArrayLiteral(ArrayLiteral* expr) {
boilerplate_elements_kind);
// Check whether to use fast or slow deep-copying for boilerplate.
int total_size = 0;
int max_properties = HFastLiteral::kMaxLiteralProperties;
if (IsFastLiteral(boilerplate,
HFastLiteral::kMaxLiteralDepth,
int data_size = 0;
int pointer_size = 0;
int max_properties = kMaxFastLiteralProperties;
if (IsFastLiteral(original_boilerplate_object,
kMaxFastLiteralDepth,
&max_properties,
&total_size)) {
&data_size,
&pointer_size)) {
if (mode == TRACK_ALLOCATION_SITE) {
total_size += AllocationSiteInfo::kSize;
}
literal = new(zone()) HFastLiteral(context,
boilerplate,
total_size,
expr->literal_index(),
expr->depth(),
mode);
pointer_size += AllocationSiteInfo::kSize;
}
Handle<JSObject> boilerplate_object = DeepCopy(original_boilerplate_object);
literal = BuildFastLiteral(context,
boilerplate_object,
original_boilerplate_object,
data_size,
pointer_size,
mode);
} else {
literal = new(zone()) HArrayLiteral(context,
boilerplate,
length,
expr->literal_index(),
expr->depth(),
mode);
literal = AddInstruction(
new(zone()) HArrayLiteral(context,
original_boilerplate_object,
length,
expr->literal_index(),
expr->depth(),
mode));
}
// The array is expected in the bailout environment during computation
// of the property values and is the value of the entire expression.
PushAndAdd(literal);
Push(literal);
HLoadElements* elements = NULL;
@@ -6420,8 +6584,9 @@ bool HOptimizedGraphBuilder::HandlePolymorphicArrayLengthLoad(
}
AddInstruction(new(zone()) HCheckNonSmi(object));
HInstruction* typecheck =
AddInstruction(HCheckInstanceType::NewIsJSArray(object, zone()));
AddInstruction(new(zone()) HCheckMaps(object, types, zone()));
HInstruction* instr =
HLoadNamedField::NewArrayLength(zone(), object, typecheck);
instr->set_position(expr->position());
@@ -7113,8 +7278,10 @@ HInstruction* HOptimizedGraphBuilder::BuildLoadNamedMonomorphic(
Handle<JSObject> holder(lookup.holder());
Handle<Map> holder_map(holder->map());
AddCheckMap(object, map);
HInstruction* holder_value = AddInstruction(
AddInstruction(
new(zone()) HCheckPrototypeMaps(prototype, holder, zone()));
HValue* holder_value = AddInstruction(
new(zone()) HConstant(holder, Representation::Tagged()));
return BuildLoadNamedField(holder_value, holder_map, &lookup);
}
@@ -7990,8 +8157,7 @@ bool HOptimizedGraphBuilder::TryInline(CallKind call_kind,
// Parse and allocate variables.
CompilationInfo target_info(target, zone());
Handle<SharedFunctionInfo> target_shared(target->shared());
if (!ParserApi::Parse(&target_info, kNoParsingFlags) ||
!Scope::Analyze(&target_info)) {
if (!Parser::Parse(&target_info) || !Scope::Analyze(&target_info)) {
if (target_info.isolate()->has_pending_exception()) {
// Parse or scope error, never optimize this function.
SetStackOverflow();
@@ -9942,6 +10108,241 @@ HInstruction* HOptimizedGraphBuilder::BuildThisFunction() {
}
HInstruction* HOptimizedGraphBuilder::BuildFastLiteral(
HValue* context,
Handle<JSObject> boilerplate_object,
Handle<JSObject> original_boilerplate_object,
int data_size,
int pointer_size,
AllocationSiteMode mode) {
Zone* zone = this->zone();
int total_size = data_size + pointer_size;
NoObservableSideEffectsScope no_effects(this);
HAllocate::Flags flags = HAllocate::CAN_ALLOCATE_IN_NEW_SPACE;
// TODO(hpayer): add support for old data space
if (FLAG_pretenure_literals &&
isolate()->heap()->ShouldGloballyPretenure() &&
data_size == 0) {
flags = static_cast<HAllocate::Flags>(
flags | HAllocate::CAN_ALLOCATE_IN_OLD_POINTER_SPACE);
}
HValue* size_in_bytes =
AddInstruction(new(zone) HConstant(total_size,
Representation::Integer32()));
HInstruction* result =
AddInstruction(new(zone) HAllocate(context,
size_in_bytes,
HType::JSObject(),
flags));
int offset = 0;
BuildEmitDeepCopy(boilerplate_object, original_boilerplate_object, result,
&offset, mode);
return result;
}
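
[Editor's note] BuildFastLiteral above folds the entire literal — the object, its elements backing store, and any nested literals — into one HAllocate of data_size + pointer_size bytes. A minimal sketch of that size/flag bookkeeping (the enum and function names here are illustrative, not the real v8 API):

    // Sketch only: mirrors the logic in BuildFastLiteral above.
    enum AllocFlags { kNewSpace = 1 << 0, kOldPointerSpace = 1 << 1 };

    static int LiteralAllocationSize(int data_size, int pointer_size,
                                     bool pretenure_literals,
                                     bool should_globally_pretenure,
                                     int* flags) {
      *flags = kNewSpace;
      // Old data space is unsupported (see the TODO above), so pretenuring
      // is only attempted when the literal has no unboxed double payload.
      if (pretenure_literals && should_globally_pretenure && data_size == 0) {
        *flags |= kOldPointerSpace;
      }
      return data_size + pointer_size;  // One allocation covers everything.
    }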
void HOptimizedGraphBuilder::BuildEmitDeepCopy(
Handle<JSObject> boilerplate_object,
Handle<JSObject> original_boilerplate_object,
HInstruction* target,
int* offset,
AllocationSiteMode mode) {
Zone* zone = this->zone();
Factory* factory = isolate()->factory();
HInstruction* original_boilerplate = AddInstruction(new(zone) HConstant(
original_boilerplate_object, Representation::Tagged()));
bool create_allocation_site_info = mode == TRACK_ALLOCATION_SITE &&
boilerplate_object->map()->CanTrackAllocationSite();
// Only elements backing stores for non-COW arrays need to be copied.
Handle<FixedArrayBase> elements(boilerplate_object->elements());
Handle<FixedArrayBase> original_elements(
original_boilerplate_object->elements());
ElementsKind kind = boilerplate_object->map()->elements_kind();
// Increase the offset so that subsequent objects end up right after
// this object and its backing store.
int object_offset = *offset;
int object_size = boilerplate_object->map()->instance_size();
int elements_size = (elements->length() > 0 &&
elements->map() != isolate()->heap()->fixed_cow_array_map()) ?
elements->Size() : 0;
int elements_offset = *offset + object_size;
int inobject_properties = boilerplate_object->map()->inobject_properties();
if (create_allocation_site_info) {
elements_offset += AllocationSiteInfo::kSize;
*offset += AllocationSiteInfo::kSize;
}
*offset += object_size + elements_size;
HValue* object_elements = BuildCopyObjectHeader(boilerplate_object, target,
object_offset, elements_offset, elements_size);
// Copy in-object properties.
HValue* object_properties =
AddInstruction(new(zone) HInnerAllocatedObject(target, object_offset));
for (int i = 0; i < inobject_properties; i++) {
Handle<Object> value =
Handle<Object>(boilerplate_object->InObjectPropertyAt(i),
isolate());
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
Handle<JSObject> original_value_object = Handle<JSObject>::cast(
Handle<Object>(original_boilerplate_object->InObjectPropertyAt(i),
isolate()));
HInstruction* value_instruction =
AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
AddInstruction(new(zone) HStoreNamedField(
object_properties, factory->unknown_field_string(), value_instruction,
true, boilerplate_object->GetInObjectPropertyOffset(i)));
BuildEmitDeepCopy(value_object, original_value_object, target,
offset, DONT_TRACK_ALLOCATION_SITE);
} else {
HInstruction* value_instruction = AddInstruction(new(zone) HConstant(
value, Representation::Tagged()));
AddInstruction(new(zone) HStoreNamedField(
object_properties, factory->unknown_field_string(), value_instruction,
true, boilerplate_object->GetInObjectPropertyOffset(i)));
}
}
// Build allocation site info if desired.
if (create_allocation_site_info) {
HValue* alloc_site =
AddInstruction(new(zone) HInnerAllocatedObject(target, JSArray::kSize));
Handle<Map> alloc_site_map(isolate()->heap()->allocation_site_info_map());
BuildStoreMap(alloc_site, alloc_site_map);
int alloc_payload_offset = AllocationSiteInfo::kPayloadOffset;
AddInstruction(new(zone) HStoreNamedField(alloc_site,
factory->payload_string(),
original_boilerplate,
true, alloc_payload_offset));
}
if (object_elements != NULL) {
HInstruction* boilerplate_elements = AddInstruction(new(zone) HConstant(
elements, Representation::Tagged()));
int elements_length = elements->length();
HValue* object_elements_length =
AddInstruction(new(zone) HConstant(
elements_length, Representation::Integer32()));
BuildInitializeElements(object_elements, kind, object_elements_length);
// Copy elements backing store content.
if (elements->IsFixedDoubleArray()) {
for (int i = 0; i < elements_length; i++) {
HValue* key_constant =
AddInstruction(new(zone) HConstant(i, Representation::Integer32()));
HInstruction* value_instruction =
AddInstruction(new(zone) HLoadKeyed(
boilerplate_elements, key_constant, NULL, kind));
AddInstruction(new(zone) HStoreKeyed(
object_elements, key_constant, value_instruction, kind));
}
} else if (elements->IsFixedArray()) {
Handle<FixedArray> fast_elements = Handle<FixedArray>::cast(elements);
Handle<FixedArray> original_fast_elements =
Handle<FixedArray>::cast(original_elements);
for (int i = 0; i < elements_length; i++) {
Handle<Object> value(fast_elements->get(i), isolate());
HValue* key_constant =
AddInstruction(new(zone) HConstant(i, Representation::Integer32()));
if (value->IsJSObject()) {
Handle<JSObject> value_object = Handle<JSObject>::cast(value);
Handle<JSObject> original_value_object = Handle<JSObject>::cast(
Handle<Object>(original_fast_elements->get(i), isolate()));
HInstruction* value_instruction =
AddInstruction(new(zone) HInnerAllocatedObject(target, *offset));
AddInstruction(new(zone) HStoreKeyed(
object_elements, key_constant, value_instruction, kind));
BuildEmitDeepCopy(value_object, original_value_object, target,
offset, DONT_TRACK_ALLOCATION_SITE);
} else {
HInstruction* value_instruction =
AddInstruction(new(zone) HLoadKeyed(
boilerplate_elements, key_constant, NULL, kind));
AddInstruction(new(zone) HStoreKeyed(
object_elements, key_constant, value_instruction, kind));
}
}
} else {
UNREACHABLE();
}
}
}
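
[Editor's note] BuildEmitDeepCopy lays each sub-object out back-to-back inside that single allocation, advancing the shared *offset as it recurses. A hedged C++ model of just the layout arithmetic (Boilerplate and the site-info size are stand-ins for the real map queries):

    #include <vector>

    static const int kSiteInfoSize = 2 * sizeof(void*);  // assumed, not v8's value

    struct Boilerplate {
      int instance_size;
      int elements_size;  // 0 for empty or copy-on-write backing stores
      std::vector<const Boilerplate*> object_fields;
    };

    static void LayOut(const Boilerplate* b, int* offset, bool track_site) {
      int elements_offset = *offset + b->instance_size;
      if (track_site) elements_offset += kSiteInfoSize;
      *offset = elements_offset + b->elements_size;
      // Children land immediately after this object and its backing store;
      // allocation sites are only tracked for the outermost literal.
      for (const Boilerplate* field : b->object_fields)
        LayOut(field, offset, false);  // DONT_TRACK_ALLOCATION_SITE for children
    }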
HValue* HOptimizedGraphBuilder::BuildCopyObjectHeader(
Handle<JSObject> boilerplate_object,
HInstruction* target,
int object_offset,
int elements_offset,
int elements_size) {
ASSERT(boilerplate_object->properties()->length() == 0);
Zone* zone = this->zone();
Factory* factory = isolate()->factory();
HValue* result = NULL;
HValue* object_header =
AddInstruction(new(zone) HInnerAllocatedObject(target, object_offset));
Handle<Map> boilerplate_object_map(boilerplate_object->map());
BuildStoreMap(object_header, boilerplate_object_map);
HInstruction* elements;
if (elements_size == 0) {
Handle<Object> elements_field =
Handle<Object>(boilerplate_object->elements(), isolate());
elements = AddInstruction(new(zone) HConstant(
elements_field, Representation::Tagged()));
} else {
elements = AddInstruction(new(zone) HInnerAllocatedObject(
target, elements_offset));
result = elements;
}
HInstruction* elements_store = AddInstruction(new(zone) HStoreNamedField(
object_header,
factory->elements_field_string(),
elements,
true, JSObject::kElementsOffset));
elements_store->SetGVNFlag(kChangesElementsPointer);
Handle<Object> properties_field =
Handle<Object>(boilerplate_object->properties(), isolate());
ASSERT(*properties_field == isolate()->heap()->empty_fixed_array());
HInstruction* properties = AddInstruction(new(zone) HConstant(
properties_field, Representation::None()));
AddInstruction(new(zone) HStoreNamedField(object_header,
factory->empty_string(),
properties,
true, JSObject::kPropertiesOffset));
if (boilerplate_object->IsJSArray()) {
Handle<JSArray> boilerplate_array =
Handle<JSArray>::cast(boilerplate_object);
Handle<Object> length_field =
Handle<Object>(boilerplate_array->length(), isolate());
HInstruction* length = AddInstruction(new(zone) HConstant(
length_field, Representation::None()));
HInstruction* length_store = AddInstruction(new(zone) HStoreNamedField(
object_header,
factory->length_field_string(),
length,
true, JSArray::kLengthOffset));
length_store->SetGVNFlag(kChangesArrayLengths);
}
return result;
}
void HOptimizedGraphBuilder::VisitThisFunction(ThisFunction* expr) {
ASSERT(!HasStackOverflow());
ASSERT(current_block() != NULL);
@@ -10583,7 +10984,13 @@ void HOptimizedGraphBuilder::GenerateMathLog(CallRuntime* call) {
void HOptimizedGraphBuilder::GenerateMathSqrt(CallRuntime* call) {
return Bailout("inlined runtime function: MathSqrt");
ASSERT(call->arguments()->length() == 1);
CHECK_ALIVE(VisitForValue(call->arguments()->at(0)));
HValue* value = Pop();
HValue* context = environment()->LookupContext();
HInstruction* result =
HUnaryMathOperation::New(zone(), context, value, kMathSqrt);
return ast_context()->ReturnInstruction(result, call->id());
}

72
deps/v8/src/hydrogen.h

@@ -260,6 +260,7 @@ class HGraph: public ZoneObject {
HBasicBlock* entry_block() const { return entry_block_; }
HEnvironment* start_environment() const { return start_environment_; }
void FinalizeUniqueValueIds();
void InitializeInferredTypes();
void InsertTypeConversions();
void MergeRemovableSimulates();
@@ -865,7 +866,10 @@ class FunctionState {
class HGraphBuilder {
public:
explicit HGraphBuilder(CompilationInfo* info)
: info_(info), graph_(NULL), current_block_(NULL) {}
: info_(info),
graph_(NULL),
current_block_(NULL),
no_side_effects_scope_count_(0) {}
virtual ~HGraphBuilder() {}
HBasicBlock* current_block() const { return current_block_; }
@@ -891,6 +895,14 @@ class HGraphBuilder {
HReturn* AddReturn(HValue* value);
void IncrementInNoSideEffectsScope() {
no_side_effects_scope_count_++;
}
void DecrementInNoSideEffectsScope() {
no_side_effects_scope_count_--;
}
protected:
virtual bool BuildGraph() = 0;
@@ -939,8 +951,8 @@ class HGraphBuilder {
KeyedAccessStoreMode store_mode,
Representation checked_index_representation = Representation::None());
HInstruction* BuildStoreMap(HValue* object, HValue* map, BailoutId id);
HInstruction* BuildStoreMap(HValue* object, Handle<Map> map, BailoutId id);
HInstruction* BuildStoreMap(HValue* object, HValue* map);
HInstruction* BuildStoreMap(HValue* object, Handle<Map> map);
class CheckBuilder {
public:
@@ -1032,6 +1044,20 @@ class HGraphBuilder {
bool finished_;
};
class NoObservableSideEffectsScope {
public:
explicit NoObservableSideEffectsScope(HGraphBuilder* builder) :
builder_(builder) {
builder_->IncrementInNoSideEffectsScope();
}
~NoObservableSideEffectsScope() {
builder_->DecrementInNoSideEffectsScope();
}
private:
HGraphBuilder* builder_;
};
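
[Editor's note] NoObservableSideEffectsScope is plain RAII over the counter above; a usage sketch (BuildSomething is hypothetical):

    void HGraphBuilder::BuildSomething() {
      NoObservableSideEffectsScope no_effects(this);
      // Instructions emitted here are treated as free of observable side
      // effects; the counter is decremented automatically on scope exit.
    }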
HValue* BuildNewElementsCapacity(HValue* context,
HValue* old_capacity);
@@ -1042,6 +1068,14 @@ class HGraphBuilder {
ElementsKind kind,
HValue* capacity);
void BuildInitializeElements(HValue* elements,
ElementsKind kind,
HValue* capacity);
HValue* BuildAllocateAndInitializeElements(HValue* context,
ElementsKind kind,
HValue* capacity);
HValue* BuildGrowElementsCapacity(HValue* object,
HValue* elements,
ElementsKind kind,
@@ -1062,11 +1096,18 @@ class HGraphBuilder {
HValue* length,
HValue* capacity);
HValue* BuildCloneShallowArray(HContext* context,
HValue* boilerplate,
AllocationSiteMode mode,
ElementsKind kind,
int length);
private:
HGraphBuilder();
CompilationInfo* info_;
HGraph* graph_;
HBasicBlock* current_block_;
int no_side_effects_scope_count_;
};
@@ -1183,6 +1224,11 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
static const int kUnlimitedMaxInlinedNodes = 10000;
static const int kUnlimitedMaxInlinedNodesCumulative = 10000;
// Maximum depth and total number of elements and properties for literal
// graphs to be considered for fast deep-copying.
static const int kMaxFastLiteralDepth = 3;
static const int kMaxFastLiteralProperties = 8;
// Simple accessors.
void set_function_state(FunctionState* state) { function_state_ = state; }
@@ -1459,6 +1505,26 @@ class HOptimizedGraphBuilder: public HGraphBuilder, public AstVisitor {
HInstruction* BuildThisFunction();
HInstruction* BuildFastLiteral(HValue* context,
Handle<JSObject> boilerplate_object,
Handle<JSObject> original_boilerplate_object,
int data_size,
int pointer_size,
AllocationSiteMode mode);
void BuildEmitDeepCopy(Handle<JSObject> boilerplate_object,
Handle<JSObject> original_boilerplate_object,
HInstruction* target,
int* offset,
AllocationSiteMode mode);
MUST_USE_RESULT HValue* BuildCopyObjectHeader(
Handle<JSObject> boilerplate_object,
HInstruction* target,
int object_offset,
int elements_offset,
int elements_size);
void AddCheckPrototypeMaps(Handle<JSObject> holder,
Handle<Map> receiver_map);

6
deps/v8/src/ia32/assembler-ia32.cc

@@ -2556,9 +2556,9 @@ void Assembler::GrowBuffer() {
// Copy the data.
int pc_delta = desc.buffer - buffer_;
int rc_delta = (desc.buffer + desc.buffer_size) - (buffer_ + buffer_size_);
memmove(desc.buffer, buffer_, desc.instr_size);
memmove(rc_delta + reloc_info_writer.pos(),
reloc_info_writer.pos(), desc.reloc_size);
OS::MemMove(desc.buffer, buffer_, desc.instr_size);
OS::MemMove(rc_delta + reloc_info_writer.pos(),
reloc_info_writer.pos(), desc.reloc_size);
// Switch buffers.
if (isolate()->assembler_spare_buffer() == NULL &&

7
deps/v8/src/ia32/assembler-ia32.h

@@ -1026,6 +1026,13 @@ class Assembler : public AssemblerBase {
void movdqa(const Operand& dst, XMMRegister src);
void movdqu(XMMRegister dst, const Operand& src);
void movdqu(const Operand& dst, XMMRegister src);
void movdq(bool aligned, XMMRegister dst, const Operand& src) {
if (aligned) {
movdqa(dst, src);
} else {
movdqu(dst, src);
}
}
// Use either movsd or movlpd.
void movdbl(XMMRegister dst, const Operand& src);

35
deps/v8/src/ia32/builtins-ia32.cc

@@ -1810,25 +1810,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Get the loop depth of the stack guard check. This is recorded in
// a test(eax, depth) instruction right after the call.
Label stack_check;
__ mov(ebx, Operand(esp, 0)); // return address
if (FLAG_debug_code) {
__ cmpb(Operand(ebx, 0), Assembler::kTestAlByte);
__ Assert(equal, "test eax instruction not found after loop stack check");
}
__ movzx_b(ebx, Operand(ebx, 1)); // depth
// Get the loop nesting level at which we allow OSR from the
// unoptimized code and check if we want to do OSR yet. If not we
// should perform a stack guard check so we can get interrupts while
// waiting for on-stack replacement.
__ mov(eax, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(ecx, FieldOperand(eax, JSFunction::kSharedFunctionInfoOffset));
__ mov(ecx, FieldOperand(ecx, SharedFunctionInfo::kCodeOffset));
__ cmpb(ebx, FieldOperand(ecx, Code::kAllowOSRAtLoopNestingLevelOffset));
__ j(greater, &stack_check);
// Pass the function to optimize as the argument to the on-stack
// replacement runtime function.
@@ -1845,23 +1827,6 @@ void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
__ j(not_equal, &skip, Label::kNear);
__ ret(0);
// Insert a stack guard check so that if we decide not to perform
// on-stack replacement right away, the function calling this stub can
// still be interrupted.
__ bind(&stack_check);
Label ok;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit(masm->isolate());
__ cmp(esp, Operand::StaticVariable(stack_limit));
__ j(above_equal, &ok, Label::kNear);
StackCheckStub stub;
__ TailCallStub(&stub);
if (FLAG_debug_code) {
__ Abort("Unreachable code: returned from tail call.");
}
__ bind(&ok);
__ ret(0);
__ bind(&skip);
// Untag the AST id and push it on the stack.
__ SmiUntag(eax);

191
deps/v8/src/ia32/code-stubs-ia32.cc

@@ -30,6 +30,7 @@
#if defined(V8_TARGET_ARCH_IA32)
#include "bootstrapper.h"
#include "builtins-decls.h"
#include "code-stubs.h"
#include "isolate.h"
#include "jsregexp.h"
@@ -43,6 +44,18 @@ namespace v8 {
namespace internal {
void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
static Register registers[] = { eax, ebx, ecx };
descriptor->register_param_count_ = 3;
descriptor->register_params_ = registers;
descriptor->stack_parameter_count_ = NULL;
descriptor->deoptimization_handler_ =
Runtime::FunctionForId(Runtime::kCreateArrayLiteralShallow)->entry;
}
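
[Editor's note] With eax, ebx and ecx registered above, call sites stop pushing the three arguments and materialize them in registers instead; the ia32 full-codegen hunk later in this diff does exactly that:

    __ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
    __ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
    __ mov(ecx, Immediate(constant_elements));
    FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
    __ CallStub(&stub);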
void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
Isolate* isolate,
CodeStubInterfaceDescriptor* descriptor) {
@@ -163,9 +176,7 @@ void FastNewClosureStub::Generate(MacroAssembler* masm) {
// Get the function info from the stack.
__ mov(edx, Operand(esp, 1 * kPointerSize));
int map_index = (language_mode_ == CLASSIC_MODE)
? Context::FUNCTION_MAP_INDEX
: Context::STRICT_MODE_FUNCTION_MAP_INDEX;
int map_index = Context::FunctionMapIndex(language_mode_, is_generator_);
// Compute the function map in the current native context and set that
// as the map of the allocated object.
@@ -393,168 +404,6 @@ void FastNewBlockContextStub::Generate(MacroAssembler* masm) {
}
static void GenerateFastCloneShallowArrayCommon(
MacroAssembler* masm,
int length,
FastCloneShallowArrayStub::Mode mode,
AllocationSiteMode allocation_site_mode,
Label* fail) {
// Registers on entry:
//
// ecx: boilerplate literal array.
ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS);
// All sizes here are multiples of kPointerSize.
int elements_size = 0;
if (length > 0) {
elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS
? FixedDoubleArray::SizeFor(length)
: FixedArray::SizeFor(length);
}
int size = JSArray::kSize;
int allocation_info_start = size;
if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
size += AllocationSiteInfo::kSize;
}
size += elements_size;
// Allocate both the JS array and the elements array in one big
// allocation. This avoids multiple limit checks.
AllocationFlags flags = TAG_OBJECT;
if (mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS) {
flags = static_cast<AllocationFlags>(DOUBLE_ALIGNMENT | flags);
}
__ Allocate(size, eax, ebx, edx, fail, flags);
if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
__ mov(FieldOperand(eax, allocation_info_start),
Immediate(Handle<Map>(masm->isolate()->heap()->
allocation_site_info_map())));
__ mov(FieldOperand(eax, allocation_info_start + kPointerSize), ecx);
}
// Copy the JS array part.
for (int i = 0; i < JSArray::kSize; i += kPointerSize) {
if ((i != JSArray::kElementsOffset) || (length == 0)) {
__ mov(ebx, FieldOperand(ecx, i));
__ mov(FieldOperand(eax, i), ebx);
}
}
if (length > 0) {
// Get hold of the elements array of the boilerplate and setup the
// elements pointer in the resulting object.
__ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
__ lea(edx, Operand(eax, JSArray::kSize + AllocationSiteInfo::kSize));
} else {
__ lea(edx, Operand(eax, JSArray::kSize));
}
__ mov(FieldOperand(eax, JSArray::kElementsOffset), edx);
// Copy the elements array.
if (mode == FastCloneShallowArrayStub::CLONE_ELEMENTS) {
for (int i = 0; i < elements_size; i += kPointerSize) {
__ mov(ebx, FieldOperand(ecx, i));
__ mov(FieldOperand(edx, i), ebx);
}
} else {
ASSERT(mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS);
int i;
for (i = 0; i < FixedDoubleArray::kHeaderSize; i += kPointerSize) {
__ mov(ebx, FieldOperand(ecx, i));
__ mov(FieldOperand(edx, i), ebx);
}
while (i < elements_size) {
__ fld_d(FieldOperand(ecx, i));
__ fstp_d(FieldOperand(edx, i));
i += kDoubleSize;
}
ASSERT(i == elements_size);
}
}
}
void FastCloneShallowArrayStub::Generate(MacroAssembler* masm) {
// Stack layout on entry:
//
// [esp + kPointerSize]: constant elements.
// [esp + (2 * kPointerSize)]: literal index.
// [esp + (3 * kPointerSize)]: literals array.
// Load boilerplate object into ecx and check if we need to create a
// boilerplate.
__ mov(ecx, Operand(esp, 3 * kPointerSize));
__ mov(eax, Operand(esp, 2 * kPointerSize));
STATIC_ASSERT(kPointerSize == 4);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
__ mov(ecx, FieldOperand(ecx, eax, times_half_pointer_size,
FixedArray::kHeaderSize));
Factory* factory = masm->isolate()->factory();
__ cmp(ecx, factory->undefined_value());
Label slow_case;
__ j(equal, &slow_case);
FastCloneShallowArrayStub::Mode mode = mode_;
// ecx is boilerplate object.
if (mode == CLONE_ANY_ELEMENTS) {
Label double_elements, check_fast_elements;
__ mov(ebx, FieldOperand(ecx, JSArray::kElementsOffset));
__ CheckMap(ebx, factory->fixed_cow_array_map(),
&check_fast_elements, DONT_DO_SMI_CHECK);
GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS,
allocation_site_mode_,
&slow_case);
__ ret(3 * kPointerSize);
__ bind(&check_fast_elements);
__ CheckMap(ebx, factory->fixed_array_map(),
&double_elements, DONT_DO_SMI_CHECK);
GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS,
allocation_site_mode_,
&slow_case);
__ ret(3 * kPointerSize);
__ bind(&double_elements);
mode = CLONE_DOUBLE_ELEMENTS;
// Fall through to generate the code to handle double elements.
}
if (FLAG_debug_code) {
const char* message;
Handle<Map> expected_map;
if (mode == CLONE_ELEMENTS) {
message = "Expected (writable) fixed array";
expected_map = factory->fixed_array_map();
} else if (mode == CLONE_DOUBLE_ELEMENTS) {
message = "Expected (writable) fixed double array";
expected_map = factory->fixed_double_array_map();
} else {
ASSERT(mode == COPY_ON_WRITE_ELEMENTS);
message = "Expected copy-on-write fixed array";
expected_map = factory->fixed_cow_array_map();
}
__ push(ecx);
__ mov(ecx, FieldOperand(ecx, JSArray::kElementsOffset));
__ cmp(FieldOperand(ecx, HeapObject::kMapOffset), expected_map);
__ Assert(equal, message);
__ pop(ecx);
}
GenerateFastCloneShallowArrayCommon(masm, length_, mode,
allocation_site_mode_,
&slow_case);
// Return and remove the on-stack parameters.
__ ret(3 * kPointerSize);
__ bind(&slow_case);
__ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1);
}
// The stub expects its argument on the stack and returns its result in tos_:
// zero for false, and a non-zero value for true.
void ToBooleanStub::Generate(MacroAssembler* masm) {
@@ -5076,6 +4925,7 @@ bool CEntryStub::IsPregenerated() {
void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) {
CEntryStub::GenerateAheadOfTime(isolate);
StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate);
StubFailureTrampolineStub::GenerateAheadOfTime(isolate);
// It is important that the store buffer overflow stubs are generated first.
RecordWriteStub::GenerateFixedRegStubsAheadOfTime(isolate);
}
@@ -5215,8 +5065,13 @@ void CEntryStub::GenerateCore(MacroAssembler* masm,
// Special handling of out of memory exceptions.
JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);
// Retrieve the pending exception and clear the variable.
// Retrieve the pending exception.
__ mov(eax, Operand::StaticVariable(pending_exception_address));
// See if we just retrieved an OOM exception.
JumpIfOOM(masm, eax, ecx, throw_out_of_memory_exception);
// Clear the pending exception.
__ mov(edx, Immediate(masm->isolate()->factory()->the_hole_value()));
__ mov(Operand::StaticVariable(pending_exception_address), edx);
@@ -7786,9 +7641,7 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) {
void StubFailureTrampolineStub::Generate(MacroAssembler* masm) {
ASSERT(!Serializer::enabled());
bool save_fp_regs = CpuFeatures::IsSupported(SSE2);
CEntryStub ces(1, save_fp_regs ? kSaveFPRegs : kDontSaveFPRegs);
CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs);
__ call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET);
int parameter_count_offset =
StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset;

579
deps/v8/src/ia32/codegen-ia32.cc

@@ -173,21 +173,94 @@ UnaryMathFunction CreateSqrtFunction() {
}
static void MemCopyWrapper(void* dest, const void* src, size_t size) {
memcpy(dest, src, size);
// Helper functions for CreateMemMoveFunction.
#undef __
#define __ ACCESS_MASM(masm)
// Keep around global pointers to these objects so that Valgrind won't complain.
static size_t* medium_handlers = NULL;
static size_t* small_handlers = NULL;
enum Direction { FORWARD, BACKWARD };
enum Alignment { MOVE_ALIGNED, MOVE_UNALIGNED };
// Expects registers:
// esi - source, aligned if alignment == ALIGNED
// edi - destination, always aligned
// ecx - count (copy size in bytes)
// edx - loop count (number of 64 byte chunks)
void MemMoveEmitMainLoop(MacroAssembler* masm,
Label* move_last_15,
Direction direction,
Alignment alignment) {
Register src = esi;
Register dst = edi;
Register count = ecx;
Register loop_count = edx;
Label loop, move_last_31, move_last_63;
__ cmp(loop_count, 0);
__ j(equal, &move_last_63);
__ bind(&loop);
// Main loop. Copy in 64 byte chunks.
if (direction == BACKWARD) __ sub(src, Immediate(0x40));
__ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
__ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
__ movdq(alignment == MOVE_ALIGNED, xmm2, Operand(src, 0x20));
__ movdq(alignment == MOVE_ALIGNED, xmm3, Operand(src, 0x30));
if (direction == FORWARD) __ add(src, Immediate(0x40));
if (direction == BACKWARD) __ sub(dst, Immediate(0x40));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
__ movdqa(Operand(dst, 0x20), xmm2);
__ movdqa(Operand(dst, 0x30), xmm3);
if (direction == FORWARD) __ add(dst, Immediate(0x40));
__ dec(loop_count);
__ j(not_zero, &loop);
// At most 63 bytes left to copy.
__ bind(&move_last_63);
__ test(count, Immediate(0x20));
__ j(zero, &move_last_31);
if (direction == BACKWARD) __ sub(src, Immediate(0x20));
__ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0x00));
__ movdq(alignment == MOVE_ALIGNED, xmm1, Operand(src, 0x10));
if (direction == FORWARD) __ add(src, Immediate(0x20));
if (direction == BACKWARD) __ sub(dst, Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
if (direction == FORWARD) __ add(dst, Immediate(0x20));
// At most 31 bytes left to copy.
__ bind(&move_last_31);
__ test(count, Immediate(0x10));
__ j(zero, move_last_15);
if (direction == BACKWARD) __ sub(src, Immediate(0x10));
__ movdq(alignment == MOVE_ALIGNED, xmm0, Operand(src, 0));
if (direction == FORWARD) __ add(src, Immediate(0x10));
if (direction == BACKWARD) __ sub(dst, Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
if (direction == FORWARD) __ add(dst, Immediate(0x10));
}
void MemMoveEmitPopAndReturn(MacroAssembler* masm) {
__ pop(esi);
__ pop(edi);
__ ret(0);
}
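
[Editor's note] What MemMoveEmitMainLoop emits is, in effect, the loop below (a C++ model; the stub keeps everything in registers and picks movdqa or movdqu per the alignment flag):

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // count = bytes remaining, loop_count = count / 64, as in the registers above.
    static void MainLoopModel(const uint8_t* src, uint8_t* dst,
                              size_t count, size_t loop_count, bool backward) {
      while (loop_count--) {  // 64-byte chunks, four XMM moves each
        if (backward) { src -= 64; dst -= 64; }
        std::memcpy(dst, src, 64);
        if (!backward) { src += 64; dst += 64; }
      }
      for (size_t chunk = 32; chunk >= 16; chunk /= 2) {  // the 0x20 and 0x10 bits
        if (count & chunk) {
          if (backward) { src -= chunk; dst -= chunk; }
          std::memcpy(dst, src, chunk);
          if (!backward) { src += chunk; dst += chunk; }
        }
      }
      // At most 15 bytes remain; the stub jumps to the caller's label for those.
    }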
OS::MemCopyFunction CreateMemCopyFunction() {
#undef __
#define __ masm.
OS::MemMoveFunction CreateMemMoveFunction() {
size_t actual_size;
// Allocate buffer in executable space.
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB,
&actual_size,
true));
if (buffer == NULL) return &MemCopyWrapper;
byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
if (buffer == NULL) return NULL;
MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
// Generated code is put into a fixed, unmovable, buffer, and not into
// Generated code is put into a fixed, unmovable buffer, and not into
// the V8 heap. We can't, and don't, refer to any relocatable addresses
// (e.g. the JavaScript nan-object).
@@ -203,185 +276,369 @@ OS::MemCopyFunction CreateMemCopyFunction() {
const int kSourceOffset = 2 * kPointerSize;
const int kSizeOffset = 3 * kPointerSize;
// When copying up to this many bytes, use special "small" handlers.
const size_t kSmallCopySize = 8;
// When copying up to this many bytes, use special "medium" handlers.
const size_t kMediumCopySize = 63;
// When the non-overlapping region of src and dst is smaller than this,
// use a more careful (slightly slower) implementation.
const size_t kMinMoveDistance = 16;
// Note that these values are dictated by the implementation below,
// do not just change them and hope things will work!
int stack_offset = 0; // Update if we change the stack height.
if (FLAG_debug_code) {
__ cmp(Operand(esp, kSizeOffset + stack_offset),
Immediate(OS::kMinComplexMemCopy));
Label ok;
__ j(greater_equal, &ok);
__ int3();
__ bind(&ok);
}
Label backward, backward_much_overlap;
Label forward_much_overlap, small_size, medium_size, pop_and_return;
__ push(edi);
__ push(esi);
stack_offset += 2 * kPointerSize;
Register dst = edi;
Register src = esi;
Register count = ecx;
Register loop_count = edx;
__ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
__ mov(src, Operand(esp, stack_offset + kSourceOffset));
__ mov(count, Operand(esp, stack_offset + kSizeOffset));
__ cmp(dst, src);
__ j(equal, &pop_and_return);
if (CpuFeatures::IsSupported(SSE2)) {
CpuFeatureScope enable(&masm, SSE2);
__ push(edi);
__ push(esi);
stack_offset += 2 * kPointerSize;
Register dst = edi;
Register src = esi;
Register count = ecx;
__ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
__ mov(src, Operand(esp, stack_offset + kSourceOffset));
__ mov(count, Operand(esp, stack_offset + kSizeOffset));
__ movdqu(xmm0, Operand(src, 0));
__ movdqu(Operand(dst, 0), xmm0);
__ mov(edx, dst);
__ and_(edx, 0xF);
__ neg(edx);
__ add(edx, Immediate(16));
__ add(dst, edx);
__ add(src, edx);
__ sub(count, edx);
// edi is now aligned. Check if esi is also aligned.
Label unaligned_source;
__ test(src, Immediate(0x0F));
__ j(not_zero, &unaligned_source);
CpuFeatureScope sse2_scope(&masm, SSE2);
__ prefetch(Operand(src, 0), 1);
__ cmp(count, kSmallCopySize);
__ j(below_equal, &small_size);
__ cmp(count, kMediumCopySize);
__ j(below_equal, &medium_size);
__ cmp(dst, src);
__ j(above, &backward);
{
// |dst| is a lower address than |src|. Copy front-to-back.
Label unaligned_source, move_last_15, skip_last_move;
__ mov(eax, src);
__ sub(eax, dst);
__ cmp(eax, kMinMoveDistance);
__ j(below, &forward_much_overlap);
// Copy first 16 bytes.
__ movdqu(xmm0, Operand(src, 0));
__ movdqu(Operand(dst, 0), xmm0);
// Determine distance to alignment: 16 - (dst & 0xF).
__ mov(edx, dst);
__ and_(edx, 0xF);
__ neg(edx);
__ add(edx, Immediate(16));
__ add(dst, edx);
__ add(src, edx);
__ sub(count, edx);
// dst is now aligned. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
// Check if src is also aligned.
__ test(src, Immediate(0xF));
__ j(not_zero, &unaligned_source);
// Copy loop for aligned source and destination.
__ mov(edx, count);
Register loop_count = ecx;
Register count = edx;
__ shr(loop_count, 5);
{
// Main copy loop.
Label loop;
__ bind(&loop);
__ prefetch(Operand(src, 0x20), 1);
__ movdqa(xmm0, Operand(src, 0x00));
__ movdqa(xmm1, Operand(src, 0x10));
__ add(src, Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
__ add(dst, Immediate(0x20));
__ dec(loop_count);
__ j(not_zero, &loop);
}
// At most 31 bytes to copy.
Label move_less_16;
__ test(count, Immediate(0x10));
__ j(zero, &move_less_16);
__ movdqa(xmm0, Operand(src, 0));
__ add(src, Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
__ add(dst, Immediate(0x10));
__ bind(&move_less_16);
MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_ALIGNED);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
__ bind(&move_last_15);
__ and_(count, 0xF);
__ j(zero, &skip_last_move, Label::kNear);
__ movdqu(xmm0, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
__ bind(&skip_last_move);
MemMoveEmitPopAndReturn(&masm);
__ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
__ pop(esi);
__ pop(edi);
__ ret(0);
}
__ Align(16);
{
// Copy loop for unaligned source and aligned destination.
// If source is not aligned, we can't read it as efficiently.
__ bind(&unaligned_source);
__ mov(edx, ecx);
Register loop_count = ecx;
Register count = edx;
__ shr(loop_count, 5);
{
// Main copy loop
Label loop;
__ bind(&loop);
__ prefetch(Operand(src, 0x20), 1);
__ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10));
__ add(src, Immediate(0x20));
__ movdqa(Operand(dst, 0x00), xmm0);
__ movdqa(Operand(dst, 0x10), xmm1);
__ add(dst, Immediate(0x20));
__ dec(loop_count);
__ j(not_zero, &loop);
}
MemMoveEmitMainLoop(&masm, &move_last_15, FORWARD, MOVE_UNALIGNED);
__ jmp(&move_last_15);
// Less than kMinMoveDistance offset between dst and src.
Label loop_until_aligned, last_15_much_overlap;
__ bind(&loop_until_aligned);
__ mov_b(eax, Operand(src, 0));
__ inc(src);
__ mov_b(Operand(dst, 0), eax);
__ inc(dst);
__ dec(count);
__ bind(&forward_much_overlap); // Entry point into this block.
__ test(dst, Immediate(0xF));
__ j(not_zero, &loop_until_aligned);
// dst is now aligned, src can't be. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
MemMoveEmitMainLoop(&masm, &last_15_much_overlap,
FORWARD, MOVE_UNALIGNED);
__ bind(&last_15_much_overlap);
__ and_(count, 0xF);
__ j(zero, &pop_and_return);
__ cmp(count, kSmallCopySize);
__ j(below_equal, &small_size);
__ jmp(&medium_size);
}
// At most 31 bytes to copy.
Label move_less_16;
__ test(count, Immediate(0x10));
__ j(zero, &move_less_16);
{
// |dst| is a higher address than |src|. Copy backwards.
Label unaligned_source, move_first_15, skip_last_move;
__ bind(&backward);
// |dst| and |src| always point to the end of what's left to copy.
__ add(dst, count);
__ add(src, count);
__ mov(eax, dst);
__ sub(eax, src);
__ cmp(eax, kMinMoveDistance);
__ j(below, &backward_much_overlap);
// Copy last 16 bytes.
__ movdqu(xmm0, Operand(src, -0x10));
__ movdqu(Operand(dst, -0x10), xmm0);
// Find distance to alignment: dst & 0xF
__ mov(edx, dst);
__ and_(edx, 0xF);
__ sub(dst, edx);
__ sub(src, edx);
__ sub(count, edx);
// dst is now aligned. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
// Check if src is also aligned.
__ test(src, Immediate(0xF));
__ j(not_zero, &unaligned_source);
// Copy loop for aligned source and destination.
MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_ALIGNED);
// At most 15 bytes to copy. Copy 16 bytes at beginning of string.
__ bind(&move_first_15);
__ and_(count, 0xF);
__ j(zero, &skip_last_move, Label::kNear);
__ sub(src, count);
__ sub(dst, count);
__ movdqu(xmm0, Operand(src, 0));
__ add(src, Immediate(0x10));
__ movdqa(Operand(dst, 0), xmm0);
__ add(dst, Immediate(0x10));
__ bind(&move_less_16);
// At most 15 bytes to copy. Copy 16 bytes at end of string.
__ and_(count, 0x0F);
__ movdqu(xmm0, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, count, times_1, -0x10), xmm0);
__ movdqu(Operand(dst, 0), xmm0);
__ bind(&skip_last_move);
MemMoveEmitPopAndReturn(&masm);
__ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
__ pop(esi);
__ pop(edi);
__ ret(0);
// Copy loop for unaligned source and aligned destination.
__ bind(&unaligned_source);
MemMoveEmitMainLoop(&masm, &move_first_15, BACKWARD, MOVE_UNALIGNED);
__ jmp(&move_first_15);
// Less than kMinMoveDistance offset between dst and src.
Label loop_until_aligned, first_15_much_overlap;
__ bind(&loop_until_aligned);
__ dec(src);
__ dec(dst);
__ mov_b(eax, Operand(src, 0));
__ mov_b(Operand(dst, 0), eax);
__ dec(count);
__ bind(&backward_much_overlap); // Entry point into this block.
__ test(dst, Immediate(0xF));
__ j(not_zero, &loop_until_aligned);
// dst is now aligned, src can't be. Main copy loop.
__ mov(loop_count, count);
__ shr(loop_count, 6);
MemMoveEmitMainLoop(&masm, &first_15_much_overlap,
BACKWARD, MOVE_UNALIGNED);
__ bind(&first_15_much_overlap);
__ and_(count, 0xF);
__ j(zero, &pop_and_return);
// Small/medium handlers expect dst/src to point to the beginning.
__ sub(dst, count);
__ sub(src, count);
__ cmp(count, kSmallCopySize);
__ j(below_equal, &small_size);
__ jmp(&medium_size);
}
{
// Special handlers for 9 <= copy_size < 64. No assumptions about
// alignment or move distance, so all reads must be unaligned and
// must happen before any writes.
Label f9_16, f17_32, f33_48, f49_63;
__ bind(&f9_16);
__ movdbl(xmm0, Operand(src, 0));
__ movdbl(xmm1, Operand(src, count, times_1, -8));
__ movdbl(Operand(dst, 0), xmm0);
__ movdbl(Operand(dst, count, times_1, -8), xmm1);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f17_32);
__ movdqu(xmm0, Operand(src, 0));
__ movdqu(xmm1, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, 0x00), xmm0);
__ movdqu(Operand(dst, count, times_1, -0x10), xmm1);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f33_48);
__ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10));
__ movdqu(xmm2, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, 0x00), xmm0);
__ movdqu(Operand(dst, 0x10), xmm1);
__ movdqu(Operand(dst, count, times_1, -0x10), xmm2);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f49_63);
__ movdqu(xmm0, Operand(src, 0x00));
__ movdqu(xmm1, Operand(src, 0x10));
__ movdqu(xmm2, Operand(src, 0x20));
__ movdqu(xmm3, Operand(src, count, times_1, -0x10));
__ movdqu(Operand(dst, 0x00), xmm0);
__ movdqu(Operand(dst, 0x10), xmm1);
__ movdqu(Operand(dst, 0x20), xmm2);
__ movdqu(Operand(dst, count, times_1, -0x10), xmm3);
MemMoveEmitPopAndReturn(&masm);
medium_handlers = new size_t[4];
medium_handlers[0] = reinterpret_cast<intptr_t>(buffer) + f9_16.pos();
medium_handlers[1] = reinterpret_cast<intptr_t>(buffer) + f17_32.pos();
medium_handlers[2] = reinterpret_cast<intptr_t>(buffer) + f33_48.pos();
medium_handlers[3] = reinterpret_cast<intptr_t>(buffer) + f49_63.pos();
__ bind(&medium_size); // Entry point into this block.
__ mov(eax, count);
__ dec(eax);
__ shr(eax, 4);
if (FLAG_debug_code) {
Label ok;
__ cmp(eax, 3);
__ j(below_equal, &ok);
__ int3();
__ bind(&ok);
}
__ mov(eax, Operand(eax, times_4,
reinterpret_cast<intptr_t>(medium_handlers)));
__ jmp(eax);
}
{
// Specialized copiers for copy_size <= 8 bytes.
Label f0, f1, f2, f3, f4, f5_8;
__ bind(&f0);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f1);
__ mov_b(eax, Operand(src, 0));
__ mov_b(Operand(dst, 0), eax);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f2);
__ mov_w(eax, Operand(src, 0));
__ mov_w(Operand(dst, 0), eax);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f3);
__ mov_w(eax, Operand(src, 0));
__ mov_b(edx, Operand(src, 2));
__ mov_w(Operand(dst, 0), eax);
__ mov_b(Operand(dst, 2), edx);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f4);
__ mov(eax, Operand(src, 0));
__ mov(Operand(dst, 0), eax);
MemMoveEmitPopAndReturn(&masm);
__ bind(&f5_8);
__ mov(eax, Operand(src, 0));
__ mov(edx, Operand(src, count, times_1, -4));
__ mov(Operand(dst, 0), eax);
__ mov(Operand(dst, count, times_1, -4), edx);
MemMoveEmitPopAndReturn(&masm);
small_handlers = new size_t[9];
small_handlers[0] = reinterpret_cast<intptr_t>(buffer) + f0.pos();
small_handlers[1] = reinterpret_cast<intptr_t>(buffer) + f1.pos();
small_handlers[2] = reinterpret_cast<intptr_t>(buffer) + f2.pos();
small_handlers[3] = reinterpret_cast<intptr_t>(buffer) + f3.pos();
small_handlers[4] = reinterpret_cast<intptr_t>(buffer) + f4.pos();
small_handlers[5] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
small_handlers[6] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
small_handlers[7] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
small_handlers[8] = reinterpret_cast<intptr_t>(buffer) + f5_8.pos();
__ bind(&small_size); // Entry point into this block.
if (FLAG_debug_code) {
Label ok;
__ cmp(count, 8);
__ j(below_equal, &ok);
__ int3();
__ bind(&ok);
}
__ mov(eax, Operand(count, times_4,
reinterpret_cast<intptr_t>(small_handlers)));
__ jmp(eax);
}
} else {
// SSE2 not supported. Unlikely to happen in practice.
__ push(edi);
__ push(esi);
stack_offset += 2 * kPointerSize;
__ cld();
Register dst = edi;
Register src = esi;
Register count = ecx;
__ mov(dst, Operand(esp, stack_offset + kDestinationOffset));
__ mov(src, Operand(esp, stack_offset + kSourceOffset));
__ mov(count, Operand(esp, stack_offset + kSizeOffset));
// Copy the first word.
__ mov(eax, Operand(src, 0));
__ mov(Operand(dst, 0), eax);
// Increment src and dst so that dst is aligned.
__ mov(edx, dst);
__ and_(edx, 0x03);
__ neg(edx);
__ add(edx, Immediate(4)); // edx = 4 - (dst & 3)
__ add(dst, edx);
__ add(src, edx);
__ sub(count, edx);
// edi is now aligned; ecx holds the number of remaining bytes to copy.
__ mov(edx, count);
count = edx;
__ shr(ecx, 2); // Make word count instead of byte count.
__ rep_movs();
// At most 3 bytes left to copy. Copy 4 bytes at end of string.
__ and_(count, 3);
__ mov(eax, Operand(src, count, times_1, -4));
__ mov(Operand(dst, count, times_1, -4), eax);
__ mov(eax, Operand(esp, stack_offset + kDestinationOffset));
__ pop(esi);
__ pop(edi);
__ ret(0);
// No SSE2.
Label forward;
__ cmp(count, 0);
__ j(equal, &pop_and_return);
__ cmp(dst, src);
__ j(above, &backward);
__ jmp(&forward);
{
// Simple forward copier.
Label forward_loop_1byte, forward_loop_4byte;
__ bind(&forward_loop_4byte);
__ mov(eax, Operand(src, 0));
__ sub(count, Immediate(4));
__ add(src, Immediate(4));
__ mov(Operand(dst, 0), eax);
__ add(dst, Immediate(4));
__ bind(&forward); // Entry point.
__ cmp(count, 3);
__ j(above, &forward_loop_4byte);
__ bind(&forward_loop_1byte);
__ cmp(count, 0);
__ j(below_equal, &pop_and_return);
__ mov_b(eax, Operand(src, 0));
__ dec(count);
__ inc(src);
__ mov_b(Operand(dst, 0), eax);
__ inc(dst);
__ jmp(&forward_loop_1byte);
}
{
// Simple backward copier.
Label backward_loop_1byte, backward_loop_4byte, entry_shortcut;
__ bind(&backward);
__ add(src, count);
__ add(dst, count);
__ cmp(count, 3);
__ j(below_equal, &entry_shortcut);
__ bind(&backward_loop_4byte);
__ sub(src, Immediate(4));
__ sub(count, Immediate(4));
__ mov(eax, Operand(src, 0));
__ sub(dst, Immediate(4));
__ mov(Operand(dst, 0), eax);
__ cmp(count, 3);
__ j(above, &backward_loop_4byte);
__ bind(&backward_loop_1byte);
__ cmp(count, 0);
__ j(below_equal, &pop_and_return);
__ bind(&entry_shortcut);
__ dec(src);
__ dec(count);
__ mov_b(eax, Operand(src, 0));
__ dec(dst);
__ mov_b(Operand(dst, 0), eax);
__ jmp(&backward_loop_1byte);
}
}
__ bind(&pop_and_return);
MemMoveEmitPopAndReturn(&masm);
CodeDesc desc;
masm.GetCode(&desc);
ASSERT(!RelocInfo::RequiresRelocation(desc));
CPU::FlushICache(buffer, actual_size);
OS::ProtectCode(buffer, actual_size);
return FUNCTION_CAST<OS::MemCopyFunction>(buffer);
return FUNCTION_CAST<OS::MemMoveFunction>(buffer);
}
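
[Editor's note] Stripped of the SSE2 mechanics, the generated routine dispatches like the model below; the jump-table index math matches what the stub computes into eax (sketch only):

    #include <cstddef>
    #include <cstdint>

    static size_t MediumHandlerIndex(size_t count) {
      return (count - 1) >> 4;  // 9..16 -> 0, 17..32 -> 1, 33..48 -> 2, 49..63 -> 3
    }

    static void MemMoveModel(uint8_t* dst, const uint8_t* src, size_t count) {
      if (dst == src) return;
      if (count <= 8)  { /* small_handlers[count]; 5..8 share one handler */ return; }
      if (count <= 63) { /* medium_handlers[MediumHandlerIndex(count)] */ return; }
      if (dst < src) {
        // Forward copy; if src - dst < kMinMoveDistance (16), take the careful
        // "much overlap" path so 16-byte stores never clobber unread input.
      } else {
        // Backward copy from the end, with the symmetric overlap check.
      }
    }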
#undef __
// -------------------------------------------------------------------------

2
deps/v8/src/ia32/cpu-ia32.cc

@@ -79,6 +79,8 @@ void CPU::DebugBreak() {
// instead
// __asm { int 3 }
__debugbreak();
#elif defined(__native_client__)
asm("hlt");
#else
asm("int $3");
#endif

114
deps/v8/src/ia32/deoptimizer-ia32.cc

@@ -90,9 +90,9 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
Factory* factory = isolate->factory();
Handle<ByteArray> new_reloc =
factory->NewByteArray(reloc_length + padding, TENURED);
memcpy(new_reloc->GetDataStartAddress() + padding,
code->relocation_info()->GetDataStartAddress(),
reloc_length);
OS::MemCopy(new_reloc->GetDataStartAddress() + padding,
code->relocation_info()->GetDataStartAddress(),
reloc_length);
// Create a relocation writer to write the comments in the padding
// space. Use position 0 for everything to ensure short encoding.
RelocInfoWriter reloc_info_writer(
@@ -177,7 +177,8 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
// Move the relocation info to the beginning of the byte array.
int new_reloc_size = reloc_end_address - reloc_info_writer.pos();
memmove(code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
OS::MemMove(
code->relocation_start(), reloc_info_writer.pos(), new_reloc_size);
// The relocation info is in place, update the size.
reloc_info->set_length(new_reloc_size);
@@ -211,41 +212,39 @@ void Deoptimizer::DeoptimizeFunctionWithPreparedFunctionList(
static const byte kJnsInstruction = 0x79;
static const byte kJnsOffset = 0x13;
static const byte kJnsOffset = 0x11;
static const byte kCallInstruction = 0xe8;
static const byte kNopByteOne = 0x66;
static const byte kNopByteTwo = 0x90;
void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
Code* replacement_code) {
// The back edge bookkeeping code matches the pattern:
//
// sub <profiling_counter>, <delta>
// jns ok
// call <interrupt stub>
// ok:
//
// The patched back edge looks like this:
//
// sub <profiling_counter>, <delta> ;; Not changed
// nop
// nop
// call <on-stack replacement>
// ok:
void Deoptimizer::PatchInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code,
Code* replacement_code) {
ASSERT(!InterruptCodeIsPatched(unoptimized_code,
pc_after,
interrupt_code,
replacement_code));
// Turn the jump into nops.
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(check_code->entry(),
Assembler::target_address_at(call_target_address));
// The back edge bookkeeping code matches the pattern:
//
// sub <profiling_counter>, <delta>
// jns ok
// call <stack guard>
// test eax, <loop nesting depth>
// ok: ...
//
// We will patch away the branch so the code is:
//
// sub <profiling_counter>, <delta> ;; Not changed
// nop
// nop
// call <on-stack replacement>
// test eax, <loop nesting depth>
// ok:
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kNopByteOne;
*(call_target_address - 2) = kNopByteTwo;
// Replace the call address.
Assembler::set_target_address_at(call_target_address,
replacement_code->entry());
@@ -254,27 +253,48 @@ void Deoptimizer::PatchStackCheckCodeAt(Code* unoptimized_code,
}
void Deoptimizer::RevertStackCheckCodeAt(Code* unoptimized_code,
Address pc_after,
Code* check_code,
Code* replacement_code) {
void Deoptimizer::RevertInterruptCodeAt(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code,
Code* replacement_code) {
ASSERT(InterruptCodeIsPatched(unoptimized_code,
pc_after,
interrupt_code,
replacement_code));
// Restore the original jump.
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(replacement_code->entry(),
Assembler::target_address_at(call_target_address));
// Replace the nops from patching (Deoptimizer::PatchStackCheckCode) to
// restore the conditional branch.
ASSERT_EQ(kNopByteOne, *(call_target_address - 3));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
*(call_target_address - 3) = kJnsInstruction;
*(call_target_address - 2) = kJnsOffset;
// Restore the original call address.
Assembler::set_target_address_at(call_target_address,
check_code->entry());
interrupt_code->entry());
check_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, check_code);
interrupt_code->GetHeap()->incremental_marking()->RecordCodeTargetPatch(
unoptimized_code, call_target_address, interrupt_code);
}
#ifdef DEBUG
bool Deoptimizer::InterruptCodeIsPatched(Code* unoptimized_code,
Address pc_after,
Code* interrupt_code,
Code* replacement_code) {
Address call_target_address = pc_after - kIntSize;
ASSERT_EQ(kCallInstruction, *(call_target_address - 1));
if (*(call_target_address - 3) == kNopByteOne) {
ASSERT_EQ(replacement_code->entry(),
Assembler::target_address_at(call_target_address));
ASSERT_EQ(kNopByteTwo, *(call_target_address - 2));
return true;
} else {
ASSERT_EQ(interrupt_code->entry(),
Assembler::target_address_at(call_target_address));
ASSERT_EQ(kJnsInstruction, *(call_target_address - 3));
ASSERT_EQ(kJnsOffset, *(call_target_address - 2));
return false;
}
}
#endif // DEBUG
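
[Editor's note] Both patch and revert are two-byte rewrites immediately before the call's 32-bit operand, plus retargeting the call itself; a byte-level sketch using the constants above:

    #include <cassert>
    #include <cstdint>

    // call_target_address = pc_after - kIntSize points at the call's imm32.
    // Unpatched: 0x79 0x11 0xe8 <imm32>   (jns ok; call <interrupt stub>)
    // Patched:   0x66 0x90 0xe8 <imm32>   (nop; nop; call <on-stack replacement>)
    static void PatchModel(uint8_t* call_target_address, bool patch) {
      assert(call_target_address[-1] == 0xe8);        // kCallInstruction
      call_target_address[-3] = patch ? 0x66 : 0x79;  // kNopByteOne / kJnsInstruction
      call_target_address[-2] = patch ? 0x90 : 0x11;  // kNopByteTwo / kJnsOffset
      // Assembler::set_target_address_at then swaps the call target between
      // the interrupt stub and the OSR builtin.
    }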
static int LookupBailoutId(DeoptimizationInputData* data, BailoutId ast_id) {

12
deps/v8/src/ia32/frames-ia32.h

@@ -60,18 +60,6 @@ const int kAlignmentZapValue = 0x12345678; // Not heap object tagged.
// ----------------------------------------------------
class StackHandlerConstants : public AllStatic {
public:
static const int kNextOffset = 0 * kPointerSize;
static const int kCodeOffset = 1 * kPointerSize;
static const int kStateOffset = 2 * kPointerSize;
static const int kContextOffset = 3 * kPointerSize;
static const int kFPOffset = 4 * kPointerSize;
static const int kSize = kFPOffset + kPointerSize;
};
class EntryFrameConstants : public AllStatic {
public:
static const int kCallerFPOffset = -6 * kPointerSize;

34
deps/v8/src/ia32/full-codegen-ia32.cc

@@ -164,6 +164,8 @@ void FullCodeGenerator::Generate() {
{ Comment cmnt(masm_, "[ Allocate locals");
int locals_count = info->scope()->num_stack_slots();
// Generators allocate locals, if any, in context slots.
ASSERT(!info->function()->is_generator() || locals_count == 0);
if (locals_count == 1) {
__ push(Immediate(isolate()->factory()->undefined_value()));
} else if (locals_count > 1) {
@@ -351,13 +353,6 @@ void FullCodeGenerator::EmitBackEdgeBookkeeping(IterationStatement* stmt,
// the deoptimization input data found in the optimized code.
RecordBackEdge(stmt->OsrEntryId());
// Loop stack checks can be patched to perform on-stack replacement. In
// order to decide whether or not to perform OSR we embed the loop depth
// in a test instruction after the call so we can extract it from the OSR
// builtin.
ASSERT(loop_depth() > 0);
__ test(eax, Immediate(Min(loop_depth(), Code::kMaxLoopNestingMarker)));
EmitProfilingCounterReset();
__ bind(&ok);
@@ -1214,7 +1209,7 @@ void FullCodeGenerator::EmitNewClosure(Handle<SharedFunctionInfo> info,
!pretenure &&
scope()->is_function_scope() &&
info->num_literals() == 0) {
FastNewClosureStub stub(info->language_mode());
FastNewClosureStub stub(info->language_mode(), info->is_generator());
__ push(Immediate(info));
__ CallStub(&stub);
} else {
@@ -1670,24 +1665,33 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
Handle<FixedArrayBase> constant_elements_values(
FixedArrayBase::cast(constant_elements->get(1)));
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_elements));
Heap* heap = isolate()->heap();
if (has_constant_fast_elements &&
constant_elements_values->map() == heap->fixed_cow_array_map()) {
// If the elements are already FAST_*_ELEMENTS, the boilerplate cannot
// change, so it's possible to specialize the stub in advance.
__ IncrementCounter(isolate()->counters()->cow_arrays_created_stub(), 1);
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(
FastCloneShallowArrayStub::COPY_ON_WRITE_ELEMENTS,
DONT_TRACK_ALLOCATION_SITE,
length);
__ CallStub(&stub);
} else if (expr->depth() > 1) {
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_elements));
__ CallRuntime(Runtime::kCreateArrayLiteral, 3);
} else if (length > FastCloneShallowArrayStub::kMaximumClonedLength) {
} else if (Serializer::enabled() ||
length > FastCloneShallowArrayStub::kMaximumClonedLength) {
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ push(FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ push(Immediate(Smi::FromInt(expr->literal_index())));
__ push(Immediate(constant_elements));
__ CallRuntime(Runtime::kCreateArrayLiteralShallow, 3);
} else {
ASSERT(IsFastSmiOrObjectElementsKind(constant_elements_kind) ||
@@ -1704,6 +1708,10 @@ void FullCodeGenerator::VisitArrayLiteral(ArrayLiteral* expr) {
allocation_site_mode = DONT_TRACK_ALLOCATION_SITE;
}
__ mov(ebx, Operand(ebp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(eax, FieldOperand(ebx, JSFunction::kLiteralsOffset));
__ mov(ebx, Immediate(Smi::FromInt(expr->literal_index())));
__ mov(ecx, Immediate(constant_elements));
FastCloneShallowArrayStub stub(mode, allocation_site_mode, length);
__ CallStub(&stub);
}

Some files were not shown because too many files changed in this diff
