
Downgrade V8 to 3.1.8.25

There are serious performance regressions both in V8 and our own legacy
networking stack. Until we correct our own problems, we are going back to the
old V8.
v0.7.4-release
Ryan Dahl committed 14 years ago
commit 149562555c
Changed files (lines changed in parentheses):

  1. deps/v8/.gitignore (4)
  2. deps/v8/AUTHORS (8)
  3. deps/v8/ChangeLog (443)
  4. deps/v8/SConstruct (546)
  5. deps/v8/include/v8-debug.h (45)
  6. deps/v8/include/v8-preparser.h (13)
  7. deps/v8/include/v8-profiler.h (174)
  8. deps/v8/include/v8-testing.h (5)
  9. deps/v8/include/v8.h (648)
  10. deps/v8/preparser/SConscript (38)
  11. deps/v8/preparser/preparser-process.cc (395)
  12. deps/v8/samples/process.cc (6)
  13. deps/v8/samples/shell.cc (440)
  14. deps/v8/src/SConscript (84)
  15. deps/v8/src/accessors.cc (307)
  16. deps/v8/src/accessors.h (2)
  17. deps/v8/src/allocation.cc (82)
  18. deps/v8/src/allocation.h (51)
  19. deps/v8/src/api.cc (3440)
  20. deps/v8/src/api.h (111)
  21. deps/v8/src/apinatives.js (8)
  22. deps/v8/src/apiutils.h (7)
  23. deps/v8/src/arguments.h (29)
  24. deps/v8/src/arm/assembler-arm-inl.h (19)
  25. deps/v8/src/arm/assembler-arm.cc (638)
  26. deps/v8/src/arm/assembler-arm.h (263)
  27. deps/v8/src/arm/builtins-arm.cc (169)
  28. deps/v8/src/arm/code-stubs-arm.cc (3478)
  29. deps/v8/src/arm/code-stubs-arm.h (607)
  30. deps/v8/src/arm/codegen-arm-inl.h (48)
  31. deps/v8/src/arm/codegen-arm.cc (7360)
  32. deps/v8/src/arm/codegen-arm.h (512)
  33. deps/v8/src/arm/constants-arm.h (24)
  34. deps/v8/src/arm/cpu-arm.cc (39)
  35. deps/v8/src/arm/debug-arm.cc (14)
  36. deps/v8/src/arm/deoptimizer-arm.cc (110)
  37. deps/v8/src/arm/disasm-arm.cc (133)
  38. deps/v8/src/arm/frames-arm.h (5)
  39. deps/v8/src/arm/full-codegen-arm.cc (1110)
  40. deps/v8/src/arm/ic-arm.cc (676)
  41. deps/v8/src/arm/jump-target-arm.cc (174)
  42. deps/v8/src/arm/lithium-arm.cc (659)
  43. deps/v8/src/arm/lithium-arm.h (638)
  44. deps/v8/src/arm/lithium-codegen-arm.cc (1884)
  45. deps/v8/src/arm/lithium-codegen-arm.h (52)
  46. deps/v8/src/arm/lithium-gap-resolver-arm.cc (2)
  47. deps/v8/src/arm/macro-assembler-arm.cc (765)
  48. deps/v8/src/arm/macro-assembler-arm.h (207)
  49. deps/v8/src/arm/regexp-macro-assembler-arm.cc (45)
  50. deps/v8/src/arm/regexp-macro-assembler-arm.h (3)
  51. deps/v8/src/arm/register-allocator-arm-inl.h (86)
  52. deps/v8/src/arm/register-allocator-arm.cc (63)
  53. deps/v8/src/arm/register-allocator-arm.h (44)
  54. deps/v8/src/arm/simulator-arm.cc (515)
  55. deps/v8/src/arm/simulator-arm.h (85)
  56. deps/v8/src/arm/stub-cache-arm.cc (1240)
  57. deps/v8/src/arm/virtual-frame-arm-inl.h (45)
  58. deps/v8/src/arm/virtual-frame-arm.cc (843)
  59. deps/v8/src/arm/virtual-frame-arm.h (520)
  60. deps/v8/src/array.js (188)
  61. deps/v8/src/assembler.cc (583)
  62. deps/v8/src/assembler.h (324)
  63. deps/v8/src/ast-inl.h (11)
  64. deps/v8/src/ast.cc (367)
  65. deps/v8/src/ast.h (461)
  66. deps/v8/src/atomicops.h (2)
  67. deps/v8/src/atomicops_internals_mips_gcc.h (169)
  68. deps/v8/src/atomicops_internals_x86_gcc.cc (13)
  69. deps/v8/src/atomicops_internals_x86_gcc.h (6)
  70. deps/v8/src/bootstrapper.cc (1192)
  71. deps/v8/src/bootstrapper.h (121)
  72. deps/v8/src/builtins.cc (536)
  73. deps/v8/src/builtins.h (78)
  74. deps/v8/src/char-predicates.h (4)
  75. deps/v8/src/checks.cc (4)
  76. deps/v8/src/checks.h (4)
  77. deps/v8/src/code-stubs.cc (73)
  78. deps/v8/src/code-stubs.h (224)
  79. deps/v8/src/code.h (2)
  80. deps/v8/src/codegen-inl.h (64)
  81. deps/v8/src/codegen.cc (315)
  82. deps/v8/src/codegen.h (163)
  83. deps/v8/src/compilation-cache.cc (317)
  84. deps/v8/src/compilation-cache.h (207)
  85. deps/v8/src/compiler.cc (328)
  86. deps/v8/src/compiler.h (64)
  87. deps/v8/src/contexts.cc (144)
  88. deps/v8/src/contexts.h (100)
  89. deps/v8/src/conversions-inl.h (4)
  90. deps/v8/src/conversions.cc (132)
  91. deps/v8/src/conversions.h (23)
  92. deps/v8/src/counters.cc (23)
  93. deps/v8/src/counters.h (38)
  94. deps/v8/src/cpu-profiler-inl.h (22)
  95. deps/v8/src/cpu-profiler.cc (188)
  96. deps/v8/src/cpu-profiler.h (56)
  97. deps/v8/src/cpu.h (4)
  98. deps/v8/src/d8-debug.cc (2)
  99. deps/v8/src/d8-posix.cc (13)
  100. deps/v8/src/d8-readline.cc (2)

deps/v8/.gitignore (4)

@@ -19,17 +19,13 @@ d8
 d8_g
 shell
 shell_g
-/build/gyp
 /obj/
 /test/es5conform/data/
 /test/mozilla/data/
 /test/sputnik/sputniktests/
-/test/test262/data/
 /tools/oom_dump/oom_dump
 /tools/oom_dump/oom_dump.o
 /tools/visual_studio/Debug
 /tools/visual_studio/Release
 /xcodebuild/
 TAGS
-Makefile
-*.Makefile

deps/v8/AUTHORS (8)

@@ -7,9 +7,7 @@ Google Inc.
 Sigma Designs Inc.
 ARM Ltd.
 Hewlett-Packard Development Company, LP
-Igalia, S.L.
-Akinori MUSHA <knu@FreeBSD.org>
 Alexander Botero-Lowry <alexbl@FreeBSD.org>
 Alexander Karpinsky <homm86@gmail.com>
 Alexandre Vassalotti <avassalotti@gmail.com>
@@ -26,21 +24,17 @@ Jay Freeman <saurik@saurik.com>
 Joel Stanley <joel.stan@gmail.com>
 John Jozwiak <jjozwiak@codeaurora.org>
 Kun Zhang <zhangk@codeaurora.org>
-Martyn Capewell <martyn.capewell@arm.com>
 Matt Hanselman <mjhanselman@gmail.com>
-Maxim Mossienko <maxim.mossienko@gmail.com>
+Martyn Capewell <martyn.capewell@arm.com>
 Michael Smith <mike@w3.org>
 Mike Gilbert <floppymaster@gmail.com>
 Paolo Giarrusso <p.giarrusso@gmail.com>
 Patrick Gansterer <paroga@paroga.com>
-Peter Varga <pvarga@inf.u-szeged.hu>
 Rafal Krypa <rafal@krypa.net>
 Rene Rebe <rene@exactcode.de>
-Robert Mustacchi <rm@fingolfin.org>
 Rodolph Perfetta <rodolph.perfetta@arm.com>
 Ryan Dahl <coldredlemur@gmail.com>
 Sanjoy Das <sanjoy@playingwithpointers.com>
 Subrato K De <subratokde@codeaurora.org>
 Vlad Burlik <vladbph@gmail.com>
-Yuqiang Xian <yuqiang.xian@intel.com>
 Zaheer Ahmad <zahmad@codeaurora.org>

deps/v8/ChangeLog (443)

@@ -1,443 +1,3 @@
-2011-07-04: Version 3.4.9
-Added support for debugger inspection of locals in optimized frames
-(issue 1140).
-Fixed SConstruct to pass correct defines to samples/preparser when
-building with library=shared.
-Made date parser handle ES5 Date Time Strings correctly (issue 1498).
-Fixed a bug in Object.defineProperty on the arguments object.
-Performance improvements on all platforms.
-2011-06-29: Version 3.4.8
-Ensure 16-byte stack alignment on Solaris (issue 1505).
-Fix "illegal access" when calling parseInt with a radix
-that is not a smi. (issue 1246).
-2011-06-27: Version 3.4.7
-Fixed 64-bit build on FreeBSD.
-Added API to set the property attributes for the prototype
-property on functions created from FunctionTemplates.
-Bugfixes and performance work.
-2011-06-22: Version 3.4.6
-Lowered limit on code space for systems with low memory supply.
-Allowed compiling v8_shell with the 'host' toolset (issue 82437).
-Extended setBreakpoint API to accept partial script name (issue 1418).
-Made multi-line comments not count when deciding whether the '-->'
-comment starter is first on a line. This matches Safari.
-Made handling of non-array recievers in Array length setter correct
-(issue 1491).
-Added ability to heap profiler to iterate over snapshot's node
-(issue 1481).
-2011-06-20: Version 3.4.5
-Fixed issues 794, 1097, 1215(partial), 1417, 1435, 1472, 1473,
-1476, and 1477.
-Improved code generation for !0 and !1.
-Reduced memory usage for regular expressions with nested qualifiers.
-(issue 1472)
-Fixed V8 to count line terminators in multi-line comments.
-(Chromium issue 86431)
-Fixed disassembler=on option for release-mode builds. (issue 1473)
-Performance improvements on all platforms.
-2011-06-15: Version 3.4.4
-Added snapshot compression support and --stress-opt flag to d8.
-Improved performance of try/catch.
-Several GYP-related changes: Added support for building Xcode project
-files. Make the ARM simulator build with GYP again. Generate Makefiles
-for all architectures on Linux.
-Fixed Array.prototype.{reduce,reduceRight} to pass undefined as the
-receiver for strict mode callbacks. (issue 1436)
-Fixed a bug where an array load was incorrectly hoisted by GVN.
-Handle 'undefined' correctly when === has been specialized for doubles.
-(issue 1434)
-Corrected the limit of local variables in an optimized function from 64
-to 63.
-Correctly set ReadOnly flag on indexed properties when using the API Set
-method. (issue 1470)
-Give the correct error message when Object.isExtensible is called on a
-non-object. (issue 1452)
-Added GetOwnPropertyNames method for Object in the API. Patch by Peter
-Varga.
-Do not redefine properties unneccesarily in seal and freeze. (issue
-1447)
-IsExecutionTerminating has an Isolate parameter now.
-Distinguish keyed loads with a symbol key from fast elements loads,
-avoiding some useless deoptimizations. (issue 1471)
-2011-06-08: Version 3.4.3
-Clear the global thread table when an isolate is disposed
-(issue 1433).
-Converted time zone name to UTF8 on Windows (issue 1290).
-Limited the number of arguments in a function call to 32766
-(issue 1413).
-Compress sources of JS libraries in addition to the snapshot.
-Fixed a bug in Lithium environment iteration.
-Performance improvements on all platforms.
-2011-06-06: Version 3.4.2
-More work on ES-Harmony proxies. Still hidden behind a flag.
-Fixed some crash bugs and improved performance.
-Fixed building with gdb debugging support.
-Do not install SIGPROF handler until it is needed.
-Added DateTimeFormat to i18n API.
-Fixed compilation on OpenBSD.
-Take the ulimit into account when sizing the heap. OpenBSD users
-may still have to increase the default ulimit to run heavy pages in
-the browser.
-2011-06-01: Version 3.4.1
-Fixed JSON stringify issue with arrays.
-Changed calls to JS builtins to be passed undefined when called with
-implicit receiver.
-Implemented the set trap for Harmony proxies. Proxies still need to
-be enabled with the --harmony-proxies flag.
-2011-05-30: Version 3.4.0
-Changed calls to undefined property setters to not throw (issue 1355).
-Made RegExp objects not callable.
-Fixed issues on special case large JSON strings in new json parser
-(issues http://crbug.com/83877 and http://crbug.com/84186).
-Performance improvements on all platforms.
-2011-05-25: Version 3.3.10
-Fixed calls of strict mode function with an implicit receiver.
-Fixed fast handling of arrays to properly deal with changes to the
-Object prototype (issue 1403).
-Changed strict mode poison pill to be the same type error function
-(issue 1387).
-Fixed a debug crash in arguments object handling (issue 1227).
-Fixed a bug in deoptimization on x64 (issue 1404).
-Performance improvements and bug fixes on all platforms.
-2011-05-23: Version 3.3.9
-Added DateTimeFormat class to experimental i18n API.
-Extended preparser to give early errors for some strict mode
-restrictions.
-Removed legacy execScript function from V8.
-Extended isolate API with the ability to add embedder-specific
-data to an isolate.
-Added basic support for polymorphic loads from JS and external
-arrays.
-Fixed bug in handling of switch statements in the optimizing
-compiler.
-2011-05-18: Version 3.3.8
-Added MarkIndependent to the persistent handle API. Independent
-handles are independent of all other persistent handles and can be
-garbage collected more frequently.
-Implemented the get trap for Harmony proxies. Proxies are enabled
-with the --harmony-proxies flag.
-Performance improvements and bug fixes on all platforms.
-2011-05-16: Version 3.3.7
-Updated MIPS infrastructure files.
-Performance improvements and bug fixes on all platforms.
-2011-05-11: Version 3.3.6
-Updated MIPS infrastructure files.
-Added method IsCallable for Object to the API.
-Patch by Peter Varga.
-2011-05-09: Version 3.3.5
-Fixed build on FreeBSD. Patch by Akinori MUSHA.
-Added check that receiver is JSObject on API calls.
-Implemented CallAsConstructor method for Object in the API (Issue 1348).
-Patch by Peter Varga.
-Added CallAsFunction method to the Object class in the API (Issue 1336).
-Patch by Peter Varga.
-Added per-isolate locking and unlocking.
-Fixed bug in x64 >>> operator (Issue 1359).
-2011-05-04: Version 3.3.4
-Implemented API to disallow code generation from strings for a context
-(issue 1258).
-Fixed bug with whitespaces in parseInt (issue 955).
-Fixed bug with == comparison of Date objects (issue 1356).
-Added GYP variables for ARM code generation:
-v8_can_use_vfp_instructions, v8_can_use_unaligned_accesses
-and v8_use_arm_eabi_hardfloat.
-2011-05-02: Version 3.3.3
-Added support for generating Visual Studio solution and project files
-using GYP.
-Implemented support for ARM EABI calling convention variation where
-floating-point arguments are passed in registers (hardfloat).
-Added Object::HasOwnProperty() to the API.
-Added support for compressing startup data to reduce binary size. This
-includes build time support and an API for the embedder to decompress
-the startup data before initializing V8.
-Reduced the profiling hooks overhead from >400% to 25% when using
-ll_prof.
-Performance improvements and bug fixes on all platforms.
-2011-04-27: Version 3.3.2
-Fixed crash bug on ARM with no VFP3 hardware.
-Fixed compilation of V8 without debugger support.
-Improved performance on JSLint.
-Added support Float64 WebGL arrays.
-Fixed crash bug in regexp replace.
-2011-04-20: Version 3.3.1
-Reduced V8 binary size by removing virtual functions from hydrogen.
-Fixed crash bug on x64.
-Performance improvements on ARM and IA32.
-2011-04-18: Version 3.3.0
-Fixed bug in floating point rounding in Crankshaft on ARM
-(issue 958)
-Fixed a number of issues with running without VFPv3 support on ARM
-(issue 1315)
-Introduced v8Locale.Collator, a partial implementation of Collator
-per last ECMAScript meeting + mailing list.
-Minor performance improvements and bug fixes.
-2011-04-13: Version 3.2.10
-Fixed bug in external float arrays on ARM (issue 1323).
-Minor performance improvements and bug fixes.
-2011-04-11: Version 3.2.9
-Removed support for ABI prior to EABI on ARM.
-Fixed multiple crash bugs.
-Added GCMole to the repository, a simple static analysis tool that
-searches for GC-unsafe evaluation order dependent callsites.
-Made preparser API be exported in shared libraries.
-Fixed multiple issues in EcmaScript 5 strict mode implementation.
-Fixed mutable __proto__ property if object is not extensible
-(Issue 1309).
-Fixed auto suspension of the sampler thread.
-2011-04-06: Version 3.2.8
-Exposed WebGL typed array constructors in the shell sample.
-Performance improvements on all platforms.
-2011-04-04: Version 3.2.7
-Disabled the original 'classic' V8 code generator. Crankshaft is
-now the default on all platforms.
-Changed the heap profiler to use more descriptive names.
-Performance and stability improvements to isolates on all platforms.
-2011-03-30: Version 3.2.6
-Fixed xcode build warning in shell.cc (out of order initialization).
-Fixed null-pointer dereference in the compiler when running without
-SSE3 support (Chromium issue 77654).
-Fixed x64 compilation error due to some dead code. (Issue 1286)
-Introduced scons target to build the preparser stand-alone example.
-Made FreeBSD build and pass all tests.
-2011-03-28: Version 3.2.5
-Fixed build with Irregexp interpreter (issue 1266).
-Added Crankshaft support for external arrays.
-Fixed two potential crash bugs.
-2011-03-23: Version 3.2.4
-Added isolates which allows several V8 instances in the same process.
-This is controlled through the new Isolate class in the API.
-Implemented more of EcmaScript 5 strict mode.
-Reduced the time it takes to make detailed heap snapshot.
-Added a number of commands to the ARM simulator and enhanced the ARM
-disassembler.
-2011-03-17: Version 3.2.3
-Fixed a number of crash bugs.
-Fixed Array::New(length) to return an array with a length (issue 1256).
-Fixed FreeBSD build.
-Changed __defineGetter__ to not throw (matching the behavior of Safari).
-Implemented more of EcmaScript 5 strict mode.
-Improved Crankshaft performance on all platforms.
-2011-03-14: Version 3.2.2
-Fixed a number of crash and correctness bugs.
-Improved Crankshaft performance on all platforms.
-Fixed Crankshaft on Solaris/Illumos.
-2011-03-10: Version 3.2.1
-Fixed a number of crash bugs.
-Improved Crankshaft for x64 and ARM.
-Implemented more of EcmaScript 5 strict mode.
-2011-03-07: Version 3.2.0
-Fixed a number of crash bugs.
-Turned on Crankshaft by default on x64 and ARM.
-Improved Crankshaft for x64 and ARM.
-Implemented more of EcmaScript 5 strict mode.
 2011-03-02: Version 3.1.8
 Fixed a number of crash bugs.
@@ -3004,6 +2564,3 @@
 Initial export.
-# Local Variables:
-# mode:text
-# End:

deps/v8/SConstruct (546)

@@ -1,4 +1,4 @@
-# Copyright 2011 the V8 project authors. All rights reserved.
+# Copyright 2010 the V8 project authors. All rights reserved.
 # Redistribution and use in source and binary forms, with or without
 # modification, are permitted provided that the following conditions are
 # met:
@@ -36,6 +36,13 @@ root_dir = dirname(File('SConstruct').rfile().abspath)
 sys.path.insert(0, join(root_dir, 'tools'))
 import js2c, utils
+# ANDROID_TOP is the top of the Android checkout, fetched from the environment
+# variable 'TOP'. You will also need to set the CXX, CC, AR and RANLIB
+# environment variables to the cross-compiling tools.
+ANDROID_TOP = os.environ.get('TOP')
+if ANDROID_TOP is None:
+ANDROID_TOP=""
 # ARM_TARGET_LIB is the path to the dynamic library to use on the target
 # machine if cross-compiling to an arm machine. You will also need to set
 # the additional cross-compiling environment variables to the cross compiler.
@@ -51,6 +58,50 @@ else:
 GCC_EXTRA_CCFLAGS = []
 GCC_DTOA_EXTRA_CCFLAGS = []
+ANDROID_FLAGS = ['-march=armv7-a',
+'-mtune=cortex-a8',
+'-mfloat-abi=softfp',
+'-mfpu=vfp',
+'-fpic',
+'-mthumb-interwork',
+'-funwind-tables',
+'-fstack-protector',
+'-fno-short-enums',
+'-fmessage-length=0',
+'-finline-functions',
+'-fno-inline-functions-called-once',
+'-fgcse-after-reload',
+'-frerun-cse-after-loop',
+'-frename-registers',
+'-fomit-frame-pointer',
+'-finline-limit=64',
+'-DCAN_USE_VFP_INSTRUCTIONS=1',
+'-DCAN_USE_ARMV7_INSTRUCTIONS=1',
+'-DCAN_USE_UNALIGNED_ACCESSES=1',
+'-MD']
+ANDROID_INCLUDES = [ANDROID_TOP + '/bionic/libc/arch-arm/include',
+ANDROID_TOP + '/bionic/libc/include',
+ANDROID_TOP + '/bionic/libstdc++/include',
+ANDROID_TOP + '/bionic/libc/kernel/common',
+ANDROID_TOP + '/bionic/libc/kernel/arch-arm',
+ANDROID_TOP + '/bionic/libm/include',
+ANDROID_TOP + '/bionic/libm/include/arch/arm',
+ANDROID_TOP + '/bionic/libthread_db/include',
+ANDROID_TOP + '/frameworks/base/include',
+ANDROID_TOP + '/system/core/include']
+ANDROID_LINKFLAGS = ['-nostdlib',
+'-Bdynamic',
+'-Wl,-T,' + ANDROID_TOP + '/build/core/armelf.x',
+'-Wl,-dynamic-linker,/system/bin/linker',
+'-Wl,--gc-sections',
+'-Wl,-z,nocopyreloc',
+'-Wl,-rpath-link=' + ANDROID_TOP + '/out/target/product/generic/obj/lib',
+ANDROID_TOP + '/out/target/product/generic/obj/lib/crtbegin_dynamic.o',
+ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork/libgcc.a',
+ANDROID_TOP + '/out/target/product/generic/obj/lib/crtend_android.o'];
 LIBRARY_FLAGS = {
 'all': {
 'CPPPATH': [join(root_dir, 'src')],
@@ -78,9 +129,6 @@ LIBRARY_FLAGS = {
 'inspector:on': {
 'CPPDEFINES': ['INSPECTOR'],
 },
-'fasttls:on': {
-'CPPDEFINES': ['V8_FAST_TLS'],
-},
 'liveobjectlist:on': {
 'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT', 'INSPECTOR',
 'LIVE_OBJECT_LIST', 'OBJECT_PRINT'],
@@ -89,7 +137,7 @@ LIBRARY_FLAGS = {
 'gcc': {
 'all': {
 'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
-'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
+'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
 },
 'visibility:hidden': {
 # Use visibility=default to disable this.
@@ -101,10 +149,17 @@ LIBRARY_FLAGS = {
 'mode:debug': {
 'CCFLAGS': ['-g', '-O0'],
 'CPPDEFINES': ['ENABLE_DISASSEMBLER', 'DEBUG'],
+'os:android': {
+'CCFLAGS': ['-mthumb']
+}
 },
 'mode:release': {
 'CCFLAGS': ['-O3', '-fomit-frame-pointer', '-fdata-sections',
 '-ffunction-sections'],
+'os:android': {
+'CCFLAGS': ['-mthumb', '-Os'],
+'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
+}
 },
 'os:linux': {
 'CCFLAGS': ['-ansi'] + GCC_EXTRA_CCFLAGS,
@@ -123,7 +178,6 @@ LIBRARY_FLAGS = {
 'CPPPATH' : ['/usr/local/include'],
 'LIBPATH' : ['/usr/local/lib'],
 'CCFLAGS': ['-ansi'],
-'LIBS': ['execinfo']
 },
 'os:openbsd': {
 'CPPPATH' : ['/usr/local/include'],
@@ -142,6 +196,14 @@ LIBRARY_FLAGS = {
 'CCFLAGS': ['-DWIN32'],
 'CXXFLAGS': ['-DWIN32'],
 },
+'os:android': {
+'CPPDEFINES': ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
+'__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
+'CCFLAGS': ANDROID_FLAGS,
+'WARNINGFLAGS': ['-Wall', '-Wno-unused', '-Werror=return-type',
+'-Wstrict-aliasing=2'],
+'CPPPATH': ANDROID_INCLUDES,
+},
 'arch:ia32': {
 'CPPDEFINES': ['V8_TARGET_ARCH_IA32'],
 'CCFLAGS': ['-m32'],
@@ -154,24 +216,6 @@ LIBRARY_FLAGS = {
 },
 'unalignedaccesses:off' : {
 'CPPDEFINES' : ['CAN_USE_UNALIGNED_ACCESSES=0']
-},
-'armeabi:soft' : {
-'CPPDEFINES' : ['USE_EABI_HARDFLOAT=0'],
-'simulator:none': {
-'CCFLAGS': ['-mfloat-abi=soft'],
-}
-},
-'armeabi:softfp' : {
-'CPPDEFINES' : ['USE_EABI_HARDFLOAT=0', 'CAN_USE_VFP_INSTRUCTIONS'],
-'simulator:none': {
-'CCFLAGS': ['-mfloat-abi=softfp'],
-}
-},
-'armeabi:hard' : {
-'CPPDEFINES' : ['USE_EABI_HARDFLOAT=1', 'CAN_USE_VFP_INSTRUCTIONS'],
-'simulator:none': {
-'CCFLAGS': ['-mfloat-abi=hard'],
-}
 }
 },
 'simulator:arm': {
@@ -180,40 +224,14 @@ LIBRARY_FLAGS = {
 },
 'arch:mips': {
 'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
-'mips_arch_variant:mips32r2': {
-'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
-},
 'simulator:none': {
-'CCFLAGS': ['-EL'],
-'LINKFLAGS': ['-EL'],
-'mips_arch_variant:mips32r2': {
-'CCFLAGS': ['-mips32r2', '-Wa,-mips32r2']
-},
-'mips_arch_variant:mips32r1': {
-'CCFLAGS': ['-mips32', '-Wa,-mips32']
-},
-'library:static': {
-'LINKFLAGS': ['-static', '-static-libgcc']
-},
-'mipsabi:softfloat': {
-'CCFLAGS': ['-msoft-float'],
-'LINKFLAGS': ['-msoft-float']
-},
-'mipsabi:hardfloat': {
-'CCFLAGS': ['-mhard-float'],
-'LINKFLAGS': ['-mhard-float']
-}
+'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
+'LDFLAGS': ['-EL']
 }
 },
 'simulator:mips': {
 'CCFLAGS': ['-m32'],
 'LINKFLAGS': ['-m32'],
-'mipsabi:softfloat': {
-'CPPDEFINES': ['__mips_soft_float=1'],
-},
-'mipsabi:hardfloat': {
-'CPPDEFINES': ['__mips_hard_float=1'],
-}
 },
 'arch:x64': {
 'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
@@ -222,15 +240,12 @@ LIBRARY_FLAGS = {
 },
 'gdbjit:on': {
 'CPPDEFINES': ['ENABLE_GDB_JIT_INTERFACE']
-},
-'compress_startup_data:bz2': {
-'CPPDEFINES': ['COMPRESS_STARTUP_DATA_BZ2']
 }
 },
 'msvc': {
 'all': {
 'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
-'CXXFLAGS': ['/GR-', '/Gy'],
+'CXXFLAGS': ['$CCFLAGS', '/GR-', '/Gy'],
 'CPPDEFINES': ['WIN32'],
 'LINKFLAGS': ['/INCREMENTAL:NO', '/NXCOMPAT', '/IGNORE:4221'],
 'CCPDBFLAGS': ['/Zi']
@@ -291,7 +306,6 @@ V8_EXTRA_FLAGS = {
 'gcc': {
 'all': {
 'WARNINGFLAGS': ['-Wall',
-'-Werror',
 '-W',
 '-Wno-unused-parameter',
 '-Wnon-virtual-dtor']
@@ -310,11 +324,6 @@ V8_EXTRA_FLAGS = {
 'os:macos': {
 'WARNINGFLAGS': ['-pedantic']
 },
-'arch:arm': {
-# This is to silence warnings about ABI changes that some versions of the
-# CodeSourcery G++ tool chain produce for each occurrence of varargs.
-'WARNINGFLAGS': ['-Wno-abi']
-},
 'disassembler:on': {
 'CPPDEFINES': ['ENABLE_DISASSEMBLER']
 }
@@ -335,9 +344,6 @@ V8_EXTRA_FLAGS = {
 },
 'arch:mips': {
 'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
-'mips_arch_variant:mips32r2': {
-'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
-},
 },
 'disassembler:on': {
 'CPPDEFINES': ['ENABLE_DISASSEMBLER']
@@ -367,11 +373,6 @@ MKSNAPSHOT_EXTRA_FLAGS = {
 'os:win32': {
 'LIBS': ['winmm', 'ws2_32'],
 },
-'compress_startup_data:bz2': {
-'os:linux': {
-'LIBS': ['bz2']
-}
-},
 },
 'msvc': {
 'all': {
@@ -400,16 +401,10 @@ DTOA_EXTRA_FLAGS = {
 CCTEST_EXTRA_FLAGS = {
 'all': {
 'CPPPATH': [join(root_dir, 'src')],
-'library:shared': {
-'CPPDEFINES': ['USING_V8_SHARED']
-},
 },
 'gcc': {
 'all': {
-'LIBPATH': [abspath('.')],
-'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
-'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
-'LINKFLAGS': ['$CCFLAGS'],
+'LIBPATH': [abspath('.')]
 },
 'os:linux': {
 'LIBS': ['pthread'],
@@ -430,6 +425,19 @@ CCTEST_EXTRA_FLAGS = {
 'os:win32': {
 'LIBS': ['winmm', 'ws2_32']
 },
+'os:android': {
+'CPPDEFINES': ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
+'__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
+'CCFLAGS': ANDROID_FLAGS,
+'CPPPATH': ANDROID_INCLUDES,
+'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib',
+ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'],
+'LINKFLAGS': ANDROID_LINKFLAGS,
+'LIBS': ['log', 'c', 'stdc++', 'm', 'gcc'],
+'mode:release': {
+'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
+}
+},
 'arch:arm': {
 'LINKFLAGS': ARM_LINK_FLAGS
 },
@@ -439,6 +447,9 @@ CCTEST_EXTRA_FLAGS = {
 'CPPDEFINES': ['_HAS_EXCEPTIONS=0'],
 'LIBS': ['winmm', 'ws2_32']
 },
+'library:shared': {
+'CPPDEFINES': ['USING_V8_SHARED']
+},
 'arch:ia32': {
 'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
 },
@@ -453,16 +464,11 @@ CCTEST_EXTRA_FLAGS = {
 SAMPLE_FLAGS = {
 'all': {
 'CPPPATH': [join(abspath('.'), 'include')],
-'library:shared': {
-'CPPDEFINES': ['USING_V8_SHARED']
-},
 },
 'gcc': {
 'all': {
 'LIBPATH': ['.'],
-'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
-'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
-'LINKFLAGS': ['$CCFLAGS'],
+'CCFLAGS': ['-fno-rtti', '-fno-exceptions']
 },
 'os:linux': {
 'LIBS': ['pthread'],
@@ -475,9 +481,6 @@ SAMPLE_FLAGS = {
 'LIBS': ['execinfo', 'pthread']
 },
 'os:solaris': {
-# On Solaris, to get isinf, INFINITY, fpclassify and other macros one
-# needs to define __C99FEATURES__.
-'CPPDEFINES': ['__C99FEATURES__'],
 'LIBPATH' : ['/usr/local/lib'],
 'LIBS': ['m', 'pthread', 'socket', 'nsl', 'rt'],
 'LINKFLAGS': ['-mt']
@@ -489,26 +492,21 @@ SAMPLE_FLAGS = {
 'os:win32': {
 'LIBS': ['winmm', 'ws2_32']
 },
-'arch:arm': {
-'LINKFLAGS': ARM_LINK_FLAGS,
-'armeabi:soft' : {
-'CPPDEFINES' : ['USE_EABI_HARDFLOAT=0'],
-'simulator:none': {
-'CCFLAGS': ['-mfloat-abi=soft'],
-}
-},
-'armeabi:softfp' : {
-'CPPDEFINES' : ['USE_EABI_HARDFLOAT=0'],
-'simulator:none': {
-'CCFLAGS': ['-mfloat-abi=softfp'],
-}
-},
-'armeabi:hard' : {
-'CPPDEFINES' : ['USE_EABI_HARDFLOAT=1', 'CAN_USE_VFP_INSTRUCTIONS'],
-'simulator:none': {
-'CCFLAGS': ['-mfloat-abi=hard'],
-}
-}
+'os:android': {
+'CPPDEFINES': ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
+'__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
+'CCFLAGS': ANDROID_FLAGS,
+'CPPPATH': ANDROID_INCLUDES,
+'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib',
+ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'],
+'LINKFLAGS': ANDROID_LINKFLAGS,
+'LIBS': ['log', 'c', 'stdc++', 'm', 'gcc'],
+'mode:release': {
+'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
+}
+},
+'arch:arm': {
+'LINKFLAGS': ARM_LINK_FLAGS
 },
 'arch:ia32': {
 'CCFLAGS': ['-m32'],
@@ -520,29 +518,10 @@ SAMPLE_FLAGS = {
 },
 'arch:mips': {
 'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
-'mips_arch_variant:mips32r2': {
-'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
-},
 'simulator:none': {
-'CCFLAGS': ['-EL'],
+'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
 'LINKFLAGS': ['-EL'],
-'mips_arch_variant:mips32r2': {
-'CCFLAGS': ['-mips32r2', '-Wa,-mips32r2']
-},
-'mips_arch_variant:mips32r1': {
-'CCFLAGS': ['-mips32', '-Wa,-mips32']
-},
-'library:static': {
-'LINKFLAGS': ['-static', '-static-libgcc']
-},
-'mipsabi:softfloat': {
-'CCFLAGS': ['-msoft-float'],
-'LINKFLAGS': ['-msoft-float']
-},
-'mipsabi:hardfloat': {
-'CCFLAGS': ['-mhard-float'],
-'LINKFLAGS': ['-mhard-float']
-}
+'LDFLAGS': ['-EL']
 }
 },
 'simulator:arm': {
@@ -560,12 +539,6 @@ SAMPLE_FLAGS = {
 'CCFLAGS': ['-g', '-O0'],
 'CPPDEFINES': ['DEBUG']
 },
-'compress_startup_data:bz2': {
-'CPPDEFINES': ['COMPRESS_STARTUP_DATA_BZ2'],
-'os:linux': {
-'LIBS': ['bz2']
-}
-},
 },
 'msvc': {
 'all': {
@@ -578,161 +551,9 @@ SAMPLE_FLAGS = {
 'verbose:on': {
 'LINKFLAGS': ['/VERBOSE']
 },
-'prof:on': {
-'LINKFLAGS': ['/MAP']
-},
-'mode:release': {
-'CCFLAGS': ['/O2'],
-'LINKFLAGS': ['/OPT:REF', '/OPT:ICF'],
-'msvcrt:static': {
-'CCFLAGS': ['/MT']
-},
-'msvcrt:shared': {
-'CCFLAGS': ['/MD']
-},
-'msvcltcg:on': {
-'CCFLAGS': ['/GL'],
-'pgo:off': {
-'LINKFLAGS': ['/LTCG'],
-},
-},
-'pgo:instrument': {
-'LINKFLAGS': ['/LTCG:PGI']
-},
-'pgo:optimize': {
-'LINKFLAGS': ['/LTCG:PGO']
-}
-},
-'arch:ia32': {
-'CPPDEFINES': ['V8_TARGET_ARCH_IA32', 'WIN32'],
-'LINKFLAGS': ['/MACHINE:X86']
-},
-'arch:x64': {
-'CPPDEFINES': ['V8_TARGET_ARCH_X64', 'WIN32'],
-'LINKFLAGS': ['/MACHINE:X64', '/STACK:2091752']
-},
-'mode:debug': {
-'CCFLAGS': ['/Od'],
-'LINKFLAGS': ['/DEBUG'],
-'CPPDEFINES': ['DEBUG'],
-'msvcrt:static': {
-'CCFLAGS': ['/MTd']
-},
-'msvcrt:shared': {
-'CCFLAGS': ['/MDd']
-}
-}
-}
-}
-PREPARSER_FLAGS = {
-'all': {
-'CPPPATH': [join(abspath('.'), 'include'), join(abspath('.'), 'src')],
 'library:shared': {
 'CPPDEFINES': ['USING_V8_SHARED']
 },
-},
-'gcc': {
-'all': {
-'LIBPATH': ['.'],
-'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
-'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
-'LINKFLAGS': ['$CCFLAGS'],
-},
-'os:win32': {
-'LIBS': ['winmm', 'ws2_32']
-},
-'arch:arm': {
-'LINKFLAGS': ARM_LINK_FLAGS,
-'armeabi:soft' : {
-'CPPDEFINES' : ['USE_EABI_HARDFLOAT=0'],
-'simulator:none': {
-'CCFLAGS': ['-mfloat-abi=soft'],
-}
-},
-'armeabi:softfp' : {
-'simulator:none': {
-'CCFLAGS': ['-mfloat-abi=softfp'],
-}
-},
-'armeabi:hard' : {
-'simulator:none': {
-'CCFLAGS': ['-mfloat-abi=hard'],
-}
-}
-},
-'arch:ia32': {
-'CCFLAGS': ['-m32'],
-'LINKFLAGS': ['-m32']
-},
-'arch:x64': {
-'CCFLAGS': ['-m64'],
-'LINKFLAGS': ['-m64']
-},
-'arch:mips': {
-'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
-'mips_arch_variant:mips32r2': {
-'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
-},
-'simulator:none': {
-'CCFLAGS': ['-EL'],
-'LINKFLAGS': ['-EL'],
-'mips_arch_variant:mips32r2': {
-'CCFLAGS': ['-mips32r2', '-Wa,-mips32r2']
-},
-'mips_arch_variant:mips32r1': {
-'CCFLAGS': ['-mips32', '-Wa,-mips32']
-},
-'library:static': {
-'LINKFLAGS': ['-static', '-static-libgcc']
-},
-'mipsabi:softfloat': {
-'CCFLAGS': ['-msoft-float'],
-'LINKFLAGS': ['-msoft-float']
-},
-'mipsabi:hardfloat': {
-'CCFLAGS': ['-mhard-float'],
-'LINKFLAGS': ['-mhard-float']
-}
-}
-},
-'simulator:arm': {
-'CCFLAGS': ['-m32'],
-'LINKFLAGS': ['-m32']
-},
-'simulator:mips': {
-'CCFLAGS': ['-m32'],
-'LINKFLAGS': ['-m32'],
-'mipsabi:softfloat': {
-'CPPDEFINES': ['__mips_soft_float=1'],
-},
-'mipsabi:hardfloat': {
-'CPPDEFINES': ['__mips_hard_float=1'],
-}
-},
-'mode:release': {
-'CCFLAGS': ['-O2']
-},
-'mode:debug': {
-'CCFLAGS': ['-g', '-O0'],
-'CPPDEFINES': ['DEBUG']
-},
-'os:freebsd': {
-'LIBPATH' : ['/usr/local/lib'],
-},
-},
-'msvc': {
-'all': {
-'LIBS': ['winmm', 'ws2_32']
-},
-'verbose:off': {
-'CCFLAGS': ['/nologo'],
-'LINKFLAGS': ['/NOLOGO']
-},
-'verbose:on': {
-'LINKFLAGS': ['/VERBOSE']
-},
 'prof:on': {
 'LINKFLAGS': ['/MAP']
 },
@@ -759,11 +580,11 @@ PREPARSER_FLAGS = {
 }
 },
 'arch:ia32': {
-'CPPDEFINES': ['V8_TARGET_ARCH_IA32', 'WIN32'],
+'CPPDEFINES': ['V8_TARGET_ARCH_IA32'],
 'LINKFLAGS': ['/MACHINE:X86']
 },
 'arch:x64': {
-'CPPDEFINES': ['V8_TARGET_ARCH_X64', 'WIN32'],
+'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
 'LINKFLAGS': ['/MACHINE:X64', '/STACK:2091752']
 },
 'mode:debug': {
@@ -783,11 +604,6 @@ PREPARSER_FLAGS = {
 D8_FLAGS = {
 'gcc': {
-'all': {
-'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
-'CXXFLAGS': ['-fno-rtti', '-fno-exceptions'],
-'LINKFLAGS': ['$CCFLAGS'],
-},
 'console:readline': {
 'LIBS': ['readline']
 },
@@ -807,18 +623,18 @@ D8_FLAGS = {
 'os:openbsd': {
 'LIBS': ['pthread'],
 },
+'os:android': {
+'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib',
+ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'],
+'LINKFLAGS': ANDROID_LINKFLAGS,
+'LIBS': ['log', 'c', 'stdc++', 'm', 'gcc'],
+},
 'os:win32': {
 'LIBS': ['winmm', 'ws2_32'],
 },
 'arch:arm': {
 'LINKFLAGS': ARM_LINK_FLAGS
 },
-'compress_startup_data:bz2': {
-'CPPDEFINES': ['COMPRESS_STARTUP_DATA_BZ2'],
-'os:linux': {
-'LIBS': ['bz2']
-}
-}
 },
 'msvc': {
 'all': {
@@ -870,14 +686,12 @@ def GuessVisibility(env):
 def GuessStrictAliasing(env):
-# There seems to be a problem with gcc 4.5.x.
-# See http://code.google.com/p/v8/issues/detail?id=884
-# It can be worked around by disabling strict aliasing.
+# There seems to be a problem with gcc 4.5.x
+# see http://code.google.com/p/v8/issues/detail?id=884
+# it can be worked around by disabling strict aliasing
 toolchain = env['toolchain'];
 if toolchain == 'gcc':
 env = Environment(tools=['gcc'])
-# The gcc version should be available in env['CCVERSION'],
-# but when scons detects msvc this value is not set.
 version = subprocess.Popen([env['CC'], '-dumpversion'],
 stdout=subprocess.PIPE).communicate()[0]
 if version.find('4.5') == 0:
@@ -885,25 +699,22 @@ def GuessStrictAliasing(env):
 return 'default'
-PLATFORM_OPTIONS = {
-'arch': {
-'values': ['arm', 'ia32', 'x64', 'mips'],
-'guess': GuessArch,
-'help': 'the architecture to build for'
+SIMPLE_OPTIONS = {
+'toolchain': {
+'values': ['gcc', 'msvc'],
+'guess': GuessToolchain,
+'help': 'the toolchain to use'
 },
 'os': {
-'values': ['freebsd', 'linux', 'macos', 'win32', 'openbsd', 'solaris', 'cygwin'],
+'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
 'guess': GuessOS,
 'help': 'the os to build for'
 },
-'toolchain': {
-'values': ['gcc', 'msvc'],
-'guess': GuessToolchain,
-'help': 'the toolchain to use'
-}
-}
-SIMPLE_OPTIONS = {
+'arch': {
+'values':['arm', 'ia32', 'x64', 'mips'],
+'guess': GuessArch,
+'help': 'the architecture to build for'
+},
 'regexp': {
 'values': ['native', 'interpreted'],
 'default': 'native',
@@ -994,12 +805,6 @@ SIMPLE_OPTIONS = {
 'default': 'off',
 'help': 'enable the disassembler to inspect generated code'
 },
-'fasttls': {
-'values': ['on', 'off'],
-'default': 'on',
-'help': 'enable fast thread local storage support '
-'(if available on the current architecture/platform)'
-},
 'sourcesignatures': {
 'values': ['MD5', 'timestamp'],
 'default': 'MD5',
@@ -1018,55 +823,41 @@ SIMPLE_OPTIONS = {
 'visibility': {
 'values': ['default', 'hidden'],
 'guess': GuessVisibility,
+'depends': ['os', 'toolchain'],
 'help': 'shared library symbol visibility'
 },
 'strictaliasing': {
 'values': ['default', 'off'],
 'guess': GuessStrictAliasing,
+'depends': ['toolchain'],
 'help': 'assume strict aliasing while optimizing'
 },
 'pgo': {
 'values': ['off', 'instrument', 'optimize'],
 'default': 'off',
 'help': 'select profile guided optimization variant',
-},
-'armeabi': {
-'values': ['hard', 'softfp', 'soft'],
-'default': 'softfp',
-'help': 'generate calling conventiont according to selected ARM EABI variant'
-},
-'mipsabi': {
-'values': ['hardfloat', 'softfloat', 'none'],
-'default': 'hardfloat',
-'help': 'generate calling conventiont according to selected mips ABI'
-},
-'mips_arch_variant': {
-'values': ['mips32r2', 'mips32r1'],
-'default': 'mips32r2',
-'help': 'mips variant'
-},
-'compress_startup_data': {
-'values': ['off', 'bz2'],
-'default': 'off',
-'help': 'compress startup data (snapshot) [Linux only]'
-},
 }
+}
-ALL_OPTIONS = dict(PLATFORM_OPTIONS, **SIMPLE_OPTIONS)
-def AddOptions(options, result):
-guess_env = Environment(options=result)
-for (name, option) in options.iteritems():
+def AddOption(result, name, option):
 if 'guess' in option:
 # Option has a guess function
 guess = option.get('guess')
+guess_env = Environment(options=result)
+# Check if all options that the guess function depends on are set
+if 'depends' in option:
+for dependency in option.get('depends'):
+if not dependency in guess_env:
+return False
 default = guess(guess_env)
 else:
 # Option has a fixed default
 default = option.get('default')
 help = '%s (%s)' % (option.get('help'), ", ".join(option['values']))
 result.Add(name, help, default)
+return True
@@ -1076,8 +867,13 @@ def GetOptions():
 result.Add('cache', 'directory to use for scons build cache', '')
 result.Add('env', 'override environment settings (NAME0:value0,NAME1:value1,...)', '')
 result.Add('importenv', 'import environment settings (NAME0,NAME1,...)', '')
-AddOptions(PLATFORM_OPTIONS, result)
-AddOptions(SIMPLE_OPTIONS, result)
+options = SIMPLE_OPTIONS
+while len(options):
+postpone = {}
+for (name, option) in options.iteritems():
+if not AddOption(result, name, option):
+postpone[name] = option
+options = postpone
 return result
@@ -1158,8 +954,8 @@ def VerifyOptions(env):
 return False
 if env['os'] == 'win32' and env['library'] == 'shared' and env['prof'] == 'on':
 Abort("Profiling on windows only supported for static library.")
-if env['gdbjit'] == 'on' and ((env['os'] != 'linux' and env['os'] != 'macos') or (env['arch'] != 'ia32' and env['arch'] != 'x64' and env['arch'] != 'arm')):
-Abort("GDBJIT interface is supported only for Intel-compatible (ia32 or x64) Linux/OSX target.")
+if env['gdbjit'] == 'on' and (env['os'] != 'linux' or (env['arch'] != 'ia32' and env['arch'] != 'x64' and env['arch'] != 'arm')):
+Abort("GDBJIT interface is supported only for Intel-compatible (ia32 or x64) Linux target.")
 if env['os'] == 'win32' and env['soname'] == 'on':
 Abort("Shared Object soname not applicable for Windows.")
 if env['soname'] == 'on' and env['library'] == 'static':
@@ -1172,9 +968,7 @@ def VerifyOptions(env):
 print env['arch']
 print env['simulator']
 Abort("Option unalignedaccesses only supported for the ARM architecture.")
-if env['os'] != 'linux' and env['compress_startup_data'] != 'off':
-Abort("Startup data compression is only available on Linux")
-for (name, option) in ALL_OPTIONS.iteritems():
+for (name, option) in SIMPLE_OPTIONS.iteritems():
 if (not name in env):
 message = ("A value for option %s must be specified (%s)." %
 (name, ", ".join(option['values'])))
@@ -1196,7 +990,6 @@ class BuildContext(object):
 self.options = options
 self.env_overrides = env_overrides
 self.samples = samples
-self.preparser_targets = []
 self.use_snapshot = (options['snapshot'] != 'off')
 self.build_snapshot = (options['snapshot'] == 'on')
 self.flags = None
@@ -1275,8 +1068,11 @@ def PostprocessOptions(options, os):
 if 'msvcltcg' in ARGUMENTS:
 print "Warning: forcing msvcltcg on as it is required for pgo (%s)" % options['pgo']
 options['msvcltcg'] = 'on'
-if (options['mipsabi'] != 'none') and (options['arch'] != 'mips') and (options['simulator'] != 'mips'):
-options['mipsabi'] = 'none'
+if options['arch'] == 'mips':
+if ('regexp' in ARGUMENTS) and options['regexp'] == 'native':
+# Print a warning if native regexp is specified for mips
+print "Warning: forcing regexp to interpreted for mips"
+options['regexp'] = 'interpreted'
 if options['liveobjectlist'] == 'on':
 if (options['debuggersupport'] != 'on') or (options['mode'] == 'release'):
 # Print a warning that liveobjectlist will implicitly enable the debugger
@@ -1303,7 +1099,7 @@ def ParseEnvOverrides(arg, imports):
 def BuildSpecific(env, mode, env_overrides, tools):
 options = {'mode': mode}
-for option in ALL_OPTIONS:
+for option in SIMPLE_OPTIONS:
 options[option] = env[option]
 PostprocessOptions(options, env['os'])
@@ -1323,7 +1119,6 @@ def BuildSpecific(env, mode, env_overrides, tools):
 dtoa_flags = context.AddRelevantFlags(library_flags, DTOA_EXTRA_FLAGS)
 cctest_flags = context.AddRelevantFlags(v8_flags, CCTEST_EXTRA_FLAGS)
 sample_flags = context.AddRelevantFlags(user_environ, SAMPLE_FLAGS)
-preparser_flags = context.AddRelevantFlags(user_environ, PREPARSER_FLAGS)
 d8_flags = context.AddRelevantFlags(library_flags, D8_FLAGS)
 context.flags = {
@@ -1332,15 +1127,13 @@ def BuildSpecific(env, mode, env_overrides, tools):
 'dtoa': dtoa_flags,
 'cctest': cctest_flags,
 'sample': sample_flags,
-'d8': d8_flags,
-'preparser': preparser_flags
+'d8': d8_flags
 }
 # Generate library base name.
 target_id = mode
 suffix = SUFFIXES[target_id]
 library_name = 'v8' + suffix
-preparser_library_name = 'v8preparser' + suffix
 version = GetVersion()
 if context.options['soname'] == 'on':
 # When building shared object with SONAME version the library name.
@@ -1354,7 +1147,7 @@ def BuildSpecific(env, mode, env_overrides, tools):
 env['SONAME'] = soname
 # Build the object files by invoking SCons recursively.
-(object_files, shell_files, mksnapshot, preparser_files) = env.SConscript(
+(object_files, shell_files, mksnapshot) = env.SConscript(
 join('src', 'SConscript'),
 build_dir=join('obj', target_id),
 exports='context tools',
@@ -1369,22 +1162,13 @@ def BuildSpecific(env, mode, env_overrides, tools):
 context.ApplyEnvOverrides(env)
 if context.options['library'] == 'static':
 library = env.StaticLibrary(library_name, object_files)
-preparser_library = env.StaticLibrary(preparser_library_name,
-preparser_files)
 else:
 # There seems to be a glitch in the way scons decides where to put
 # PDB files when compiling using MSVC so we specify it manually.
 # This should not affect any other platforms.
 pdb_name = library_name + '.dll.pdb'
 library = env.SharedLibrary(library_name, object_files, PDB=pdb_name)
-preparser_pdb_name = preparser_library_name + '.dll.pdb';
-preparser_soname = 'lib' + preparser_library_name + '.so';
-preparser_library = env.SharedLibrary(preparser_library_name,
-preparser_files,
-PDB=preparser_pdb_name,
-SONAME=preparser_soname)
 context.library_targets.append(library)
-context.library_targets.append(preparser_library)
 d8_env = Environment(tools=tools)
 d8_env.Replace(**context.flags['d8'])
@@ -1418,21 +1202,6 @@ def BuildSpecific(env, mode, env_overrides, tools):
 )
 context.cctest_targets.append(cctest_program)
-preparser_env = env.Copy()
-preparser_env.Replace(**context.flags['preparser'])
-preparser_env.Prepend(LIBS=[preparser_library_name])
-context.ApplyEnvOverrides(preparser_env)
-preparser_object = preparser_env.SConscript(
-join('preparser', 'SConscript'),
-build_dir=join('obj', 'preparser', target_id),
-exports='context',
-duplicate=False
-)
-preparser_name = join('obj', 'preparser', target_id, 'preparser')
-preparser_program = preparser_env.Program(preparser_name, preparser_object);
-preparser_env.Depends(preparser_program, preparser_library)
-context.preparser_targets.append(preparser_program)
 return context
@@ -1451,7 +1220,6 @@ def Build():
 mksnapshots = []
 cctests = []
 samples = []
-preparsers = []
 d8s = []
 modes = SplitList(env['mode'])
 for mode in modes:
@@ -1460,7 +1228,6 @@ def Build():
 mksnapshots += context.mksnapshot_targets
 cctests += context.cctest_targets
 samples += context.sample_targets
-preparsers += context.preparser_targets
 d8s += context.d8_targets
 env.Alias('library', libraries)
@@ -1468,7 +1235,6 @@ def Build():
 env.Alias('cctests', cctests)
 env.Alias('sample', samples)
 env.Alias('d8', d8s)
-env.Alias('preparser', preparsers)
 if env['sample']:
 env.Default('sample')

45
deps/v8/include/v8-debug.h

@ -127,7 +127,7 @@ class EXPORT Debug {
/** /**
* Get the context active when the debug event happened. Note this is not * Get the context active when the debug event happened. Note this is not
* the current active context as the JavaScript part of the debugger is * the current active context as the JavaScript part of the debugger is
* running in its own context which is entered at this point. * running in it's own context which is entered at this point.
*/ */
virtual Handle<Context> GetEventContext() const = 0; virtual Handle<Context> GetEventContext() const = 0;
@ -164,13 +164,12 @@ class EXPORT Debug {
/** /**
* Get the context active when the debug event happened. Note this is not * Get the context active when the debug event happened. Note this is not
* the current active context as the JavaScript part of the debugger is * the current active context as the JavaScript part of the debugger is
* running in its own context which is entered at this point. * running in it's own context which is entered at this point.
*/ */
virtual Handle<Context> GetEventContext() const = 0; virtual Handle<Context> GetEventContext() const = 0;
/** /**
* Client data passed with the corresponding callback when it was * Client data passed with the corresponding callbak whet it was registered.
* registered.
*/ */
virtual Handle<Value> GetCallbackData() const = 0; virtual Handle<Value> GetCallbackData() const = 0;
@ -228,7 +227,7 @@ class EXPORT Debug {
* Debug message callback function. * Debug message callback function.
* *
* \param message the debug message handler message object * \param message the debug message handler message object
*
* A MessageHandler does not take possession of the message data, * A MessageHandler does not take possession of the message data,
* and must not rely on the data persisting after the handler returns. * and must not rely on the data persisting after the handler returns.
*/ */
@ -254,35 +253,25 @@ class EXPORT Debug {
static bool SetDebugEventListener(v8::Handle<v8::Object> that, static bool SetDebugEventListener(v8::Handle<v8::Object> that,
Handle<Value> data = Handle<Value>()); Handle<Value> data = Handle<Value>());
// Schedule a debugger break to happen when JavaScript code is run // Schedule a debugger break to happen when JavaScript code is run.
// in the given isolate. If no isolate is provided the default static void DebugBreak();
// isolate is used.
static void DebugBreak(Isolate* isolate = NULL);
// Remove scheduled debugger break in given isolate if it has not // Remove scheduled debugger break if it has not happened yet.
// happened yet. If no isolate is provided the default isolate is static void CancelDebugBreak();
// used.
static void CancelDebugBreak(Isolate* isolate = NULL);
// Break execution of JavaScript in the given isolate (this method // Break execution of JavaScript (this method can be invoked from a
// can be invoked from a non-VM thread) for further client command // non-VM thread) for further client command execution on a VM
// execution on a VM thread. Client data is then passed in // thread. Client data is then passed in EventDetails to
// EventDetails to EventCallback at the moment when the VM actually // EventCallback at the moment when the VM actually stops.
// stops. If no isolate is provided the default isolate is used. static void DebugBreakForCommand(ClientData* data = NULL);
static void DebugBreakForCommand(ClientData* data = NULL,
Isolate* isolate = NULL);
// Message based interface. The message protocol is JSON. NOTE the message // Message based interface. The message protocol is JSON. NOTE the message
// handler thread is not supported any more parameter must be false. // handler thread is not supported any more parameter must be false.
static void SetMessageHandler(MessageHandler handler, static void SetMessageHandler(MessageHandler handler,
bool message_handler_thread = false); bool message_handler_thread = false);
static void SetMessageHandler2(MessageHandler2 handler); static void SetMessageHandler2(MessageHandler2 handler);
// If no isolate is provided the default isolate is
// used.
static void SendCommand(const uint16_t* command, int length, static void SendCommand(const uint16_t* command, int length,
ClientData* client_data = NULL, ClientData* client_data = NULL);
Isolate* isolate = NULL);
// Dispatch interface. // Dispatch interface.
static void SetHostDispatchHandler(HostDispatchHandler handler, static void SetHostDispatchHandler(HostDispatchHandler handler,
@ -311,7 +300,7 @@ class EXPORT Debug {
* get access to information otherwise not available during normal JavaScript * get access to information otherwise not available during normal JavaScript
* execution e.g. details on stack frames. Receiver of the function call will * execution e.g. details on stack frames. Receiver of the function call will
* be the debugger context global object, however this is a subject to change. * be the debugger context global object, however this is a subject to change.
* The following example shows a JavaScript function which when passed to * The following example show a JavaScript function which when passed to
* v8::Debug::Call will return the current line of JavaScript execution. * v8::Debug::Call will return the current line of JavaScript execution.
* *
* \code * \code
@ -353,7 +342,7 @@ class EXPORT Debug {
  * 2. V8 is suspended on debug breakpoint; in this state V8 is dedicated
  * to reading and processing debug messages;
  * 3. V8 is not running at all or has called some long-working C++ function;
- * by default it means that processing of all debug messages will be deferred
+ * by default it means that processing of all debug message will be deferred
  * until V8 gets control again; however, embedding application may improve
  * this by manually calling this method.
  *
@ -377,7 +366,7 @@ class EXPORT Debug {
   static void ProcessDebugMessages();

   /**
-   * Debugger is running in its own context which is entered while debugger
+   * Debugger is running in it's own context which is entered while debugger
    * messages are being dispatched. This is an explicit getter for this
    * debugger context. Note that the content of the debugger context is subject
    * to change.
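As the doc comment above says, functions run through Debug::Call execute with access to the debugger's execution state. A compact illustration of the documented pattern (a sketch; the truncated \code example in the header traditionally shows the same JS body):

    v8::HandleScope scope;
    v8::Handle<v8::String> src = v8::String::New(
        "(function frame_source_line(exec_state) {"
        "   return exec_state.frame(0).sourceLine();"
        " })");
    v8::Handle<v8::Function> fun = v8::Handle<v8::Function>::Cast(
        v8::Script::Compile(src)->Run());
    // Debug::Call invokes `fun` with the current execution state while
    // JavaScript is paused in the debugger.
    v8::Handle<v8::Value> current_line = v8::Debug::Call(fun);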

13
deps/v8/include/v8-preparser.h

@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@ -66,18 +66,17 @@
 namespace v8 {

-// The result of preparsing is either a stack overflow error, or an opaque
-// blob of data that can be passed back into the parser.
-class V8EXPORT PreParserData {
+class PreParserData {
  public:
   PreParserData(size_t size, const uint8_t* data)
       : data_(data), size_(size) { }

   // Create a PreParserData value where stack_overflow reports true.
-  static PreParserData StackOverflow() { return PreParserData(0, NULL); }
+  static PreParserData StackOverflow() { return PreParserData(NULL, 0); }

   // Whether the pre-parser stopped due to a stack overflow.
   // If this is the case, size() and data() should not be used.
   bool stack_overflow() { return size_ == 0u; }

   // The size of the data in bytes.
@ -93,7 +92,7 @@ class V8EXPORT PreParserData {
 // Interface for a stream of Unicode characters.
-class V8EXPORT UnicodeInputStream {  // NOLINT - Thinks V8EXPORT is class name.
+class UnicodeInputStream {
  public:
   virtual ~UnicodeInputStream();
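Taken together, the public preparser surface shrinks to the two types above plus v8::Preparse. A rough usage sketch under those definitions (stream construction elided; any UnicodeInputStream subclass works, as the preparser-process.cc changes further down show):

    v8::PreParserData data = v8::Preparse(&stream, 64 * 1024 * sizeof(void*));
    if (data.stack_overflow()) {
      // No data was produced; size() and data() must not be used.
    } else {
      const uint8_t* blob = data.data();  // opaque blob of size() bytes,
                                          // fed back into the full parser
    }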

174
deps/v8/include/v8-profiler.h

@ -131,16 +131,6 @@ class V8EXPORT CpuProfile {
   /** Returns the root node of the top down call tree. */
   const CpuProfileNode* GetTopDownRoot() const;

-  /**
-   * Deletes the profile and removes it from CpuProfiler's list.
-   * All pointers to nodes previously returned become invalid.
-   * Profiles with the same uid but obtained using different
-   * security token are not deleted, but become inaccessible
-   * using FindProfile method. It is embedder's responsibility
-   * to call Delete on these profiles.
-   */
-  void Delete();
 };
@ -191,13 +181,6 @@ class V8EXPORT CpuProfiler {
   static const CpuProfile* StopProfiling(
       Handle<String> title,
       Handle<Value> security_token = Handle<Value>());

-  /**
-   * Deletes all existing profiles, also cancelling all profiling
-   * activity. All previously returned pointers to profiles and their
-   * contents become invalid after this call.
-   */
-  static void DeleteAllProfiles();
 };
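Without Delete()/DeleteAllProfiles(), collected profiles simply persist for the VM's lifetime. What remains is the start/stop pair; roughly (a sketch, with RunWorkload standing in for the embedder's own code):

    v8::HandleScope scope;
    v8::Handle<v8::String> title = v8::String::New("startup");
    v8::CpuProfiler::StartProfiling(title);
    RunWorkload();  // hypothetical embedder function
    const v8::CpuProfile* profile = v8::CpuProfiler::StopProfiling(title);
    const v8::CpuProfileNode* root = profile->GetTopDownRoot();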
@ -206,7 +189,7 @@ class HeapGraphNode;
 /**
  * HeapSnapshotEdge represents a directed connection between heap
- * graph nodes: from retainers to retained nodes.
+ * graph nodes: from retaners to retained nodes.
  */
 class V8EXPORT HeapGraphEdge {
  public:
@ -240,6 +223,22 @@ class V8EXPORT HeapGraphEdge {
 };

+class V8EXPORT HeapGraphPath {
+ public:
+  /** Returns the number of edges in the path. */
+  int GetEdgesCount() const;
+
+  /** Returns an edge from the path. */
+  const HeapGraphEdge* GetEdge(int index) const;
+
+  /** Returns origin node. */
+  const HeapGraphNode* GetFromNode() const;
+
+  /** Returns destination node. */
+  const HeapGraphNode* GetToNode() const;
+};
+
 /**
  * HeapGraphNode represents a node in a heap graph.
  */
@ -253,8 +252,7 @@ class V8EXPORT HeapGraphNode {
     kCode = 4,        // Compiled code.
     kClosure = 5,     // Function closure.
     kRegExp = 6,      // RegExp.
-    kHeapNumber = 7,  // Number stored in the heap.
-    kNative = 8       // Native object (not from V8 heap).
+    kHeapNumber = 7   // Number stored in the heap.
   };

   /** Returns node type (see HeapGraphNode::Type). */
@ -269,10 +267,17 @@ class V8EXPORT HeapGraphNode {
   /**
    * Returns node id. For the same heap object, the id remains the same
-   * across all snapshots.
+   * across all snapshots. Not applicable to aggregated heap snapshots
+   * as they only contain aggregated instances.
    */
   uint64_t GetId() const;

+  /**
+   * Returns the number of instances. Only applicable to aggregated
+   * heap snapshots.
+   */
+  int GetInstancesCount() const;
+
   /** Returns node's own size, in bytes. */
   int GetSelfSize() const;
@ -302,6 +307,12 @@ class V8EXPORT HeapGraphNode {
   /** Returns a retainer by index. */
   const HeapGraphEdge* GetRetainer(int index) const;

+  /** Returns the number of simple retaining paths from the root to the node. */
+  int GetRetainingPathsCount() const;
+
+  /** Returns a retaining path by index. */
+  const HeapGraphPath* GetRetainingPath(int index) const;
+
   /**
    * Returns a dominator node. This is the node that participates in every
    * path from the snapshot root to the current node.
@ -310,13 +321,25 @@ class V8EXPORT HeapGraphNode {
 };

+class V8EXPORT HeapSnapshotsDiff {
+ public:
+  /** Returns the root node for added nodes. */
+  const HeapGraphNode* GetAdditionsRoot() const;
+
+  /** Returns the root node for deleted nodes. */
+  const HeapGraphNode* GetDeletionsRoot() const;
+};
+
 /**
  * HeapSnapshots record the state of the JS heap at some moment.
  */
 class V8EXPORT HeapSnapshot {
  public:
   enum Type {
-    kFull = 0  // Heap snapshot with all instances and references.
+    kFull = 0,       // Heap snapshot with all instances and references.
+    kAggregated = 1  // Snapshot doesn't contain individual heap entries,
+                     // instead they are grouped by constructor name.
   };

   enum SerializationFormat {
     kJSON = 0  // See format description near 'Serialize' method.
@ -337,24 +360,17 @@ class V8EXPORT HeapSnapshot {
   /** Returns a node by its id. */
   const HeapGraphNode* GetNodeById(uint64_t id) const;

-  /** Returns total nodes count in the snapshot. */
-  int GetNodesCount() const;
-
-  /** Returns a node by index. */
-  const HeapGraphNode* GetNode(int index) const;
-
   /**
-   * Deletes the snapshot and removes it from HeapProfiler's list.
-   * All pointers to nodes, edges and paths previously returned become
-   * invalid.
+   * Returns a diff between this snapshot and another one. Only snapshots
+   * of the same type can be compared.
    */
-  void Delete();
+  const HeapSnapshotsDiff* CompareWith(const HeapSnapshot* snapshot) const;

   /**
    * Prepare a serialized representation of the snapshot. The result
    * is written into the stream provided in chunks of specified size.
    * The total length of the serialized snapshot is unknown in
-   * advance, it can be roughly equal to JS heap size (that means,
+   * advance, it is can be roughly equal to JS heap size (that means,
    * it can be really big - tens of megabytes).
    *
    * For the JSON format, heap contents are represented as an object
@ -376,22 +392,11 @@ class V8EXPORT HeapSnapshot {
 };

-class RetainedObjectInfo;
-
 /**
  * Interface for controlling heap profiling.
  */
 class V8EXPORT HeapProfiler {
  public:
-  /**
-   * Callback function invoked for obtaining RetainedObjectInfo for
-   * the given JavaScript wrapper object. It is prohibited to enter V8
-   * while the callback is running: only getters on the handle and
-   * GetPointerFromInternalField on the objects are allowed.
-   */
-  typedef RetainedObjectInfo* (*WrapperInfoCallback)
-      (uint16_t class_id, Handle<Value> wrapper);
-
   /** Returns the number of snapshots taken. */
   static int GetSnapshotsCount();

@ -409,87 +414,6 @@ class V8EXPORT HeapProfiler {
       Handle<String> title,
       HeapSnapshot::Type type = HeapSnapshot::kFull,
       ActivityControl* control = NULL);
-
-  /**
-   * Deletes all snapshots taken. All previously returned pointers to
-   * snapshots and their contents become invalid after this call.
-   */
-  static void DeleteAllSnapshots();
-
-  /** Binds a callback to embedder's class ID. */
-  static void DefineWrapperClass(
-      uint16_t class_id,
-      WrapperInfoCallback callback);
-
-  /**
-   * Default value of persistent handle class ID. Must not be used to
-   * define a class. Can be used to reset a class of a persistent
-   * handle.
-   */
-  static const uint16_t kPersistentHandleNoClassId = 0;
 };

-/**
- * Interface for providing information about embedder's objects
- * held by global handles. This information is reported in two ways:
- *
- *  1. When calling AddObjectGroup, an embedder may pass
- *     RetainedObjectInfo instance describing the group. To collect
- *     this information while taking a heap snapshot, V8 calls GC
- *     prologue and epilogue callbacks.
- *
- *  2. When a heap snapshot is collected, V8 additionally
- *     requests RetainedObjectInfos for persistent handles that
- *     were not previously reported via AddObjectGroup.
- *
- * Thus, if an embedder wants to provide information about native
- * objects for heap snapshots, he can do it in a GC prologue
- * handler, and / or by assigning wrapper class ids in the following way:
- *
- *  1. Bind a callback to class id by calling DefineWrapperClass.
- *  2. Call SetWrapperClassId on certain persistent handles.
- *
- * V8 takes ownership of RetainedObjectInfo instances passed to it and
- * keeps them alive only during snapshot collection. Afterwards, they
- * are freed by calling the Dispose class function.
- */
-class V8EXPORT RetainedObjectInfo {  // NOLINT
- public:
-  /** Called by V8 when it no longer needs an instance. */
-  virtual void Dispose() = 0;
-
-  /** Returns whether two instances are equivalent. */
-  virtual bool IsEquivalent(RetainedObjectInfo* other) = 0;
-
-  /**
-   * Returns hash value for the instance. Equivalent instances
-   * must have the same hash value.
-   */
-  virtual intptr_t GetHash() = 0;
-
-  /**
-   * Returns human-readable label. It must be a NUL-terminated UTF-8
-   * encoded string. V8 copies its contents during a call to GetLabel.
-   */
-  virtual const char* GetLabel() = 0;
-
-  /**
-   * Returns element count in case if a global handle retains
-   * a subgraph by holding one of its nodes.
-   */
-  virtual intptr_t GetElementCount() { return -1; }
-
-  /** Returns embedder's object size in bytes. */
-  virtual intptr_t GetSizeInBytes() { return -1; }
-
- protected:
-  RetainedObjectInfo() {}
-  virtual ~RetainedObjectInfo() {}
-
- private:
-  RetainedObjectInfo(const RetainedObjectInfo&);
-  RetainedObjectInfo& operator=(const RetainedObjectInfo&);
-};
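The net effect of this file's changes: snapshot diffing and aggregated snapshots come back, while explicit deletion and the RetainedObjectInfo embedder hooks go away. A sketch of the restored diffing workflow (RunWorkload is a stand-in for embedder code):

    v8::HandleScope scope;
    const v8::HeapSnapshot* before =
        v8::HeapProfiler::TakeSnapshot(v8::String::New("before"));
    RunWorkload();
    const v8::HeapSnapshot* after =
        v8::HeapProfiler::TakeSnapshot(v8::String::New("after"));
    // Only snapshots of the same type may be compared.
    const v8::HeapSnapshotsDiff* diff = after->CompareWith(before);
    const v8::HeapGraphNode* added = diff->GetAdditionsRoot();
    const v8::HeapGraphNode* deleted = diff->GetDeletionsRoot();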

5
deps/v8/include/v8-testing.h

@ -87,11 +87,6 @@ class V8EXPORT Testing {
    * should be between 0 and one less than the result from GetStressRuns()
    */
   static void PrepareStressRun(int run);
-
-  /**
-   * Force deoptimization of all functions.
-   */
-  static void DeoptimizeAll();
 };
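With DeoptimizeAll() gone there is no final forced-deoptimization pass; a stress harness reduces to the prepare/run loop, roughly as below (a sketch from memory of this header's era, with RunTestOnce a hypothetical stand-in):

    v8::Testing::SetStressRunType(v8::Testing::kStressTypeOpt);
    int runs = v8::Testing::GetStressRuns();
    for (int i = 0; i < runs; i++) {
      v8::Testing::PrepareStressRun(i);  // run index must stay below runs
      RunTestOnce();
    }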

648
deps/v8/include/v8.h

File diff suppressed because it is too large

38
deps/v8/preparser/SConscript

@ -1,38 +0,0 @@
# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from os.path import join
Import('context')
def ConfigureObjectFiles():
env = Environment()
env.Replace(**context.flags['preparser'])
context.ApplyEnvOverrides(env)
return env.Object('preparser-process.cc')
preparser_object = ConfigureObjectFiles()
Return('preparser_object')

395
deps/v8/preparser/preparser-process.cc

@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@ -27,353 +27,180 @@
 #include <stdlib.h>
 #include <stdarg.h>
-#include <stdio.h>
-#include <string.h>

 #include "../include/v8stdint.h"
 #include "../include/v8-preparser.h"
+#include "unicode-inl.h"

-#include "../src/preparse-data-format.h"
+enum ResultCode { kSuccess = 0, kErrorReading = 1, kErrorWriting = 2 };

-namespace i = v8::internal;
+namespace v8 {
+namespace internal {

-// This file is only used for testing the stand-alone preparser
-// library.
+// THIS FILE IS PROOF-OF-CONCEPT ONLY.
+// The final goal is a stand-alone preparser library.

-// The first argument must be the path of a JavaScript source file, or
-// the flags "-e" and the next argument is then the source of a JavaScript
-// program.
-// Optionally this can be followed by the word "throws" (case sensitive),
-// which signals that the parsing is expected to throw - the default is
-// to expect the parsing to not throw.
-// The command line can further be followed by a message text (the
-// *type* of the exception to throw), and even more optionally, the
-// start and end position reported with the exception.
-//
-// This source file is preparsed and tested against the expectations, and if
-// successful, the resulting preparser data is written to stdout.
-// Diagnostic output is output on stderr.
-// The source file must contain only ASCII characters (UTF-8 isn't supported).
-// The file is read into memory, so it should have a reasonable size.
-// Adapts an ASCII string to the UnicodeInputStream interface.
-class AsciiInputStream : public v8::UnicodeInputStream {
+class UTF8InputStream : public v8::UnicodeInputStream {
  public:
-  AsciiInputStream(const uint8_t* buffer, size_t length)
+  UTF8InputStream(uint8_t* buffer, size_t length)
       : buffer_(buffer),
-        end_offset_(static_cast<int>(length)),
-        offset_(0) { }
+        offset_(0),
+        pos_(0),
+        end_offset_(static_cast<int>(length)) { }

-  virtual ~AsciiInputStream() { }
+  virtual ~UTF8InputStream() { }

   virtual void PushBack(int32_t ch) {
-    offset_--;
+    // Pushback assumes that the character pushed back is the
+    // one that was most recently read, and jumps back in the
+    // UTF-8 stream by the length of that character's encoding.
+    offset_ -= unibrow::Utf8::Length(ch);
+    pos_--;
 #ifdef DEBUG
-    if (offset_ < 0 ||
-        (ch != ((offset_ >= end_offset_) ? -1 : buffer_[offset_]))) {
-      fprintf(stderr, "Invalid pushback: '%c' at offset %d.", ch, offset_);
-      exit(1);
-    }
+    if (static_cast<unsigned>(ch) <= unibrow::Utf8::kMaxOneByteChar) {
+      if (ch != buffer_[offset_]) {
+        fprintf(stderr, "Invalid pushback: '%c'.", ch);
+        exit(1);
+      }
+    } else {
+      unsigned tmp = 0;
+      if (static_cast<unibrow::uchar>(ch) !=
+          unibrow::Utf8::CalculateValue(buffer_ + offset_,
+                                        end_offset_ - offset_,
+                                        &tmp)) {
+        fprintf(stderr, "Invalid pushback: 0x%x.", ch);
+        exit(1);
+      }
+    }
 #endif
   }

   virtual int32_t Next() {
-    if (offset_ >= end_offset_) {
-      offset_++;  // Increment anyway to allow symmetric pushbacks.
-      return -1;
-    }
-    uint8_t next_char = buffer_[offset_];
-#ifdef DEBUG
-    if (next_char > 0x7fu) {
-      fprintf(stderr, "Non-ASCII character in input: '%c'.", next_char);
-      exit(1);
-    }
-#endif
-    offset_++;
-    return static_cast<int32_t>(next_char);
+    if (offset_ == end_offset_) return -1;
+    uint8_t first_char = buffer_[offset_];
+    if (first_char <= unibrow::Utf8::kMaxOneByteChar) {
+      pos_++;
+      offset_++;
+      return static_cast<int32_t>(first_char);
+    }
+    unibrow::uchar codepoint =
+        unibrow::Utf8::CalculateValue(buffer_ + offset_,
+                                      end_offset_ - offset_,
+                                      &offset_);
+    pos_++;
+    return static_cast<int32_t>(codepoint);
   }

  private:
   const uint8_t* buffer_;
-  const int end_offset_;
-  int offset_;
+  unsigned offset_;
+  unsigned pos_;
+  unsigned end_offset_;
 };
-bool ReadBuffer(FILE* source, void* buffer, size_t length) {
-  size_t actually_read = fread(buffer, 1, length, source);
-  return (actually_read == length);
-}
-
-bool WriteBuffer(FILE* dest, const void* buffer, size_t length) {
-  size_t actually_written = fwrite(buffer, 1, length, dest);
-  return (actually_written == length);
-}
-
-class PreparseDataInterpreter {
- public:
-  PreparseDataInterpreter(const uint8_t* data, int length)
-      : data_(data), length_(length), message_(NULL) { }
-
-  ~PreparseDataInterpreter() {
-    if (message_ != NULL) delete[] message_;
-  }
-
-  bool valid() {
-    int header_length =
-        i::PreparseDataConstants::kHeaderSize * sizeof(int);  // NOLINT
-    return length_ >= header_length;
-  }
-
-  bool throws() {
-    return valid() &&
-        word(i::PreparseDataConstants::kHasErrorOffset) != 0;
-  }
-
-  const char* message() {
-    if (message_ != NULL) return message_;
-    if (!throws()) return NULL;
-    int text_pos = i::PreparseDataConstants::kHeaderSize +
-                   i::PreparseDataConstants::kMessageTextPos;
-    int length = word(text_pos);
-    char* buffer = new char[length + 1];
-    for (int i = 1; i <= length; i++) {
-      int character = word(text_pos + i);
-      buffer[i - 1] = character;
-    }
-    buffer[length] = '\0';
-    message_ = buffer;
-    return buffer;
-  }
-
-  int beg_pos() {
-    if (!throws()) return -1;
-    return word(i::PreparseDataConstants::kHeaderSize +
-                i::PreparseDataConstants::kMessageStartPos);
-  }
-
-  int end_pos() {
-    if (!throws()) return -1;
-    return word(i::PreparseDataConstants::kHeaderSize +
-                i::PreparseDataConstants::kMessageEndPos);
-  }
-
- private:
-  int word(int offset) {
-    const int* word_data = reinterpret_cast<const int*>(data_);
-    if (word_data + offset < reinterpret_cast<const int*>(data_ + length_)) {
-      return word_data[offset];
-    }
-    return -1;
-  }
-
-  const uint8_t* const data_;
-  const int length_;
-  const char* message_;
-};
+// Write a number to dest in network byte order.
+void WriteUInt32(FILE* dest, uint32_t value, bool* ok) {
+  for (int i = 3; i >= 0; i--) {
+    uint8_t byte = static_cast<uint8_t>(value >> (i << 3));
+    int result = fputc(byte, dest);
+    if (result == EOF) {
+      *ok = false;
+      return;
+    }
+  }
+}
+
+// Read number from FILE* in network byte order.
+uint32_t ReadUInt32(FILE* source, bool* ok) {
+  uint32_t n = 0;
+  for (int i = 0; i < 4; i++) {
+    int c = fgetc(source);
+    if (c == EOF) {
+      *ok = false;
+      return 0;
+    }
+    n = (n << 8) + static_cast<uint32_t>(c);
+  }
+  return n;
+}
+
+bool ReadBuffer(FILE* source, void* buffer, size_t length) {
+  size_t actually_read = fread(buffer, 1, length, source);
+  return (actually_read == length);
+}
+
+bool WriteBuffer(FILE* dest, const void* buffer, size_t length) {
+  size_t actually_written = fwrite(buffer, 1, length, dest);
+  return (actually_written == length);
+}
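For concreteness, the helpers added above frame values in big-endian ("network") byte order regardless of host endianness. A worked example (illustrative only):

    bool ok = true;
    WriteUInt32(stdout, 0x0102A0B0u, &ok);
    // Emits the bytes 0x01 0x02 0xA0 0xB0, most significant first,
    // so ReadUInt32 on any platform reassembles exactly 0x0102A0B0.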
 template <typename T>
 class ScopedPointer {
  public:
-  explicit ScopedPointer() : pointer_(NULL) {}
   explicit ScopedPointer(T* pointer) : pointer_(pointer) {}
-  ~ScopedPointer() { if (pointer_ != NULL) delete[] pointer_; }
+  ~ScopedPointer() { delete[] pointer_; }
   T& operator[](int index) { return pointer_[index]; }
   T* operator*() { return pointer_ ;}
-  T* operator=(T* new_value) {
-    if (pointer_ != NULL) delete[] pointer_;
-    pointer_ = new_value;
-    return new_value;
-  }
  private:
   T* pointer_;
 };
-void fail(v8::PreParserData* data, const char* message, ...) {
-  va_list args;
-  va_start(args, message);
-  vfprintf(stderr, message, args);
-  va_end(args);
-  fflush(stderr);
-  // Print preparser data to stdout.
-  uint32_t size = data->size();
-  fprintf(stderr, "LOG: data size: %u\n", size);
-  if (!WriteBuffer(stdout, data->data(), size)) {
-    perror("ERROR: Writing data");
-    fflush(stderr);
-  }
-  exit(EXIT_FAILURE);
-};
-
-bool IsFlag(const char* arg) {
-  // Anything starting with '-' is considered a flag.
-  // It's summarily ignored for now.
-  return arg[0] == '-';
-}
-
-struct ExceptionExpectation {
-  ExceptionExpectation()
-      : throws(false), type(NULL), beg_pos(-1), end_pos(-1) { }
-  bool throws;
-  const char* type;
-  int beg_pos;
-  int end_pos;
-};
-
-void CheckException(v8::PreParserData* data,
-                    ExceptionExpectation* expects) {
-  PreparseDataInterpreter reader(data->data(), data->size());
-  if (expects->throws) {
-    if (!reader.throws()) {
-      if (expects->type == NULL) {
-        fail(data, "Didn't throw as expected\n");
-      } else {
-        fail(data, "Didn't throw \"%s\" as expected\n", expects->type);
-      }
-    }
-    if (expects->type != NULL) {
-      const char* actual_message = reader.message();
-      if (strcmp(expects->type, actual_message)) {
-        fail(data, "Wrong error message. Expected <%s>, found <%s> at %d..%d\n",
-             expects->type, actual_message, reader.beg_pos(), reader.end_pos());
-      }
-    }
-    if (expects->beg_pos >= 0) {
-      if (expects->beg_pos != reader.beg_pos()) {
-        fail(data, "Wrong error start position: Expected %i, found %i\n",
-             expects->beg_pos, reader.beg_pos());
-      }
-    }
-    if (expects->end_pos >= 0) {
-      if (expects->end_pos != reader.end_pos()) {
-        fail(data, "Wrong error end position: Expected %i, found %i\n",
-             expects->end_pos, reader.end_pos());
-      }
-    }
-  } else if (reader.throws()) {
-    const char* message = reader.message();
-    fail(data, "Throws unexpectedly with message: %s at location %d-%d\n",
-         message, reader.beg_pos(), reader.end_pos());
-  }
-}
-
-ExceptionExpectation ParseExpectation(int argc, const char* argv[]) {
-  ExceptionExpectation expects;
-  // Parse exception expectations from (the remainder of) the command line.
-  int arg_index = 0;
-  // Skip any flags.
-  while (argc > arg_index && IsFlag(argv[arg_index])) arg_index++;
-  if (argc > arg_index) {
-    if (strncmp("throws", argv[arg_index], 7)) {
-      // First argument after filename, if present, must be the verbatim
-      // "throws", marking that the preparsing should fail with an exception.
-      fail(NULL, "ERROR: Extra arguments not prefixed by \"throws\".\n");
-    }
-    expects.throws = true;
-    do {
-      arg_index++;
-    } while (argc > arg_index && IsFlag(argv[arg_index]));
-    if (argc > arg_index) {
-      // Next argument is the exception type identifier.
-      expects.type = argv[arg_index];
-      do {
-        arg_index++;
-      } while (argc > arg_index && IsFlag(argv[arg_index]));
-      if (argc > arg_index) {
-        expects.beg_pos = atoi(argv[arg_index]);  // NOLINT
-        do {
-          arg_index++;
-        } while (argc > arg_index && IsFlag(argv[arg_index]));
-        if (argc > arg_index) {
-          expects.end_pos = atoi(argv[arg_index]);  // NOLINT
-        }
-      }
-    }
-  }
-  return expects;
-}
-
-int main(int argc, const char* argv[]) {
-  // Parse command line.
-  // Format:  preparser (<scriptfile> | -e "<source>")
-  //                    ["throws" [<exn-type> [<start> [<end>]]]]
-  // Any flags (except an initial -s) are ignored.
-
-  // Check for mandatory filename argument.
-  int arg_index = 1;
-  if (argc <= arg_index) {
-    fail(NULL, "ERROR: No filename on command line.\n");
-  }
-  const uint8_t* source = NULL;
-  const char* filename = argv[arg_index];
-  if (!strcmp(filename, "-e")) {
-    arg_index++;
-    if (argc <= arg_index) {
-      fail(NULL, "ERROR: No source after -e on command line.\n");
-    }
-    source = reinterpret_cast<const uint8_t*>(argv[arg_index]);
-  }
-  // Check remainder of command line for exception expectations.
-  arg_index++;
-  ExceptionExpectation expects =
-      ParseExpectation(argc - arg_index, argv + arg_index);
-
-  ScopedPointer<uint8_t> buffer;
-  size_t length;
-  if (source == NULL) {
-    // Open JS file.
-    FILE* input = fopen(filename, "rb");
-    if (input == NULL) {
-      perror("ERROR: Error opening file");
-      fflush(stderr);
-      return EXIT_FAILURE;
-    }
-    // Find length of JS file.
-    if (fseek(input, 0, SEEK_END) != 0) {
-      perror("ERROR: Error during seek");
-      fflush(stderr);
-      return EXIT_FAILURE;
-    }
-    length = static_cast<size_t>(ftell(input));
-    rewind(input);
-    // Read JS file into memory buffer.
-    buffer = new uint8_t[length];
-    if (!ReadBuffer(input, *buffer, length)) {
-      perror("ERROR: Reading file");
-      fflush(stderr);
-      return EXIT_FAILURE;
-    }
-    fclose(input);
-    source = *buffer;
-  } else {
-    length = strlen(reinterpret_cast<const char*>(source));
-  }
-
-  // Preparse input file.
-  AsciiInputStream input_buffer(source, length);
-  size_t kMaxStackSize = 64 * 1024 * sizeof(void*);  // NOLINT
-  v8::PreParserData data = v8::Preparse(&input_buffer, kMaxStackSize);
-
-  // Fail if stack overflow.
-  if (data.stack_overflow()) {
-    fail(&data, "ERROR: Stack overflow\n");
-  }
-
-  // Check that the expected exception is thrown, if an exception is
-  // expected.
-  CheckException(&data, &expects);
-
-  return EXIT_SUCCESS;
-}
+// Preparse input and output result on stdout.
+int PreParseIO(FILE* input) {
+  fprintf(stderr, "LOG: Enter parsing loop\n");
+  bool ok = true;
+  uint32_t length = ReadUInt32(input, &ok);
+  fprintf(stderr, "LOG: Input length: %d\n", length);
+  if (!ok) return kErrorReading;
+  ScopedPointer<uint8_t> buffer(new uint8_t[length]);
+
+  if (!ReadBuffer(input, *buffer, length)) {
+    return kErrorReading;
+  }
+  UTF8InputStream input_buffer(*buffer, static_cast<size_t>(length));
+
+  v8::PreParserData data =
+      v8::Preparse(&input_buffer, 64 * 1024 * sizeof(void*));  // NOLINT
+  if (data.stack_overflow()) {
+    fprintf(stderr, "LOG: Stack overflow\n");
+    fflush(stderr);
+    // Report stack overflow error/no-preparser-data.
+    WriteUInt32(stdout, 0, &ok);
+    if (!ok) return kErrorWriting;
+    return 0;
+  }
+
+  uint32_t size = data.size();
+  fprintf(stderr, "LOG: Success, data size: %u\n", size);
+  fflush(stderr);
+  WriteUInt32(stdout, size, &ok);
+  if (!ok) return kErrorWriting;
+  if (!WriteBuffer(stdout, data.data(), size)) {
+    return kErrorWriting;
+  }
+  return 0;
+}
+
+} }  // namespace v8::internal
+
+
+int main(int argc, char* argv[]) {
+  FILE* input = stdin;
+  if (argc > 1) {
+    char* arg = argv[1];
+    input = fopen(arg, "rb");
+    if (input == NULL) return EXIT_FAILURE;
+  }
+  int status = 0;
+  do {
+    status = v8::internal::PreParseIO(input);
+  } while (status == 0);
+  fprintf(stderr, "EXIT: Failure %d\n", status);
+  fflush(stderr);
+  return EXIT_FAILURE;
+}
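After the downgrade, the process is driven over stdin/stdout rather than by command-line expectations: each script is framed as a 4-byte big-endian length plus the raw bytes, and replies use the same framing (length 0 signals stack overflow). A hypothetical host-side writer, for orientation:

    // Frame one script for the preparser process (assumed helper, not in the diff).
    void SendScript(FILE* to_child, const char* script) {
      uint32_t n = static_cast<uint32_t>(strlen(script));
      for (int i = 3; i >= 0; i--) fputc((n >> (i * 8)) & 0xff, to_child);
      fwrite(script, 1, n, to_child);
      fflush(to_child);
    }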

6
deps/v8/samples/process.cc

@ -30,10 +30,6 @@
 #include <string>
 #include <map>

-#ifdef COMPRESS_STARTUP_DATA_BZ2
-#error Using compressed startup data is not supported for this sample
-#endif
-
 using namespace std;
 using namespace v8;

@ -535,7 +531,7 @@ void ParseOptions(int argc,
                   string* file) {
   for (int i = 1; i < argc; i++) {
     string arg = argv[i];
-    size_t index = arg.find('=', 0);
+    int index = arg.find('=', 0);
     if (index == string::npos) {
       *file = arg;
     } else {

440
deps/v8/samples/shell.cc

@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@ -28,36 +28,12 @@
 #include <v8.h>
 #include <v8-testing.h>
 #include <assert.h>
-#ifdef COMPRESS_STARTUP_DATA_BZ2
-#include <bzlib.h>
-#endif
 #include <fcntl.h>
 #include <string.h>
 #include <stdio.h>
 #include <stdlib.h>

-// When building with V8 in a shared library we cannot use functions which
-// is not explicitly a part of the public V8 API. This extensive use of
-// #ifndef USING_V8_SHARED/#endif is a hack until we can resolve whether to
-// still use the shell sample for testing or change to use the developer
-// shell d8 TODO(1272).
-#if !(defined(USING_V8_SHARED) || defined(V8_SHARED))
-#include "../src/v8.h"
-#endif  // USING_V8_SHARED
-
-#if !defined(_WIN32) && !defined(_WIN64)
-#include <unistd.h>  // NOLINT
-#endif
-
-static void ExitShell(int exit_code) {
-  // Use _exit instead of exit to avoid races between isolate
-  // threads and static destructors.
-  fflush(stdout);
-  fflush(stderr);
-  _exit(exit_code);
-}
-
-v8::Persistent<v8::Context> CreateShellContext();
 void RunShell(v8::Handle<v8::Context> context);
 bool ExecuteString(v8::Handle<v8::String> source,
                    v8::Handle<v8::Value> name,
@ -68,214 +44,33 @@ v8::Handle<v8::Value> Read(const v8::Arguments& args);
 v8::Handle<v8::Value> Load(const v8::Arguments& args);
 v8::Handle<v8::Value> Quit(const v8::Arguments& args);
 v8::Handle<v8::Value> Version(const v8::Arguments& args);
v8::Handle<v8::Value> Int8Array(const v8::Arguments& args);
v8::Handle<v8::Value> Uint8Array(const v8::Arguments& args);
v8::Handle<v8::Value> Int16Array(const v8::Arguments& args);
v8::Handle<v8::Value> Uint16Array(const v8::Arguments& args);
v8::Handle<v8::Value> Int32Array(const v8::Arguments& args);
v8::Handle<v8::Value> Uint32Array(const v8::Arguments& args);
v8::Handle<v8::Value> Float32Array(const v8::Arguments& args);
v8::Handle<v8::Value> Float64Array(const v8::Arguments& args);
v8::Handle<v8::Value> PixelArray(const v8::Arguments& args);
 v8::Handle<v8::String> ReadFile(const char* name);
 void ReportException(v8::TryCatch* handler);
static bool last_run = true;
class SourceGroup {
public:
SourceGroup() :
#if !(defined(USING_V8_SHARED) || defined(V8_SHARED))
next_semaphore_(v8::internal::OS::CreateSemaphore(0)),
done_semaphore_(v8::internal::OS::CreateSemaphore(0)),
thread_(NULL),
#endif // USING_V8_SHARED
argv_(NULL),
begin_offset_(0),
end_offset_(0) { }
void Begin(char** argv, int offset) {
argv_ = const_cast<const char**>(argv);
begin_offset_ = offset;
}
void End(int offset) { end_offset_ = offset; }
void Execute() {
for (int i = begin_offset_; i < end_offset_; ++i) {
const char* arg = argv_[i];
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
// Execute argument given to -e option directly.
v8::HandleScope handle_scope;
v8::Handle<v8::String> file_name = v8::String::New("unnamed");
v8::Handle<v8::String> source = v8::String::New(argv_[i + 1]);
if (!ExecuteString(source, file_name, false, true)) {
ExitShell(1);
return;
}
++i;
} else if (arg[0] == '-') {
// Ignore other options. They have been parsed already.
} else {
// Use all other arguments as names of files to load and run.
v8::HandleScope handle_scope;
v8::Handle<v8::String> file_name = v8::String::New(arg);
v8::Handle<v8::String> source = ReadFile(arg);
if (source.IsEmpty()) {
printf("Error reading '%s'\n", arg);
continue;
}
if (!ExecuteString(source, file_name, false, true)) {
ExitShell(1);
return;
}
}
}
}
#if !(defined(USING_V8_SHARED) || defined(V8_SHARED))
void StartExecuteInThread() {
if (thread_ == NULL) {
thread_ = new IsolateThread(this);
thread_->Start();
}
next_semaphore_->Signal();
}
void WaitForThread() {
if (thread_ == NULL) return;
if (last_run) {
thread_->Join();
thread_ = NULL;
} else {
done_semaphore_->Wait();
}
}
#endif // USING_V8_SHARED
private:
#if !(defined(USING_V8_SHARED) || defined(V8_SHARED))
static v8::internal::Thread::Options GetThreadOptions() {
v8::internal::Thread::Options options;
options.name = "IsolateThread";
// On some systems (OSX 10.6) the stack size default is 0.5Mb or less
// which is not enough to parse the big literal expressions used in tests.
// The stack size should be at least StackGuard::kLimitSize + some
// OS-specific padding for thread startup code.
options.stack_size = 2 << 20; // 2 Mb seems to be enough
return options;
}
class IsolateThread : public v8::internal::Thread {
public:
explicit IsolateThread(SourceGroup* group)
: v8::internal::Thread(GetThreadOptions()), group_(group) {}
virtual void Run() {
group_->ExecuteInThread();
}
private:
SourceGroup* group_;
};
void ExecuteInThread() {
v8::Isolate* isolate = v8::Isolate::New();
do {
if (next_semaphore_ != NULL) next_semaphore_->Wait();
{
v8::Isolate::Scope iscope(isolate);
v8::HandleScope scope;
v8::Persistent<v8::Context> context = CreateShellContext();
{
v8::Context::Scope cscope(context);
Execute();
}
context.Dispose();
}
if (done_semaphore_ != NULL) done_semaphore_->Signal();
} while (!last_run);
isolate->Dispose();
}
v8::internal::Semaphore* next_semaphore_;
v8::internal::Semaphore* done_semaphore_;
v8::internal::Thread* thread_;
#endif // USING_V8_SHARED
const char** argv_;
int begin_offset_;
int end_offset_;
};
static SourceGroup* isolate_sources = NULL;
#ifdef COMPRESS_STARTUP_DATA_BZ2
class BZip2Decompressor : public v8::StartupDataDecompressor {
public:
virtual ~BZip2Decompressor() { }
protected:
virtual int DecompressData(char* raw_data,
int* raw_data_size,
const char* compressed_data,
int compressed_data_size) {
ASSERT_EQ(v8::StartupData::kBZip2,
v8::V8::GetCompressedStartupDataAlgorithm());
unsigned int decompressed_size = *raw_data_size;
int result =
BZ2_bzBuffToBuffDecompress(raw_data,
&decompressed_size,
const_cast<char*>(compressed_data),
compressed_data_size,
0, 1);
if (result == BZ_OK) {
*raw_data_size = decompressed_size;
}
return result;
}
};
#endif
 int RunMain(int argc, char* argv[]) {
-  v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
   v8::HandleScope handle_scope;
-  v8::Persistent<v8::Context> context = CreateShellContext();
-  // Enter the newly created execution environment.
-  context->Enter();
-  if (context.IsEmpty()) {
-    printf("Error creating context\n");
-    return 1;
-  }
+  // Create a template for the global object.
+  v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
+  // Bind the global 'print' function to the C++ Print callback.
+  global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
+  // Bind the global 'read' function to the C++ Read callback.
+  global->Set(v8::String::New("read"), v8::FunctionTemplate::New(Read));
+  // Bind the global 'load' function to the C++ Load callback.
+  global->Set(v8::String::New("load"), v8::FunctionTemplate::New(Load));
+  // Bind the 'quit' function
+  global->Set(v8::String::New("quit"), v8::FunctionTemplate::New(Quit));
+  // Bind the 'version' function
+  global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
+  // Create a new execution environment containing the built-in
+  // functions
+  v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
   bool run_shell = (argc == 1);
int num_isolates = 1;
for (int i = 1; i < argc; i++) {
if (strcmp(argv[i], "--isolate") == 0) {
#if !(defined(USING_V8_SHARED) || defined(V8_SHARED))
++num_isolates;
#else // USING_V8_SHARED
printf("Error: --isolate not supported when linked with shared "
"library\n");
ExitShell(1);
#endif // USING_V8_SHARED
}
}
if (isolate_sources == NULL) {
isolate_sources = new SourceGroup[num_isolates];
SourceGroup* current = isolate_sources;
current->Begin(argv, 1);
   for (int i = 1; i < argc; i++) {
+    // Enter the execution environment before evaluating any code.
+    v8::Context::Scope context_scope(context);
     const char* str = argv[i];
-    if (strcmp(str, "--isolate") == 0) {
-      current->End(i);
-      current++;
-      current->Begin(argv, i + 1);
-    } else if (strcmp(str, "--shell") == 0) {
+    if (strcmp(str, "--shell") == 0) {
       run_shell = true;
     } else if (strcmp(str, "-f") == 0) {
       // Ignore any -f flags for compatibility with the other stand-
@ -283,27 +78,28 @@ int RunMain(int argc, char* argv[]) {
       continue;
     } else if (strncmp(str, "--", 2) == 0) {
       printf("Warning: unknown flag %s.\nTry --help for options\n", str);
+    } else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
+      // Execute argument given to -e option directly
+      v8::HandleScope handle_scope;
+      v8::Handle<v8::String> file_name = v8::String::New("unnamed");
+      v8::Handle<v8::String> source = v8::String::New(argv[i + 1]);
+      if (!ExecuteString(source, file_name, false, true))
+        return 1;
+      i++;
+    } else {
+      // Use all other arguments as names of files to load and run.
+      v8::HandleScope handle_scope;
+      v8::Handle<v8::String> file_name = v8::String::New(str);
+      v8::Handle<v8::String> source = ReadFile(str);
+      if (source.IsEmpty()) {
+        printf("Error reading '%s'\n", str);
+        return 1;
+      }
+      if (!ExecuteString(source, file_name, false, true))
+        return 1;
     }
   }
-  current->End(argc);
-  }
-#if !(defined(USING_V8_SHARED) || defined(V8_SHARED))
-  for (int i = 1; i < num_isolates; ++i) {
-    isolate_sources[i].StartExecuteInThread();
-  }
-#endif  // USING_V8_SHARED
-  isolate_sources[0].Execute();
   if (run_shell) RunShell(context);
-#if !(defined(USING_V8_SHARED) || defined(V8_SHARED))
-  for (int i = 1; i < num_isolates; ++i) {
-    isolate_sources[i].WaitForThread();
-  }
-#endif  // USING_V8_SHARED
-  if (last_run) {
-    delete[] isolate_sources;
-    isolate_sources = NULL;
-  }
-  context->Exit();
   context.Dispose();
   return 0;
 }
@ -330,15 +126,6 @@ int main(int argc, char* argv[]) {
     }
   }

-#ifdef COMPRESS_STARTUP_DATA_BZ2
-  BZip2Decompressor startup_data_decompressor;
-  int bz2_result = startup_data_decompressor.Decompress();
-  if (bz2_result != BZ_OK) {
-    fprintf(stderr, "bzip error code: %d\n", bz2_result);
-    exit(1);
-  }
-#endif
-
   v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
   int result = 0;
   if (FLAG_stress_opt || FLAG_stress_deopt) {
@ -350,16 +137,12 @@ int main(int argc, char* argv[]) {
       printf("============ Stress %d/%d ============\n",
              i + 1, stress_runs);
       v8::Testing::PrepareStressRun(i);
-      last_run = (i == stress_runs - 1);
       result = RunMain(argc, argv);
     }
-    printf("======== Full Deoptimization =======\n");
-    v8::Testing::DeoptimizeAll();
   } else {
     result = RunMain(argc, argv);
   }
   v8::V8::Dispose();
   return result;
 }
@ -370,46 +153,6 @@ const char* ToCString(const v8::String::Utf8Value& value) {
 }
// Creates a new execution environment containing the built-in
// functions.
v8::Persistent<v8::Context> CreateShellContext() {
// Create a template for the global object.
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
// Bind the global 'print' function to the C++ Print callback.
global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
// Bind the global 'read' function to the C++ Read callback.
global->Set(v8::String::New("read"), v8::FunctionTemplate::New(Read));
// Bind the global 'load' function to the C++ Load callback.
global->Set(v8::String::New("load"), v8::FunctionTemplate::New(Load));
// Bind the 'quit' function
global->Set(v8::String::New("quit"), v8::FunctionTemplate::New(Quit));
// Bind the 'version' function
global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
// Bind the handlers for external arrays.
global->Set(v8::String::New("Int8Array"),
v8::FunctionTemplate::New(Int8Array));
global->Set(v8::String::New("Uint8Array"),
v8::FunctionTemplate::New(Uint8Array));
global->Set(v8::String::New("Int16Array"),
v8::FunctionTemplate::New(Int16Array));
global->Set(v8::String::New("Uint16Array"),
v8::FunctionTemplate::New(Uint16Array));
global->Set(v8::String::New("Int32Array"),
v8::FunctionTemplate::New(Int32Array));
global->Set(v8::String::New("Uint32Array"),
v8::FunctionTemplate::New(Uint32Array));
global->Set(v8::String::New("Float32Array"),
v8::FunctionTemplate::New(Float32Array));
global->Set(v8::String::New("Float64Array"),
v8::FunctionTemplate::New(Float64Array));
global->Set(v8::String::New("PixelArray"),
v8::FunctionTemplate::New(PixelArray));
return v8::Context::New(NULL, global);
}
 // The callback that is invoked by v8 whenever the JavaScript 'print'
 // function is called.  Prints its arguments on stdout separated by
 // spaces and ending with a newline.
@ -479,7 +222,7 @@ v8::Handle<v8::Value> Quit(const v8::Arguments& args) {
   // If not arguments are given args[0] will yield undefined which
   // converts to the integer value 0.
   int exit_code = args[0]->Int32Value();
-  ExitShell(exit_code);
+  exit(exit_code);
   return v8::Undefined();
 }

@ -489,113 +232,6 @@ v8::Handle<v8::Value> Version(const v8::Arguments& args) {
 }
void ExternalArrayWeakCallback(v8::Persistent<v8::Value> object, void* data) {
free(data);
object.Dispose();
}
v8::Handle<v8::Value> CreateExternalArray(const v8::Arguments& args,
v8::ExternalArrayType type,
size_t element_size) {
assert(element_size == 1 ||
element_size == 2 ||
element_size == 4 ||
element_size == 8);
if (args.Length() != 1) {
return v8::ThrowException(
v8::String::New("Array constructor needs one parameter."));
}
static const int kMaxLength = 0x3fffffff;
size_t length = 0;
if (args[0]->IsUint32()) {
length = args[0]->Uint32Value();
} else if (args[0]->IsNumber()) {
double raw_length = args[0]->NumberValue();
if (raw_length < 0) {
return v8::ThrowException(
v8::String::New("Array length must not be negative."));
}
if (raw_length > kMaxLength) {
return v8::ThrowException(
v8::String::New("Array length exceeds maximum length."));
}
length = static_cast<size_t>(raw_length);
} else {
return v8::ThrowException(
v8::String::New("Array length must be a number."));
}
if (length > static_cast<size_t>(kMaxLength)) {
return v8::ThrowException(
v8::String::New("Array length exceeds maximum length."));
}
void* data = calloc(length, element_size);
if (data == NULL) {
return v8::ThrowException(v8::String::New("Memory allocation failed."));
}
v8::Handle<v8::Object> array = v8::Object::New();
v8::Persistent<v8::Object> persistent_array =
v8::Persistent<v8::Object>::New(array);
persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
persistent_array.MarkIndependent();
array->SetIndexedPropertiesToExternalArrayData(data, type, length);
array->Set(v8::String::New("length"), v8::Int32::New(length),
v8::ReadOnly);
array->Set(v8::String::New("BYTES_PER_ELEMENT"),
v8::Int32::New(element_size));
return array;
}
v8::Handle<v8::Value> Int8Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
}
v8::Handle<v8::Value> Uint8Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalUnsignedByteArray,
sizeof(uint8_t));
}
v8::Handle<v8::Value> Int16Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalShortArray, sizeof(int16_t));
}
v8::Handle<v8::Value> Uint16Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalUnsignedShortArray,
sizeof(uint16_t));
}
v8::Handle<v8::Value> Int32Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalIntArray, sizeof(int32_t));
}
v8::Handle<v8::Value> Uint32Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalUnsignedIntArray,
sizeof(uint32_t));
}
v8::Handle<v8::Value> Float32Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalFloatArray,
sizeof(float)); // NOLINT
}
v8::Handle<v8::Value> Float64Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalDoubleArray,
sizeof(double)); // NOLINT
}
v8::Handle<v8::Value> PixelArray(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalPixelArray, sizeof(uint8_t));
}
 // Reads a file into a v8 string.
 v8::Handle<v8::String> ReadFile(const char* name) {
   FILE* file = fopen(name, "rb");
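The restored shell wires its globals directly in RunMain (see above) instead of through a shared CreateShellContext(). The same pattern in its smallest form, as a sketch (the Print callback is assumed to exist, as in the sample):

    v8::HandleScope scope;
    v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
    global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
    v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
    {
      v8::Context::Scope context_scope(context);
      v8::Script::Compile(v8::String::New("print('hello')"))->Run();
    }
    context.Dispose();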

84
deps/v8/src/SConscript

@ -68,6 +68,7 @@ SOURCES = {
     execution.cc
     factory.cc
     flags.cc
+    frame-element.cc
     frames.cc
     full-codegen.cc
     func-name-inferrer.cc
@ -84,8 +85,8 @@ SOURCES = {
     ic.cc
     inspector.cc
     interpreter-irregexp.cc
-    isolate.cc
     jsregexp.cc
+    jump-target.cc
     lithium-allocator.cc
     lithium.cc
     liveedit.cc
@ -105,6 +106,7 @@ SOURCES = {
     regexp-macro-assembler-irregexp.cc
     regexp-macro-assembler.cc
     regexp-stack.cc
+    register-allocator.cc
     rewriter.cc
     runtime.cc
     runtime-profiler.cc
@ -121,6 +123,7 @@ SOURCES = {
     strtod.cc
     stub-cache.cc
     token.cc
+    top.cc
     type-info.cc
     unicode.cc
     utils.cc
@ -129,11 +132,14 @@ SOURCES = {
     v8threads.cc
     variables.cc
     version.cc
+    virtual-frame.cc
     zone.cc
     extensions/gc-extension.cc
     extensions/externalize-string-extension.cc
     """),
   'arch:arm': Split("""
+    jump-target-light.cc
+    virtual-frame-light.cc
     arm/builtins-arm.cc
     arm/code-stubs-arm.cc
     arm/codegen-arm.cc
@ -145,32 +151,37 @@ SOURCES = {
     arm/frames-arm.cc
     arm/full-codegen-arm.cc
     arm/ic-arm.cc
+    arm/jump-target-arm.cc
     arm/lithium-arm.cc
     arm/lithium-codegen-arm.cc
     arm/lithium-gap-resolver-arm.cc
     arm/macro-assembler-arm.cc
     arm/regexp-macro-assembler-arm.cc
+    arm/register-allocator-arm.cc
     arm/stub-cache-arm.cc
+    arm/virtual-frame-arm.cc
     arm/assembler-arm.cc
     """),
   'arch:mips': Split("""
     mips/assembler-mips.cc
     mips/builtins-mips.cc
-    mips/code-stubs-mips.cc
     mips/codegen-mips.cc
     mips/constants-mips.cc
     mips/cpu-mips.cc
     mips/debug-mips.cc
-    mips/deoptimizer-mips.cc
     mips/disasm-mips.cc
-    mips/frames-mips.cc
     mips/full-codegen-mips.cc
+    mips/frames-mips.cc
     mips/ic-mips.cc
+    mips/jump-target-mips.cc
     mips/macro-assembler-mips.cc
-    mips/regexp-macro-assembler-mips.cc
+    mips/register-allocator-mips.cc
     mips/stub-cache-mips.cc
+    mips/virtual-frame-mips.cc
     """),
   'arch:ia32': Split("""
+    jump-target-heavy.cc
+    virtual-frame-heavy.cc
     ia32/assembler-ia32.cc
     ia32/builtins-ia32.cc
     ia32/code-stubs-ia32.cc
@ -182,14 +193,19 @@ SOURCES = {
     ia32/frames-ia32.cc
     ia32/full-codegen-ia32.cc
     ia32/ic-ia32.cc
+    ia32/jump-target-ia32.cc
     ia32/lithium-codegen-ia32.cc
     ia32/lithium-gap-resolver-ia32.cc
     ia32/lithium-ia32.cc
     ia32/macro-assembler-ia32.cc
     ia32/regexp-macro-assembler-ia32.cc
+    ia32/register-allocator-ia32.cc
     ia32/stub-cache-ia32.cc
+    ia32/virtual-frame-ia32.cc
     """),
   'arch:x64': Split("""
+    jump-target-heavy.cc
+    virtual-frame-heavy.cc
     x64/assembler-x64.cc
     x64/builtins-x64.cc
     x64/code-stubs-x64.cc
@ -201,12 +217,15 @@ SOURCES = {
     x64/frames-x64.cc
     x64/full-codegen-x64.cc
     x64/ic-x64.cc
+    x64/jump-target-x64.cc
     x64/lithium-codegen-x64.cc
     x64/lithium-gap-resolver-x64.cc
     x64/lithium-x64.cc
     x64/macro-assembler-x64.cc
     x64/regexp-macro-assembler-x64.cc
+    x64/register-allocator-x64.cc
     x64/stub-cache-x64.cc
+    x64/virtual-frame-x64.cc
     """),
   'simulator:arm': ['arm/simulator-arm.cc'],
   'simulator:mips': ['mips/simulator-mips.cc'],
@ -226,20 +245,6 @@ SOURCES = {
 }
PREPARSER_SOURCES = {
'all': Split("""
allocation.cc
hashmap.cc
preparse-data.cc
preparser.cc
preparser-api.cc
scanner-base.cc
token.cc
unicode.cc
""")
}
 D8_FILES = {
   'all': [
     'd8.cc', 'd8-debug.cc'
@ -295,11 +300,6 @@ debug-debugger.js
 '''.split()
EXPERIMENTAL_LIBRARY_FILES = '''
proxy.js
'''.split()
 def Abort(message):
   print message
   sys.exit(1)
@ -310,22 +310,13 @@ def ConfigureObjectFiles():
   env.Replace(**context.flags['v8'])
   context.ApplyEnvOverrides(env)
   env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
-  if 'ENABLE_LOGGING_AND_PROFILING' in env['CPPDEFINES']:
-    env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')
-  else:
-    env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET')
-
-  def BuildJS2CEnv(type):
-    js2c_env = { 'TYPE': type, 'COMPRESSION': 'off' }
-    if 'COMPRESS_STARTUP_DATA_BZ2' in env['CPPDEFINES']:
-      js2c_env['COMPRESSION'] = 'bz2'
-    return js2c_env
+  env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')

   # Build the standard platform-independent source files.
   source_files = context.GetRelevantSources(SOURCES)
   d8_files = context.GetRelevantSources(D8_FILES)
-  d8_js = env.JS2C('d8-js.cc', 'd8.js', **{'TYPE': 'D8', 'COMPRESSION': 'off'})
+  d8_js = env.JS2C('d8-js.cc', 'd8.js', TYPE='D8')
   d8_js_obj = context.ConfigureObject(env, d8_js, CPPPATH=['.'])
   d8_objs = [context.ConfigureObject(env, [d8_files]), d8_js_obj]
@ -333,25 +324,12 @@ def ConfigureObjectFiles():
   # compile it.
   library_files = [s for s in LIBRARY_FILES]
   library_files.append('macros.py')
-  libraries_src = env.JS2C(
-      ['libraries.cc'], library_files, **BuildJS2CEnv('CORE'))
+  libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries-empty.cc'], library_files, TYPE='CORE')
   libraries_obj = context.ConfigureObject(env, libraries_src, CPPPATH=['.'])

-  # Combine the experimental JavaScript library files into a C++ file
-  # and compile it.
-  experimental_library_files = [ s for s in EXPERIMENTAL_LIBRARY_FILES ]
-  experimental_library_files.append('macros.py')
-  experimental_libraries_src = env.JS2C(['experimental-libraries.cc'],
-                                        experimental_library_files,
-                                        **BuildJS2CEnv('EXPERIMENTAL'))
-  experimental_libraries_obj = context.ConfigureObject(env, experimental_libraries_src, CPPPATH=['.'])
-
   source_objs = context.ConfigureObject(env, source_files)
   non_snapshot_files = [source_objs]

-  preparser_source_files = context.GetRelevantSources(PREPARSER_SOURCES)
-  preparser_objs = context.ConfigureObject(env, preparser_source_files)
-
   # Create snapshot if necessary.  For cross compilation you should either
   # do without snapshots and take the performance hit or you should build a
   # host VM with the simulator=arm and snapshot=on options and then take the
@ -362,7 +340,7 @@ def ConfigureObjectFiles():
   mksnapshot_env = env.Copy()
   mksnapshot_env.Replace(**context.flags['mksnapshot'])
   mksnapshot_src = 'mksnapshot.cc'
-  mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, experimental_libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
+  mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
   if context.use_snapshot:
     if context.build_snapshot:
       snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
@ -371,9 +349,9 @@ def ConfigureObjectFiles():
       snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
   else:
     snapshot_obj = empty_snapshot_obj
-  library_objs = [non_snapshot_files, libraries_obj, experimental_libraries_obj, snapshot_obj]
-  return (library_objs, d8_objs, [mksnapshot], preparser_objs)
+  library_objs = [non_snapshot_files, libraries_obj, snapshot_obj]
+  return (library_objs, d8_objs, [mksnapshot])

-(library_objs, d8_objs, mksnapshot, preparser_objs) = ConfigureObjectFiles()
-Return('library_objs d8_objs mksnapshot preparser_objs')
+(library_objs, d8_objs, mksnapshot) = ConfigureObjectFiles()
+Return('library_objs d8_objs mksnapshot')

307  deps/v8/src/accessors.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:

@@ -32,9 +32,9 @@
 #include "deoptimizer.h"
 #include "execution.h"
 #include "factory.h"
-#include "list-inl.h"
 #include "safepoint-table.h"
 #include "scopeinfo.h"
+#include "top.h"

 namespace v8 {
 namespace internal {

@@ -43,9 +43,8 @@ namespace internal {
 template <class C>
 static C* FindInPrototypeChain(Object* obj, bool* found_it) {
   ASSERT(!*found_it);
-  Heap* heap = HEAP;
   while (!Is<C>(obj)) {
-    if (obj == heap->null_value()) return NULL;
+    if (obj == Heap::null_value()) return NULL;
     obj = obj->GetPrototype();
   }
   *found_it = true;

@@ -91,34 +90,24 @@ MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
 Object* Accessors::FlattenNumber(Object* value) {
   if (value->IsNumber() || !value->IsJSValue()) return value;
   JSValue* wrapper = JSValue::cast(value);
-  ASSERT(Isolate::Current()->context()->global_context()->number_function()->
-      has_initial_map());
-  Map* number_map = Isolate::Current()->context()->global_context()->
-      number_function()->initial_map();
+  ASSERT(
+      Top::context()->global_context()->number_function()->has_initial_map());
+  Map* number_map =
+      Top::context()->global_context()->number_function()->initial_map();
   if (wrapper->map() == number_map) return wrapper->value();
   return value;
 }

 MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
-  Isolate* isolate = object->GetIsolate();
-  // This means one of the object's prototypes is a JSArray and the
-  // object does not have a 'length' property.  Calling SetProperty
-  // causes an infinite loop.
-  if (!object->IsJSArray()) {
-    return object->SetLocalPropertyIgnoreAttributes(
-        isolate->heap()->length_symbol(), value, NONE);
-  }
   value = FlattenNumber(value);

   // Need to call methods that may trigger GC.
-  HandleScope scope(isolate);
+  HandleScope scope;

   // Protect raw pointers.
-  Handle<JSObject> object_handle(object, isolate);
-  Handle<Object> value_handle(value, isolate);
+  Handle<JSObject> object_handle(object);
+  Handle<Object> value_handle(value);

   bool has_exception;
   Handle<Object> uint32_v = Execution::ToUint32(value_handle, &has_exception);

@@ -126,11 +115,22 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
   Handle<Object> number_v = Execution::ToNumber(value_handle, &has_exception);
   if (has_exception) return Failure::Exception();

+  // Restore raw pointers.
+  object = *object_handle;
+  value = *value_handle;
+
   if (uint32_v->Number() == number_v->Number()) {
-    return Handle<JSArray>::cast(object_handle)->SetElementsLength(*uint32_v);
+    if (object->IsJSArray()) {
+      return JSArray::cast(object)->SetElementsLength(*uint32_v);
+    } else {
+      // This means one of the object's prototypes is a JSArray and
+      // the object does not have a 'length' property.
+      // Calling SetProperty causes an infinite loop.
+      return object->SetLocalPropertyIgnoreAttributes(Heap::length_symbol(),
+                                                      value, NONE);
+    }
   }
-  return isolate->Throw(
-      *isolate->factory()->NewRangeError("invalid_array_length",
-                                         HandleVector<Object>(NULL, 0)));
+  return Top::Throw(*Factory::NewRangeError("invalid_array_length",
+                                            HandleVector<Object>(NULL, 0)));
 }
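The uint32_v->Number() == number_v->Number() comparison above is ECMAScript's array-length validation: a value is a legal length only if ToUint32(value) round-trips to the same number. A minimal standalone sketch of that test, in plain C++ with no V8 types (not part of the diff):

#include <cmath>
#include <cstdint>
#include <cstdio>

// ToUint32 per ECMAScript: truncate toward zero, then reduce modulo 2^32
// into the non-negative range.
uint32_t ToUint32(double value) {
  if (!std::isfinite(value)) return 0;
  double m = std::fmod(std::trunc(value), 4294967296.0);  // mod 2^32
  if (m < 0) m += 4294967296.0;
  return static_cast<uint32_t>(m);
}

// Valid array length <=> ToUint32(value) equals the value as a number;
// otherwise the setter above throws a RangeError.
bool IsValidArrayLength(double value) {
  return static_cast<double>(ToUint32(value)) == value;
}

int main() {
  std::printf("%d %d %d %d\n",
              IsValidArrayLength(42.0),           // 1
              IsValidArrayLength(-1.0),           // 0: would throw
              IsValidArrayLength(1.5),            // 0: would throw
              IsValidArrayLength(4294967296.0));  // 0: 2^32 is out of range
  return 0;
}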
@@ -314,18 +314,15 @@ const AccessorDescriptor Accessors::ScriptCompilationType = {
 MaybeObject* Accessors::ScriptGetLineEnds(Object* object, void*) {
-  JSValue* wrapper = JSValue::cast(object);
-  Isolate* isolate = wrapper->GetIsolate();
-  HandleScope scope(isolate);
-  Handle<Script> script(Script::cast(wrapper->value()), isolate);
+  HandleScope scope;
+  Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
   InitScriptLineEnds(script);
   ASSERT(script->line_ends()->IsFixedArray());
   Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
   // We do not want anyone to modify this array from JS.
-  ASSERT(*line_ends == isolate->heap()->empty_fixed_array() ||
-         line_ends->map() == isolate->heap()->fixed_cow_array_map());
-  Handle<JSArray> js_array =
-      isolate->factory()->NewJSArrayWithElements(line_ends);
+  ASSERT(*line_ends == Heap::empty_fixed_array() ||
+         line_ends->map() == Heap::fixed_cow_array_map());
+  Handle<JSArray> js_array = Factory::NewJSArrayWithElements(line_ends);
   return *js_array;
 }

@@ -371,7 +368,7 @@ MaybeObject* Accessors::ScriptGetEvalFromScript(Object* object, void*) {
       return *GetScriptWrapper(eval_from_script);
     }
   }
-  return HEAP->undefined_value();
+  return Heap::undefined_value();
 }

@@ -394,7 +391,7 @@ MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Object* object, void*) {
   // If this is not a script compiled through eval there is no eval position.
   int compilation_type = Smi::cast(script->compilation_type())->value();
   if (compilation_type != Script::COMPILATION_TYPE_EVAL) {
-    return HEAP->undefined_value();
+    return Heap::undefined_value();
   }

   // Get the function from where eval was called and find the source position

@@ -446,10 +443,9 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
 MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
-  Heap* heap = Isolate::Current()->heap();
   bool found_it = false;
   JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
-  if (!found_it) return heap->undefined_value();
+  if (!found_it) return Heap::undefined_value();
   while (!function->should_have_prototype()) {
     found_it = false;
     function = FindInPrototypeChain<JSFunction>(object->GetPrototype(),

@@ -460,7 +456,7 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
   if (!function->has_prototype()) {
     Object* prototype;
-    { MaybeObject* maybe_prototype = heap->AllocateFunctionPrototype(function);
+    { MaybeObject* maybe_prototype = Heap::AllocateFunctionPrototype(function);
       if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
     }
     Object* result;

@@ -475,13 +471,12 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
 MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
                                              Object* value,
                                              void*) {
-  Heap* heap = object->GetHeap();
   bool found_it = false;
   JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
-  if (!found_it) return heap->undefined_value();
+  if (!found_it) return Heap::undefined_value();
   if (!function->should_have_prototype()) {
     // Since we hit this accessor, object will have no prototype property.
-    return object->SetLocalPropertyIgnoreAttributes(heap->prototype_symbol(),
+    return object->SetLocalPropertyIgnoreAttributes(Heap::prototype_symbol(),
                                                     value,
                                                     NONE);
   }

@@ -550,7 +545,7 @@ const AccessorDescriptor Accessors::FunctionLength = {
 MaybeObject* Accessors::FunctionGetName(Object* object, void*) {
   bool found_it = false;
   JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
-  if (!found_it) return HEAP->undefined_value();
+  if (!found_it) return Heap::undefined_value();
   return holder->shared()->name();
 }
@@ -566,20 +561,183 @@ const AccessorDescriptor Accessors::FunctionName = {
 // Accessors::FunctionArguments
 //

+static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
+  if (slot_index >= 0) {
+    const int offset = JavaScriptFrameConstants::kLocal0Offset;
+    return frame->fp() + offset - (slot_index * kPointerSize);
+  } else {
+    const int offset = JavaScriptFrameConstants::kSavedRegistersOffset;
+    return frame->fp() + offset - ((slot_index + 1) * kPointerSize);
+  }
+}
+
+// We can't intermix stack decoding and allocations because the
+// deoptimization infrastructure is not GC safe.
+// Thus we build a temporary structure in malloced space.
+class SlotRef BASE_EMBEDDED {
+ public:
+  enum SlotRepresentation {
+    UNKNOWN,
+    TAGGED,
+    INT32,
+    DOUBLE,
+    LITERAL
+  };
+
+  SlotRef()
+      : addr_(NULL), representation_(UNKNOWN) { }
+
+  SlotRef(Address addr, SlotRepresentation representation)
+      : addr_(addr), representation_(representation) { }
+
+  explicit SlotRef(Object* literal)
+      : literal_(literal), representation_(LITERAL) { }
+
+  Handle<Object> GetValue() {
+    switch (representation_) {
+      case TAGGED:
+        return Handle<Object>(Memory::Object_at(addr_));
+
+      case INT32: {
+        int value = Memory::int32_at(addr_);
+        if (Smi::IsValid(value)) {
+          return Handle<Object>(Smi::FromInt(value));
+        } else {
+          return Factory::NewNumberFromInt(value);
+        }
+      }
+
+      case DOUBLE: {
+        double value = Memory::double_at(addr_);
+        return Factory::NewNumber(value);
+      }
+
+      case LITERAL:
+        return literal_;
+
+      default:
+        UNREACHABLE();
+        return Handle<Object>::null();
+    }
+  }
+
+ private:
+  Address addr_;
+  Handle<Object> literal_;
+  SlotRepresentation representation_;
+};
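The pattern above is deferred materialization: during the GC-unsafe stack walk, only an address and a representation are recorded, and the actual heap value is built later. A standalone sketch of the same idea in plain C++ (the byte buffer stands in for stack memory; nothing here is V8 API):

#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Record where a value lives and how it is encoded; decode only later,
// once allocation is safe again.
struct SlotRefSketch {
  enum Repr { TAGGED, INT32, DOUBLE } repr;
  const char* addr;

  double Decode() const {  // the materialization step
    switch (repr) {
      case INT32:  { int32_t v; std::memcpy(&v, addr, sizeof v); return v; }
      case DOUBLE: { double v;  std::memcpy(&v, addr, sizeof v); return v; }
      case TAGGED: { // a real VM would re-wrap the tagged pointer; here we
                     // just undo a smi-style shift for illustration
                     int32_t v; std::memcpy(&v, addr, sizeof v); return v >> 1; }
    }
    return 0;
  }
};

int main() {
  char stack[16] = {};
  int32_t raw = 7;
  double d = 2.5;
  std::memcpy(stack, &raw, sizeof raw);
  std::memcpy(stack + 8, &d, sizeof d);

  std::vector<SlotRefSketch> slots = {{SlotRefSketch::INT32, stack},
                                      {SlotRefSketch::DOUBLE, stack + 8}};
  for (const SlotRefSketch& s : slots) std::cout << s.Decode() << "\n";  // 7, 2.5
}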
+static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
+                                          DeoptimizationInputData* data,
+                                          JavaScriptFrame* frame) {
+  Translation::Opcode opcode =
+      static_cast<Translation::Opcode>(iterator->Next());
+
+  switch (opcode) {
+    case Translation::BEGIN:
+    case Translation::FRAME:
+      // Peeled off before getting here.
+      break;
+
+    case Translation::ARGUMENTS_OBJECT:
+      // This can only be emitted for local slots, not for argument slots.
+      break;
+
+    case Translation::REGISTER:
+    case Translation::INT32_REGISTER:
+    case Translation::DOUBLE_REGISTER:
+    case Translation::DUPLICATE:
+      // We are at a safepoint which corresponds to a call.  All registers
+      // are saved by the caller, so there are no live registers at this
+      // point.  Thus these translation commands should not be used.
+      break;
+
+    case Translation::STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::TAGGED);
+    }
+
+    case Translation::INT32_STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::INT32);
+    }
+
+    case Translation::DOUBLE_STACK_SLOT: {
+      int slot_index = iterator->Next();
+      Address slot_addr = SlotAddress(frame, slot_index);
+      return SlotRef(slot_addr, SlotRef::DOUBLE);
+    }
+
+    case Translation::LITERAL: {
+      int literal_index = iterator->Next();
+      return SlotRef(data->LiteralArray()->get(literal_index));
+    }
+  }
+
+  UNREACHABLE();
+  return SlotRef();
+}
+
+static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
+                                           int inlined_frame_index,
+                                           Vector<SlotRef>* args_slots) {
+  AssertNoAllocation no_gc;
+  int deopt_index = AstNode::kNoNumber;
+  DeoptimizationInputData* data =
+      static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
+  TranslationIterator it(data->TranslationByteArray(),
+                         data->TranslationIndex(deopt_index)->value());
+  Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
+  ASSERT(opcode == Translation::BEGIN);
+  int frame_count = it.Next();
+  USE(frame_count);
+  ASSERT(frame_count > inlined_frame_index);
+  int frames_to_skip = inlined_frame_index;
+  while (true) {
+    opcode = static_cast<Translation::Opcode>(it.Next());
+    // Skip over operands to advance to the next opcode.
+    it.Skip(Translation::NumberOfOperandsFor(opcode));
+    if (opcode == Translation::FRAME) {
+      if (frames_to_skip == 0) {
+        // We reached the frame corresponding to the inlined function
+        // in question.  Process the translation commands for the
+        // arguments.
+        //
+        // Skip the translation command for the receiver.
+        it.Skip(Translation::NumberOfOperandsFor(
+            static_cast<Translation::Opcode>(it.Next())));
+        // Compute slots for arguments.
+        for (int i = 0; i < args_slots->length(); ++i) {
+          (*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
+        }
+        return;
+      }
+      frames_to_skip--;
+    }
+  }
+
+  UNREACHABLE();
+}
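ComputeSlotMappingForArguments walks a flat opcode stream, skipping each command's operands, until it has passed inlined_frame_index FRAME markers. That skipping idiom can be shown in miniature; the tiny opcode set below is invented for the example and is not V8's translation format:

#include <cstdio>
#include <vector>

// Invented miniature opcode stream: each opcode is followed by a fixed
// number of operands, so "skip" means advancing past that many entries.
enum Op { FRAME = 0, SLOT = 1, LITERAL = 2 };

int OperandCount(Op op) { return op == FRAME ? 2 : 1; }

// Return the stream position just after the n-th FRAME marker (0-based),
// or -1 if there are not that many frames.
int SkipToFrame(const std::vector<int>& stream, int n) {
  size_t pos = 0;
  while (pos < stream.size()) {
    Op op = static_cast<Op>(stream[pos]);
    pos += 1 + OperandCount(op);  // advance past opcode and its operands
    if (op == FRAME && n-- == 0) return static_cast<int>(pos);
  }
  return -1;  // not found
}

int main() {
  // FRAME(2 operands), SLOT(1), FRAME(2), LITERAL(1)
  std::vector<int> stream = {FRAME, 0, 0, SLOT, 4, FRAME, 1, 0, LITERAL, 7};
  std::printf("%d\n", SkipToFrame(stream, 1));  // 8: just past the second FRAME
}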
 static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
     JavaScriptFrame* frame,
     Handle<JSFunction> inlined_function,
     int inlined_frame_index) {
-  Factory* factory = Isolate::Current()->factory();
   int args_count = inlined_function->shared()->formal_parameter_count();
   ScopedVector<SlotRef> args_slots(args_count);
-  SlotRef::ComputeSlotMappingForArguments(frame,
-                                          inlined_frame_index,
-                                          &args_slots);
+  ComputeSlotMappingForArguments(frame, inlined_frame_index, &args_slots);
   Handle<JSObject> arguments =
-      factory->NewArgumentsObject(inlined_function, args_count);
-  Handle<FixedArray> array = factory->NewFixedArray(args_count);
+      Factory::NewArgumentsObject(inlined_function, args_count);
+  Handle<FixedArray> array = Factory::NewFixedArray(args_count);
   for (int i = 0; i < args_count; ++i) {
     Handle<Object> value = args_slots[i].GetValue();
     array->set(i, *value);

@@ -592,16 +750,15 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
 MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
-  Isolate* isolate = Isolate::Current();
-  HandleScope scope(isolate);
+  HandleScope scope;
   bool found_it = false;
   JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
-  if (!found_it) return isolate->heap()->undefined_value();
-  Handle<JSFunction> function(holder, isolate);
+  if (!found_it) return Heap::undefined_value();
+  Handle<JSFunction> function(holder);

   // Find the top invocation of the function by traversing frames.
   List<JSFunction*> functions(2);
-  for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
+  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
     JavaScriptFrame* frame = it.frame();
     frame->GetFunctions(&functions);
     for (int i = functions.length() - 1; i >= 0; i--) {

@@ -619,9 +776,9 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
       if (!frame->is_optimized()) {
         // If there is an arguments variable in the stack, we return that.
         Handle<SerializedScopeInfo> info(function->shared()->scope_info());
-        int index = info->StackSlotIndex(isolate->heap()->arguments_symbol());
+        int index = info->StackSlotIndex(Heap::arguments_symbol());
         if (index >= 0) {
-          Handle<Object> arguments(frame->GetExpression(index), isolate);
+          Handle<Object> arguments(frame->GetExpression(index));
           if (!arguments->IsArgumentsMarker()) return *arguments;
         }
       }

@@ -635,13 +792,15 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
       // Get the number of arguments and construct an arguments object
       // mirror for the right frame.
       const int length = frame->ComputeParametersCount();
-      Handle<JSObject> arguments = isolate->factory()->NewArgumentsObject(
-          function, length);
-      Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
+      Handle<JSObject> arguments = Factory::NewArgumentsObject(function,
+                                                               length);
+      Handle<FixedArray> array = Factory::NewFixedArray(length);

       // Copy the parameters to the arguments object.
       ASSERT(array->length() == length);
-      for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
+      for (int i = 0; i < length; i++) {
+        array->set(i, frame->GetParameter(i));
+      }
       arguments->set_elements(*array);

       // Return the freshly allocated arguments object.

@@ -651,7 +810,7 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
   }

   // No frame corresponding to the given function found. Return null.
-  return isolate->heap()->null_value();
+  return Heap::null_value();
 }
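The mirroring step in FunctionGetArguments is simply "allocate a fresh array, copy every live parameter out of the frame, return the copy". A toy version in plain C++, with an invented Frame struct standing in for V8's JavaScriptFrame:

#include <iostream>
#include <vector>

// Hypothetical stand-in for a stack frame holding raw parameter values.
struct Frame {
  std::vector<int> params;
  int ComputeParametersCount() const { return static_cast<int>(params.size()); }
  int GetParameter(int i) const { return params[i]; }
};

// Build a fresh "arguments" mirror by copying each parameter out of the
// frame, mimicking the copy loop in FunctionGetArguments above.
std::vector<int> MirrorArguments(const Frame& frame) {
  const int length = frame.ComputeParametersCount();
  std::vector<int> mirror(length);
  for (int i = 0; i < length; i++) mirror[i] = frame.GetParameter(i);
  return mirror;
}

int main() {
  Frame frame{{10, 20, 30}};
  for (int v : MirrorArguments(frame)) std::cout << v << " ";  // 10 20 30
  std::cout << "\n";
}

The copy matters: the mirror must stay valid after the frame is popped, which is why the real code allocates a FixedArray rather than pointing into the stack.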
@@ -667,30 +826,16 @@ const AccessorDescriptor Accessors::FunctionArguments = {
 //

-static MaybeObject* CheckNonStrictCallerOrThrow(
-    Isolate* isolate,
-    JSFunction* caller) {
-  DisableAssertNoAllocation enable_allocation;
-  if (caller->shared()->strict_mode()) {
-    return isolate->Throw(
-        *isolate->factory()->NewTypeError("strict_caller",
-                                          HandleVector<Object>(NULL, 0)));
-  }
-  return caller;
-}
-
 MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
-  Isolate* isolate = Isolate::Current();
-  HandleScope scope(isolate);
+  HandleScope scope;
   AssertNoAllocation no_alloc;
   bool found_it = false;
   JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
-  if (!found_it) return isolate->heap()->undefined_value();
-  Handle<JSFunction> function(holder, isolate);
+  if (!found_it) return Heap::undefined_value();
+  Handle<JSFunction> function(holder);

   List<JSFunction*> functions(2);
-  for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
+  for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
     JavaScriptFrame* frame = it.frame();
     frame->GetFunctions(&functions);
     for (int i = functions.length() - 1; i >= 0; i--) {

@@ -700,18 +845,18 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
         // frames, e.g. frames for scripts not functions.
         if (i > 0) {
           ASSERT(!functions[i - 1]->shared()->is_toplevel());
-          return CheckNonStrictCallerOrThrow(isolate, functions[i - 1]);
+          return functions[i - 1];
         } else {
           for (it.Advance(); !it.done(); it.Advance()) {
             frame = it.frame();
             functions.Rewind(0);
             frame->GetFunctions(&functions);
             if (!functions.last()->shared()->is_toplevel()) {
-              return CheckNonStrictCallerOrThrow(isolate, functions.last());
+              return functions.last();
             }
             ASSERT(functions.length() == 1);
           }
-          if (it.done()) return isolate->heap()->null_value();
+          if (it.done()) return Heap::null_value();
           break;
         }
       }

@@ -720,7 +865,7 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
   }

   // No frame corresponding to the given function found. Return null.
-  return isolate->heap()->null_value();
+  return Heap::null_value();
 }

2  deps/v8/src/accessors.h

@@ -28,8 +28,6 @@
 #ifndef V8_ACCESSORS_H_
 #define V8_ACCESSORS_H_

-#include "allocation.h"
-
 namespace v8 {
 namespace internal {

82  deps/v8/src/allocation.cc

@@ -25,6 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

+#include <stdlib.h>
+
 #include "../include/v8stdint.h"
 #include "globals.h"
 #include "checks.h"

@@ -35,6 +37,7 @@ namespace v8 {
 namespace internal {

 void* Malloced::New(size_t size) {
+  ASSERT(NativeAllocationChecker::allocation_allowed());
   void* result = malloc(size);
   if (result == NULL) {
     v8::internal::FatalProcessOutOfMemory("Malloced operator new");

@@ -100,6 +103,85 @@ char* StrNDup(const char* str, int n) {
 }

+int NativeAllocationChecker::allocation_disallowed_ = 0;
+
+PreallocatedStorage PreallocatedStorage::in_use_list_(0);
+PreallocatedStorage PreallocatedStorage::free_list_(0);
+bool PreallocatedStorage::preallocated_ = false;
+
+void PreallocatedStorage::Init(size_t size) {
+  ASSERT(free_list_.next_ == &free_list_);
+  ASSERT(free_list_.previous_ == &free_list_);
+  PreallocatedStorage* free_chunk =
+      reinterpret_cast<PreallocatedStorage*>(new char[size]);
+  free_list_.next_ = free_list_.previous_ = free_chunk;
+  free_chunk->next_ = free_chunk->previous_ = &free_list_;
+  free_chunk->size_ = size - sizeof(PreallocatedStorage);
+  preallocated_ = true;
+}
+
+void* PreallocatedStorage::New(size_t size) {
+  if (!preallocated_) {
+    return FreeStoreAllocationPolicy::New(size);
+  }
+  ASSERT(free_list_.next_ != &free_list_);
+  ASSERT(free_list_.previous_ != &free_list_);
+  size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
+  // Search for exact fit.
+  for (PreallocatedStorage* storage = free_list_.next_;
+       storage != &free_list_;
+       storage = storage->next_) {
+    if (storage->size_ == size) {
+      storage->Unlink();
+      storage->LinkTo(&in_use_list_);
+      return reinterpret_cast<void*>(storage + 1);
+    }
+  }
+  // Search for first fit.
+  for (PreallocatedStorage* storage = free_list_.next_;
+       storage != &free_list_;
+       storage = storage->next_) {
+    if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
+      storage->Unlink();
+      storage->LinkTo(&in_use_list_);
+      PreallocatedStorage* left_over =
+          reinterpret_cast<PreallocatedStorage*>(
+              reinterpret_cast<char*>(storage + 1) + size);
+      left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
+      ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
+             storage->size_);
+      storage->size_ = size;
+      left_over->LinkTo(&free_list_);
+      return reinterpret_cast<void*>(storage + 1);
+    }
+  }
+  // Allocation failure.
+  ASSERT(false);
+  return NULL;
+}
+
+// We don't attempt to coalesce.
+void PreallocatedStorage::Delete(void* p) {
+  if (p == NULL) {
+    return;
+  }
+  if (!preallocated_) {
+    FreeStoreAllocationPolicy::Delete(p);
+    return;
+  }
+  PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
+  ASSERT(storage->next_->previous_ == storage);
+  ASSERT(storage->previous_->next_ == storage);
+  storage->Unlink();
+  storage->LinkTo(&free_list_);
+}
+
 void PreallocatedStorage::LinkTo(PreallocatedStorage* other) {
   next_ = other->next_;
   other->next_->previous_ = this;
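PreallocatedStorage::New above is a classic intrusive free-list allocator: try an exact fit first, then a first fit with a split, and Delete just moves the block back onto the free list without coalescing. A compact standalone rendition of that search-and-split policy (plain C++, fixed arena, singly linked for brevity; not V8's actual layout):

#include <cassert>
#include <cstddef>
#include <cstdio>

// Each block carries an intrusive header; the payload follows it.  Freed
// blocks go back on the list and, as in the V8 code, are never coalesced.
struct Block {
  std::size_t size;  // payload bytes following this header
  Block* next;       // next free block (meaningful only while free)
};

static Block* free_list = nullptr;

void ArenaInit(void* memory, std::size_t bytes) {
  free_list = static_cast<Block*>(memory);
  free_list->size = bytes - sizeof(Block);
  free_list->next = nullptr;
}

void* ArenaNew(std::size_t size) {
  for (Block** link = &free_list; *link != nullptr; link = &(*link)->next) {
    Block* b = *link;
    if (b->size == size) {  // exact fit: unlink and hand out
      *link = b->next;
      return b + 1;
    }
    if (b->size >= size + sizeof(Block)) {  // first fit: split the block
      Block* left_over = reinterpret_cast<Block*>(
          reinterpret_cast<char*>(b + 1) + size);
      left_over->size = b->size - size - sizeof(Block);
      left_over->next = b->next;
      b->size = size;
      *link = left_over;
      return b + 1;
    }
  }
  return nullptr;  // arena exhausted
}

void ArenaDelete(void* p) {
  Block* b = static_cast<Block*>(p) - 1;  // recover the header
  b->next = free_list;                    // push back, no coalescing
  free_list = b;
}

int main() {
  alignas(Block) static char arena[1024];
  ArenaInit(arena, sizeof(arena));
  void* a = ArenaNew(64);
  void* b = ArenaNew(128);
  assert(a != nullptr && b != nullptr);
  ArenaDelete(a);
  void* c = ArenaNew(64);  // reuses the freed 64-byte block (exact fit)
  std::printf("%s\n", c == a ? "reused" : "fresh");
  return 0;
}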

51  deps/v8/src/allocation.h

@@ -39,6 +39,38 @@ namespace internal {
 // processing.
 void FatalProcessOutOfMemory(const char* message);

+// A class that controls whether allocation is allowed.  This is for
+// the C++ heap only!
+class NativeAllocationChecker {
+ public:
+  typedef enum { ALLOW, DISALLOW } NativeAllocationAllowed;
+  explicit inline NativeAllocationChecker(NativeAllocationAllowed allowed)
+      : allowed_(allowed) {
+#ifdef DEBUG
+    if (allowed == DISALLOW) {
+      allocation_disallowed_++;
+    }
+#endif
+  }
+  ~NativeAllocationChecker() {
+#ifdef DEBUG
+    if (allowed_ == DISALLOW) {
+      allocation_disallowed_--;
+    }
+#endif
+    ASSERT(allocation_disallowed_ >= 0);
+  }
+  static inline bool allocation_allowed() {
+    return allocation_disallowed_ == 0;
+  }
+ private:
+  // This static counter ensures that NativeAllocationCheckers can be nested.
+  static int allocation_disallowed_;
+  // This flag applies to this particular instance.
+  NativeAllocationAllowed allowed_;
+};
+
 // Superclass for classes managed with new & delete.
 class Malloced {
  public:
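NativeAllocationChecker is a nestable RAII guard: constructing a DISALLOW instance bumps a static counter, destruction undoes it, and allocation sites assert that the counter is zero. A minimal standalone version of the pattern (plain C++, without V8's DEBUG plumbing or the ALLOW mode):

#include <cassert>
#include <cstddef>
#include <cstdio>

// Scopes that must not allocate construct a NoAllocGuard; the allocator
// asserts none are active.  Nesting works because each guard increments
// the shared counter rather than setting a flag.
class NoAllocGuard {
 public:
  NoAllocGuard() { ++depth_; }
  ~NoAllocGuard() { --depth_; }
  static bool allocation_allowed() { return depth_ == 0; }

 private:
  static int depth_;
};

int NoAllocGuard::depth_ = 0;

void* CheckedNew(std::size_t size) {
  assert(NoAllocGuard::allocation_allowed());  // would fire inside any guard
  return new char[size];
}

int main() {
  void* p = CheckedNew(16);  // fine: no guard active
  {
    NoAllocGuard outer;
    NoAllocGuard inner;  // nested guards are counted, not clobbered
    std::printf("allowed inside guards? %d\n",
                NoAllocGuard::allocation_allowed());  // 0
  }
  std::printf("allowed after guards? %d\n",
              NoAllocGuard::allocation_allowed());  // 1
  delete[] static_cast<char*>(p);
  return 0;
}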
@@ -82,6 +114,7 @@ class AllStatic {
 template <typename T>
 static T* NewArray(int size) {
+  ASSERT(NativeAllocationChecker::allocation_allowed());
   T* result = new T[size];
   if (result == NULL) Malloced::FatalProcessOutOfMemory();
   return result;

@@ -113,27 +146,27 @@ class FreeStoreAllocationPolicy {
 // Allocation policy for allocating in preallocated space.
 // Used as an allocation policy for ScopeInfo when generating
 // stack traces.
-class PreallocatedStorage {
+class PreallocatedStorage : public AllStatic {
  public:
   explicit PreallocatedStorage(size_t size);
   size_t size() { return size_; }
+  static void* New(size_t size);
+  static void Delete(void* p);

-  // TODO(isolates): Get rid of these-- we'll have to change the allocator
-  // interface to include a pointer to an isolate to do this
-  // efficiently.
-  static inline void* New(size_t size);
-  static inline void Delete(void* p);
+  // Preallocate a set number of bytes.
+  static void Init(size_t size);

  private:
   size_t size_;
   PreallocatedStorage* previous_;
   PreallocatedStorage* next_;
+  static bool preallocated_;
+
+  static PreallocatedStorage in_use_list_;
+  static PreallocatedStorage free_list_;

   void LinkTo(PreallocatedStorage* other);
   void Unlink();
-
-  friend class Isolate;

   DISALLOW_IMPLICIT_CONSTRUCTORS(PreallocatedStorage);
 };

3440  deps/v8/src/api.cc  (file diff suppressed because it is too large)

111  deps/v8/src/api.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:

@@ -53,8 +53,8 @@ class Consts {
 class NeanderObject {
  public:
   explicit NeanderObject(int size);
-  explicit inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
-  explicit inline NeanderObject(v8::internal::Object* obj);
+  inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
+  inline NeanderObject(v8::internal::Object* obj);
   inline v8::internal::Object* get(int index);
   inline void set(int index, v8::internal::Object* value);
   inline v8::internal::Handle<v8::internal::JSObject> value() { return value_; }

@@ -69,7 +69,7 @@ class NeanderObject {
 class NeanderArray {
  public:
   NeanderArray();
-  explicit inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
+  inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
   inline v8::internal::Handle<v8::internal::JSObject> value() {
     return obj_.value();
   }

@@ -115,14 +115,14 @@ void NeanderObject::set(int offset, v8::internal::Object* value) {
 template <typename T> static inline T ToCData(v8::internal::Object* obj) {
   STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
   return reinterpret_cast<T>(
-      reinterpret_cast<intptr_t>(v8::internal::Foreign::cast(obj)->address()));
+      reinterpret_cast<intptr_t>(v8::internal::Proxy::cast(obj)->proxy()));
 }

 template <typename T>
 static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
   STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
-  return FACTORY->NewForeign(
+  return v8::internal::Factory::NewProxy(
       reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
 }

@@ -157,6 +157,7 @@ class RegisteredExtension {
   RegisteredExtension* next_auto_;
   ExtensionTraversalState state_;
   static RegisteredExtension* first_extension_;
+  static RegisteredExtension* first_auto_extension_;
 };

@@ -182,7 +183,7 @@ class Utils {
   static inline Local<Array> ToLocal(
       v8::internal::Handle<v8::internal::JSArray> obj);
   static inline Local<External> ToLocal(
-      v8::internal::Handle<v8::internal::Foreign> obj);
+      v8::internal::Handle<v8::internal::Proxy> obj);
   static inline Local<Message> MessageToLocal(
       v8::internal::Handle<v8::internal::Object> obj);
   static inline Local<StackTrace> StackTraceToLocal(

@@ -236,7 +237,7 @@ class Utils {
       OpenHandle(const v8::Signature* sig);
   static inline v8::internal::Handle<v8::internal::TypeSwitchInfo>
       OpenHandle(const v8::TypeSwitch* that);
-  static inline v8::internal::Handle<v8::internal::Foreign>
+  static inline v8::internal::Handle<v8::internal::Proxy>
       OpenHandle(const v8::External* that);
 };

@@ -273,7 +274,7 @@ MAKE_TO_LOCAL(ToLocal, String, String)
 MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
 MAKE_TO_LOCAL(ToLocal, JSObject, Object)
 MAKE_TO_LOCAL(ToLocal, JSArray, Array)
-MAKE_TO_LOCAL(ToLocal, Foreign, External)
+MAKE_TO_LOCAL(ToLocal, Proxy, External)
 MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
 MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
 MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)

@@ -311,7 +312,7 @@ MAKE_OPEN_HANDLE(Script, Object)
 MAKE_OPEN_HANDLE(Function, JSFunction)
 MAKE_OPEN_HANDLE(Message, JSObject)
 MAKE_OPEN_HANDLE(Context, Context)
-MAKE_OPEN_HANDLE(External, Foreign)
+MAKE_OPEN_HANDLE(External, Proxy)
 MAKE_OPEN_HANDLE(StackTrace, JSArray)
 MAKE_OPEN_HANDLE(StackFrame, JSObject)
@@ -320,101 +321,36 @@ MAKE_OPEN_HANDLE(StackFrame, JSObject)
 namespace internal {

-// Tracks string usage to help make better decisions when
-// externalizing strings.
-//
-// Implementation note: internally this class only tracks fresh
-// strings and keeps a single use counter for them.
-class StringTracker {
- public:
-  // Records that the given string's characters were copied to some
-  // external buffer. If this happens often we should honor
-  // externalization requests for the string.
-  void RecordWrite(Handle<String> string) {
-    Address address = reinterpret_cast<Address>(*string);
-    Address top = isolate_->heap()->NewSpaceTop();
-    if (IsFreshString(address, top)) {
-      IncrementUseCount(top);
-    }
-  }
-
-  // Estimates freshness and use frequency of the given string based
-  // on how close it is to the new space top and the recorded usage
-  // history.
-  inline bool IsFreshUnusedString(Handle<String> string) {
-    Address address = reinterpret_cast<Address>(*string);
-    Address top = isolate_->heap()->NewSpaceTop();
-    return IsFreshString(address, top) && IsUseCountLow(top);
-  }
-
- private:
-  StringTracker() : use_count_(0), last_top_(NULL), isolate_(NULL) { }
-
-  static inline bool IsFreshString(Address string, Address top) {
-    return top - kFreshnessLimit <= string && string <= top;
-  }
-
-  inline bool IsUseCountLow(Address top) {
-    if (last_top_ != top) return true;
-    return use_count_ < kUseLimit;
-  }
-
-  inline void IncrementUseCount(Address top) {
-    if (last_top_ != top) {
-      use_count_ = 0;
-      last_top_ = top;
-    }
-    ++use_count_;
-  }
-
-  // Single use counter shared by all fresh strings.
-  int use_count_;
-
-  // Last new space top when the use count above was valid.
-  Address last_top_;
-
-  Isolate* isolate_;
-
-  // How close to the new space top a fresh string has to be.
-  static const int kFreshnessLimit = 1024;
-
-  // The number of uses required to consider a string useful.
-  static const int kUseLimit = 32;
-
-  friend class Isolate;
-
-  DISALLOW_COPY_AND_ASSIGN(StringTracker);
-};
 // This class is here in order to be able to declare it a friend of
 // HandleScope.  Moving these methods to be members of HandleScope would be
-// neat in some ways, but it would expose internal implementation details in
+// neat in some ways, but it would expose external implementation details in
 // our public header file, which is undesirable.
 //
-// An isolate has a single instance of this class to hold the current thread's
-// data. In multithreaded V8 programs this data is copied in and out of storage
+// There is a singleton instance of this class to hold the per-thread data.
+// For multithreaded V8 programs this data is copied in and out of storage
 // so that the currently executing thread always has its own copy of this
 // data.
 class HandleScopeImplementer {
  public:
-  explicit HandleScopeImplementer(Isolate* isolate)
-      : isolate_(isolate),
-        blocks_(0),
+  HandleScopeImplementer()
+      : blocks_(0),
         entered_contexts_(0),
         saved_contexts_(0),
         spare_(NULL),
         ignore_out_of_memory_(false),
         call_depth_(0) { }
+
+  static HandleScopeImplementer* instance();

   // Threading support for handle data.
   static int ArchiveSpacePerThread();
-  char* RestoreThread(char* from);
-  char* ArchiveThread(char* to);
-  void FreeThreadResources();
+  static char* RestoreThread(char* from);
+  static char* ArchiveThread(char* to);
+  static void FreeThreadResources();

   // Garbage collection support.
-  void Iterate(v8::internal::ObjectVisitor* v);
+  static void Iterate(v8::internal::ObjectVisitor* v);
   static char* Iterate(v8::internal::ObjectVisitor* v, char* data);

@@ -466,7 +402,6 @@ class HandleScopeImplementer {
     ASSERT(call_depth_ == 0);
   }

-  Isolate* isolate_;
   List<internal::Object**> blocks_;
   // Used as a stack to keep track of entered contexts.
   List<Handle<Object> > entered_contexts_;

8  deps/v8/src/apinatives.js

@@ -73,15 +73,7 @@ function InstantiateFunction(data, name) {
       if (name) %FunctionSetName(fun, name);
       cache[serialNumber] = fun;
       var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
-      var attributes = %GetTemplateField(data, kApiPrototypeAttributesOffset);
-      if (attributes != NONE) {
-        %IgnoreAttributesAndSetProperty(
-            fun, "prototype",
-            prototype ? Instantiate(prototype) : {},
-            attributes);
-      } else {
-        fun.prototype = prototype ? Instantiate(prototype) : {};
-      }
+      fun.prototype = prototype ? Instantiate(prototype) : {};
       %SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
       var parent = %GetTemplateField(data, kApiParentTemplateOffset);
       if (parent) {

7  deps/v8/src/apiutils.h

@@ -31,6 +31,11 @@
 namespace v8 {

 class ImplementationUtilities {
  public:
+  static v8::Handle<v8::Primitive> Undefined();
+  static v8::Handle<v8::Primitive> Null();
+  static v8::Handle<v8::Boolean> True();
+  static v8::Handle<v8::Boolean> False();
+
   static int GetNameCount(ExtensionConfiguration* that) {
     return that->name_count_;
   }

@@ -63,6 +68,8 @@ class ImplementationUtilities {
   // to access the HandleScope data.
   typedef v8::HandleScope::Data HandleScopeData;

+  static HandleScopeData* CurrentHandleScope();
+
 #ifdef DEBUG
   static void ZapHandleRange(internal::Object** begin, internal::Object** end);
 #endif

29  deps/v8/src/arguments.h

@@ -28,8 +28,6 @@
 #ifndef V8_ARGUMENTS_H_
 #define V8_ARGUMENTS_H_

-#include "allocation.h"
-
 namespace v8 {
 namespace internal {

@@ -63,18 +61,11 @@ class Arguments BASE_EMBEDDED {
     return Handle<S>(reinterpret_cast<S**>(value));
   }

-  int smi_at(int index) {
-    return Smi::cast((*this)[index])->value();
-  }
-
-  double number_at(int index) {
-    return (*this)[index]->Number();
-  }
-
   // Get the total number of arguments including the receiver.
   int length() const { return length_; }

   Object** arguments() { return arguments_; }

  private:
   int length_;
   Object** arguments_;

@@ -86,16 +77,15 @@ class Arguments BASE_EMBEDDED {
 // can.
 class CustomArguments : public Relocatable {
  public:
-  inline CustomArguments(Isolate* isolate,
-                         Object* data,
+  inline CustomArguments(Object* data,
                          Object* self,
-                         JSObject* holder) : Relocatable(isolate) {
+                         JSObject* holder) {
     values_[2] = self;
     values_[1] = holder;
     values_[0] = data;
   }

-  inline explicit CustomArguments(Isolate* isolate) : Relocatable(isolate) {
+  inline CustomArguments() {
 #ifdef DEBUG
     for (size_t i = 0; i < ARRAY_SIZE(values_); i++) {
       values_[i] = reinterpret_cast<Object*>(kZapValue);

@@ -110,17 +100,6 @@ class CustomArguments : public Relocatable {
 };

-#define DECLARE_RUNTIME_FUNCTION(Type, Name)    \
-Type Name(Arguments args, Isolate* isolate)
-
-#define RUNTIME_FUNCTION(Type, Name)            \
-Type Name(Arguments args, Isolate* isolate)
-
-#define RUNTIME_ARGUMENTS(isolate, args) args, isolate
-
 } }  // namespace v8::internal

 #endif  // V8_ARGUMENTS_H_

19  deps/v8/src/arm/assembler-arm-inl.h

@@ -203,12 +203,11 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     visitor->VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  // TODO(isolates): Get a cached isolate below.
-  } else if (((RelocInfo::IsJSReturn(mode) &&
+  } else if (Debug::has_break_points() &&
+             ((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
-              IsPatchedDebugBreakSlotSequence())) &&
-             Isolate::Current()->debug()->has_break_points()) {
+              IsPatchedDebugBreakSlotSequence()))) {
     visitor->VisitDebugTarget(this);
 #endif
   } else if (mode == RelocInfo::RUNTIME_ENTRY) {

@@ -218,23 +217,23 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
 template<typename StaticVisitor>
-void RelocInfo::Visit(Heap* heap) {
+void RelocInfo::Visit() {
   RelocInfo::Mode mode = rmode();
   if (mode == RelocInfo::EMBEDDED_OBJECT) {
-    StaticVisitor::VisitPointer(heap, target_object_address());
+    StaticVisitor::VisitPointer(target_object_address());
   } else if (RelocInfo::IsCodeTarget(mode)) {
-    StaticVisitor::VisitCodeTarget(heap, this);
+    StaticVisitor::VisitCodeTarget(this);
   } else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
-    StaticVisitor::VisitGlobalPropertyCell(heap, this);
+    StaticVisitor::VisitGlobalPropertyCell(this);
   } else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
     StaticVisitor::VisitExternalReference(target_reference_address());
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  } else if (heap->isolate()->debug()->has_break_points() &&
+  } else if (Debug::has_break_points() &&
              ((RelocInfo::IsJSReturn(mode) &&
               IsPatchedReturnSequence()) ||
              (RelocInfo::IsDebugBreakSlot(mode) &&
               IsPatchedDebugBreakSlotSequence()))) {
-    StaticVisitor::VisitDebugTarget(heap, this);
+    StaticVisitor::VisitDebugTarget(this);
 #endif
   } else if (mode == RelocInfo::RUNTIME_ENTRY) {
     StaticVisitor::VisitRuntimeEntry(this);

638  deps/v8/src/arm/assembler-arm.cc

@@ -32,7 +32,7 @@
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.

 #include "v8.h"

@@ -44,80 +44,62 @@
 namespace v8 {
 namespace internal {

-#ifdef DEBUG
-bool CpuFeatures::initialized_ = false;
-#endif
+// Safe default is no features.
 unsigned CpuFeatures::supported_ = 0;
+unsigned CpuFeatures::enabled_ = 0;
 unsigned CpuFeatures::found_by_runtime_probing_ = 0;

-// Get the CPU features enabled by the build.  For cross compilation the
-// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP_INSTRUCTIONS
-// can be defined to enable ARMv7 and VFPv3 instructions when building the
-// snapshot.
+#ifdef __arm__
 static uint64_t CpuFeaturesImpliedByCompiler() {
   uint64_t answer = 0;
 #ifdef CAN_USE_ARMV7_INSTRUCTIONS
   answer |= 1u << ARMv7;
 #endif  // def CAN_USE_ARMV7_INSTRUCTIONS
-#ifdef CAN_USE_VFP_INSTRUCTIONS
-  answer |= 1u << VFP3 | 1u << ARMv7;
-#endif  // def CAN_USE_VFP_INSTRUCTIONS
-
-#ifdef __arm__
   // If the compiler is allowed to use VFP then we can use VFP too in our code
   // generation even when generating snapshots.  This won't work for cross
-  // compilation. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
+  // compilation.
 #if defined(__VFP_FP__) && !defined(__SOFTFP__)
-  answer |= 1u << VFP3 | 1u << ARMv7;
+  answer |= 1u << VFP3;
 #endif  // defined(__VFP_FP__) && !defined(__SOFTFP__)
-#endif  // def __arm__
+#ifdef CAN_USE_VFP_INSTRUCTIONS
+  answer |= 1u << VFP3;
+#endif  // def CAN_USE_VFP_INSTRUCTIONS
   return answer;
 }
+#endif  // def __arm__

-void CpuFeatures::Probe() {
-  ASSERT(!initialized_);
-#ifdef DEBUG
-  initialized_ = true;
-#endif
-  // Get the features implied by the OS and the compiler settings.  This is
-  // the minimal set of features which is also allowed for generated code in
-  // the snapshot.
-  supported_ |= OS::CpuFeaturesImpliedByPlatform();
-  supported_ |= CpuFeaturesImpliedByCompiler();
-
-  if (Serializer::enabled()) {
-    // No probing for features if we might serialize (generate snapshot).
-    return;
-  }
-
+void CpuFeatures::Probe(bool portable) {
 #ifndef __arm__
-  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
-  // enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
+  // For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
   if (FLAG_enable_vfp3) {
-    supported_ |= 1u << VFP3 | 1u << ARMv7;
+    supported_ |= 1u << VFP3;
   }
   // For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled.
   if (FLAG_enable_armv7) {
     supported_ |= 1u << ARMv7;
   }
 #else  // def __arm__
-  // Probe for additional features not already known to be available.
-  if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
-    // This implementation also sets the VFP flags if runtime
-    // detection of VFP returns true.  VFPv3 implies ARMv7, see ARM DDI
-    // 0406B, page A1-6.
-    supported_ |= 1u << VFP3 | 1u << ARMv7;
-    found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7;
+  if (portable && Serializer::enabled()) {
+    supported_ |= OS::CpuFeaturesImpliedByPlatform();
+    supported_ |= CpuFeaturesImpliedByCompiler();
+    return;  // No features if we might serialize.
+  }
+
+  if (OS::ArmCpuHasFeature(VFP3)) {
+    // This implementation also sets the VFP flags if
+    // runtime detection of VFP returns true.
+    supported_ |= 1u << VFP3;
+    found_by_runtime_probing_ |= 1u << VFP3;
   }

-  if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
+  if (OS::ArmCpuHasFeature(ARMv7)) {
     supported_ |= 1u << ARMv7;
     found_by_runtime_probing_ |= 1u << ARMv7;
   }
+
+  if (!portable) found_by_runtime_probing_ = 0;
 #endif
 }
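CpuFeatures packs each optional instruction-set extension into one bit of a supported mask, with a second mask remembering which bits came from runtime probing rather than from build flags (probed bits must never leak into a portable snapshot). A standalone sketch of the bookkeeping; the feature set and the probing stub are invented for the example:

#include <cstdio>

// Invented feature bits for illustration; V8's real enum lives elsewhere.
enum Feature { VFP3 = 0, ARMV7 = 1 };

static unsigned supported = 0;
static unsigned found_by_runtime_probing = 0;

static bool IsSupported(Feature f) { return (supported & (1u << f)) != 0; }

// Stand-in for OS::ArmCpuHasFeature: pretend the host CPU has VFP3 only.
static bool CpuHasFeature(Feature f) { return f == VFP3; }

static void Probe(bool build_time_only) {
  if (build_time_only) return;  // e.g. when building a portable snapshot
  const Feature all[] = {VFP3, ARMV7};
  for (Feature f : all) {
    if (!IsSupported(f) && CpuHasFeature(f)) {
      supported |= 1u << f;
      found_by_runtime_probing |= 1u << f;  // tracked separately
    }
  }
}

int main() {
  Probe(false);
  std::printf("VFP3: %d, ARMv7: %d\n", IsSupported(VFP3), IsSupported(ARMV7));
  // VFP3: 1, ARMv7: 0 -- only the probed feature is enabled.
}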
@ -166,7 +148,7 @@ Operand::Operand(Handle<Object> handle) {
rm_ = no_reg; rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space. // Verify all Objects referred by code are NOT in new space.
Object* obj = *handle; Object* obj = *handle;
ASSERT(!HEAP->InNewSpace(obj)); ASSERT(!Heap::InNewSpace(obj));
if (obj->IsHeapObject()) { if (obj->IsHeapObject()) {
imm32_ = reinterpret_cast<intptr_t>(handle.location()); imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT; rmode_ = RelocInfo::EMBEDDED_OBJECT;
@ -284,20 +266,21 @@ const Instr kLdrStrOffsetMask = 0x00000fff;
// Spare buffer. // Spare buffer.
static const int kMinimalBufferSize = 4*KB; static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size) Assembler::Assembler(void* buffer, int buffer_size)
: AssemblerBase(arg_isolate), : positions_recorder_(this),
positions_recorder_(this), allow_peephole_optimization_(false) {
emit_debug_code_(FLAG_debug_code) { allow_peephole_optimization_ = FLAG_peephole_optimization;
if (buffer == NULL) { if (buffer == NULL) {
// Do our own buffer management. // Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) { if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize; buffer_size = kMinimalBufferSize;
if (isolate()->assembler_spare_buffer() != NULL) { if (spare_buffer_ != NULL) {
buffer = isolate()->assembler_spare_buffer(); buffer = spare_buffer_;
isolate()->set_assembler_spare_buffer(NULL); spare_buffer_ = NULL;
} }
} }
if (buffer == NULL) { if (buffer == NULL) {
@ -320,22 +303,20 @@ Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
ASSERT(buffer_ != NULL); ASSERT(buffer_ != NULL);
pc_ = buffer_; pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_); reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
num_pending_reloc_info_ = 0; num_prinfo_ = 0;
next_buffer_check_ = 0; next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0; const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0; no_const_pool_before_ = 0;
first_const_pool_use_ = -1; last_const_pool_end_ = 0;
last_bound_pos_ = 0; last_bound_pos_ = 0;
ast_id_for_reloc_info_ = kNoASTId;
} }
Assembler::~Assembler() { Assembler::~Assembler() {
ASSERT(const_pool_blocked_nesting_ == 0); ASSERT(const_pool_blocked_nesting_ == 0);
if (own_buffer_) { if (own_buffer_) {
if (isolate()->assembler_spare_buffer() == NULL && if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
buffer_size_ == kMinimalBufferSize) { spare_buffer_ = buffer_;
isolate()->set_assembler_spare_buffer(buffer_);
} else { } else {
DeleteArray(buffer_); DeleteArray(buffer_);
} }
@ -346,7 +327,7 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) { void Assembler::GetCode(CodeDesc* desc) {
// Emit constant pool if necessary. // Emit constant pool if necessary.
CheckConstPool(true, false); CheckConstPool(true, false);
ASSERT(num_pending_reloc_info_ == 0); ASSERT(num_prinfo_ == 0);
// Setup code descriptor. // Setup code descriptor.
desc->buffer = buffer_; desc->buffer = buffer_;
@ -786,36 +767,11 @@ bool Operand::must_use_constant_pool() const {
} }
bool Operand::is_single_instruction(Instr instr) const { bool Operand::is_single_instruction() const {
if (rm_.is_valid()) return true; if (rm_.is_valid()) return true;
if (must_use_constant_pool()) return false;
uint32_t dummy1, dummy2; uint32_t dummy1, dummy2;
if (must_use_constant_pool() || return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
!fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, or use of
// constant pool is required. For a mov instruction not setting the
// condition code additional instruction conventions can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
if (must_use_constant_pool() ||
!CpuFeatures::IsSupported(ARMv7)) {
// mov instruction will be an ldr from constant pool (one instruction).
return true;
} else {
// mov instruction will be a mov or movw followed by movt (two
// instructions).
return false;
}
} else {
// If this is not a mov or mvn instruction there will always an additional
// instructions - either mov or ldr. The mov might actually be two
// instructions mov or movw followed by movt so including the actual
// instruction two or three instructions will be generated.
return false;
}
} else {
// No use of constant pool and the immediate operand can be encoded as a
// shifter operand.
return true;
}
} }
@ -838,8 +794,7 @@ void Assembler::addrmod1(Instr instr,
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = Instruction::ConditionField(instr); Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
if (x.must_use_constant_pool() || if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
!CpuFeatures::IsSupported(ARMv7)) {
RecordRelocInfo(x.rmode_, x.imm32_); RecordRelocInfo(x.rmode_, x.imm32_);
ldr(rd, MemOperand(pc, 0), cond); ldr(rd, MemOperand(pc, 0), cond);
} else { } else {
@ -873,7 +828,7 @@ void Assembler::addrmod1(Instr instr,
emit(instr | rn.code()*B16 | rd.code()*B12); emit(instr | rn.code()*B16 | rd.code()*B12);
if (rn.is(pc) || x.rm_.is(pc)) { if (rn.is(pc) || x.rm_.is(pc)) {
// Block constant pool emission for one instruction after reading pc. // Block constant pool emission for one instruction after reading pc.
BlockConstPoolFor(1); BlockConstPoolBefore(pc_offset() + kInstrSize);
} }
} }
@ -997,7 +952,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
// Block the emission of the constant pool, since the branch instruction must // Block the emission of the constant pool, since the branch instruction must
// be emitted at the pc offset recorded by the label. // be emitted at the pc offset recorded by the label.
BlockConstPoolFor(1); BlockConstPoolBefore(pc_offset() + kInstrSize);
return target_pos - (pc_offset() + kPcLoadDelta); return target_pos - (pc_offset() + kPcLoadDelta);
} }
@@ -1094,6 +1049,20 @@ void Assembler::rsb(Register dst, Register src1, const Operand& src2,
 void Assembler::add(Register dst, Register src1, const Operand& src2,
                     SBit s, Condition cond) {
   addrmod1(cond | ADD | s, src1, dst, src2);
+
+  // Eliminate pattern: push(r), pop()
+  //   str(src, MemOperand(sp, 4, NegPreIndex), al);
+  //   add(sp, sp, Operand(kPointerSize));
+  // Both instructions can be eliminated.
+  if (can_peephole_optimize(2) &&
+      // Pattern.
+      instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
+      (instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) {
+    pc_ -= 2 * kInstrSize;
+    if (FLAG_print_peephole_optimization) {
+      PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
+    }
+  }
 }
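The pattern test above keys off two precomputed encodings, kPushRegPattern (with the Rd field masked out) and kPopInstruction. A toy model of the same two-instruction window check over a plain buffer (hypothetical encodings standing in for V8's constants):

#include <stdint.h>
#include <vector>

typedef uint32_t Instr;
const Instr kPush = 0xE52D0004;    // str rX, [sp, #-4]!  (Rd field varies)
const Instr kPop = 0xE28DD004;     // add sp, sp, #4
const Instr kRdMask = 0xFu << 12;  // Rd occupies bits 15..12

// If the last two emitted words are "push rX; pop", both cancel out.
void EliminatePushPop(std::vector<Instr>* buf) {
  size_t n = buf->size();
  if (n >= 2 && (*buf)[n - 1] == kPop &&
      ((*buf)[n - 2] & ~kRdMask) == (kPush & ~kRdMask)) {
    buf->resize(n - 2);  // drop both instructions
  }
}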
@@ -1398,11 +1367,195 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
     positions_recorder()->WriteRecordedPositions();
   }
   addrmod2(cond | B26 | L, dst, src);
// Eliminate pattern: push(ry), pop(rx)
// str(ry, MemOperand(sp, 4, NegPreIndex), al)
// ldr(rx, MemOperand(sp, 4, PostIndex), al)
// Both instructions can be eliminated if ry = rx.
// If ry != rx, a register copy from ry to rx is inserted
// after eliminating the push and the pop instructions.
if (can_peephole_optimize(2)) {
Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
if (IsPush(push_instr) && IsPop(pop_instr)) {
if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) {
// For consecutive push and pop on different registers,
// we delete both the push & pop and insert a register move.
// push ry, pop rx --> mov rx, ry
Register reg_pushed, reg_popped;
reg_pushed = GetRd(push_instr);
reg_popped = GetRd(pop_instr);
pc_ -= 2 * kInstrSize;
// Insert a mov instruction, which is better than a pair of push & pop
mov(reg_popped, reg_pushed);
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop (diff reg) replaced by a reg move\n",
pc_offset());
}
} else {
// For consecutive push and pop on the same register,
// both the push and the pop can be deleted.
pc_ -= 2 * kInstrSize;
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
}
}
}
}
if (can_peephole_optimize(2)) {
Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);
if ((IsStrRegFpOffset(str_instr) &&
IsLdrRegFpOffset(ldr_instr)) ||
(IsStrRegFpNegOffset(str_instr) &&
IsLdrRegFpNegOffset(ldr_instr))) {
if ((ldr_instr & kLdrStrInstrArgumentMask) ==
(str_instr & kLdrStrInstrArgumentMask)) {
// Pattern: Ldr/str same fp+offset, same register.
//
// The following:
// str rx, [fp, #-12]
// ldr rx, [fp, #-12]
//
// Becomes:
// str rx, [fp, #-12]
pc_ -= 1 * kInstrSize;
if (FLAG_print_peephole_optimization) {
PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
}
} else if ((ldr_instr & kLdrStrOffsetMask) ==
(str_instr & kLdrStrOffsetMask)) {
// Pattern: Ldr/str same fp+offset, different register.
//
// The following:
// str rx, [fp, #-12]
// ldr ry, [fp, #-12]
//
// Becomes:
// str rx, [fp, #-12]
// mov ry, rx
Register reg_stored, reg_loaded;
reg_stored = GetRd(str_instr);
reg_loaded = GetRd(ldr_instr);
pc_ -= 1 * kInstrSize;
// Insert a mov instruction, which is better than ldr.
mov(reg_loaded, reg_stored);
if (FLAG_print_peephole_optimization) {
PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset());
}
}
}
}
if (can_peephole_optimize(3)) {
Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
if (IsPush(mem_write_instr) &&
IsPop(mem_read_instr)) {
if ((IsLdrRegFpOffset(ldr_instr) ||
IsLdrRegFpNegOffset(ldr_instr))) {
if (Instruction::RdValue(mem_write_instr) ==
Instruction::RdValue(mem_read_instr)) {
// Pattern: push & pop from/to same register,
// with a fp+offset ldr in between
//
// The following:
// str rx, [sp, #-4]!
// ldr rz, [fp, #-24]
// ldr rx, [sp], #+4
//
// Becomes:
// if(rx == rz)
// delete all
// else
// ldr rz, [fp, #-24]
if (Instruction::RdValue(mem_write_instr) ==
Instruction::RdValue(ldr_instr)) {
pc_ -= 3 * kInstrSize;
} else {
pc_ -= 3 * kInstrSize;
// Reinsert back the ldr rz.
emit(ldr_instr);
}
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
}
} else {
// Pattern: push & pop from/to different registers
// with a fp+offset ldr in between
//
// The following:
// str rx, [sp, #-4]!
// ldr rz, [fp, #-24]
// ldr ry, [sp], #+4
//
// Becomes:
// if(ry == rz)
// mov ry, rx;
// else if(rx != rz)
// ldr rz, [fp, #-24]
// mov ry, rx
// else if((ry != rz) || (rx == rz)) becomes:
// mov ry, rx
// ldr rz, [fp, #-24]
Register reg_pushed, reg_popped;
if (Instruction::RdValue(mem_read_instr) ==
Instruction::RdValue(ldr_instr)) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
mov(reg_popped, reg_pushed);
} else if (Instruction::RdValue(mem_write_instr) !=
Instruction::RdValue(ldr_instr)) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
emit(ldr_instr);
mov(reg_popped, reg_pushed);
} else if ((Instruction::RdValue(mem_read_instr) !=
Instruction::RdValue(ldr_instr)) ||
(Instruction::RdValue(mem_write_instr) ==
Instruction::RdValue(ldr_instr))) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
mov(reg_popped, reg_pushed);
emit(ldr_instr);
}
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
}
}
}
}
}
}
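The three-instruction case enumerates every aliasing combination of the pushed register rx, the ldr destination rz, and the popped register ry. A compact restatement of that decision table (a sketch, not V8 code):

enum Rewrite { kDeleteAll, kKeepOnlyLdr, kMovOnly, kLdrThenMov, kMovThenLdr };

// rx = pushed register, rz = ldr destination, ry = popped register.
Rewrite ClassifyPushLdrPop(int rx, int rz, int ry) {
  if (rx == ry) {                    // push & pop use the same register
    return (rx == rz) ? kDeleteAll   // the loaded value dies with the pop
                      : kKeepOnlyLdr;
  }
  if (ry == rz) return kMovOnly;     // pop would clobber rz: just mov ry, rx
  if (rx != rz) return kLdrThenMov;  // ldr rz, then mov ry, rx
  return kMovThenLdr;                // rx == rz: mov must read rx before the ldr
}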
void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
  addrmod2(cond | B26, src, dst);
// Eliminate pattern: pop(), push(r)
// add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
// -> str r, [sp, 0], al
if (can_peephole_optimize(2) &&
// Pattern.
instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
pc_ -= 2 * kInstrSize;
emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
if (FLAG_print_peephole_optimization) {
PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
}
}
}
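The rewrite is sound because the sp increment from pop() and the pre-decrement from push(r) cancel, leaving a plain word store at the original top of stack. A quick stack-effect check (illustrative, not V8 code):

#include <cassert>

int main() {
  int sp = 100;              // some stack pointer value
  int sp1 = sp + 4;          // add sp, sp, #4        (pop())
  sp1 -= 4;                  // str r, [sp, #-4]! pre-decrements sp (push(r))
  int store_addr = sp1;      // the push stores at the decremented sp
  assert(sp1 == sp);         // net sp change is zero
  assert(store_addr == sp);  // equivalent to str r, [sp, #0]
  return 0;
}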
@@ -1493,17 +1646,15 @@ void Assembler::stm(BlockAddrMode am,
 void Assembler::stop(const char* msg, Condition cond, int32_t code) {
 #ifndef __arm__
   ASSERT(code >= kDefaultStopCode);
-  {
-    // The Simulator will handle the stop instruction and get the message
-    // address. It expects to find the address just after the svc instruction.
-    BlockConstPoolScope block_const_pool(this);
+  // The Simulator will handle the stop instruction and get the message address.
+  // It expects to find the address just after the svc instruction.
+  BlockConstPoolFor(2);
   if (code >= 0) {
     svc(kStopCode + code, cond);
   } else {
     svc(kStopCode + kMaxStopCode, cond);
   }
   emit(reinterpret_cast<Instr>(msg));
-  }
 #else  // def __arm__
 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
   if (cond != al) {
@@ -1642,6 +1793,45 @@ void Assembler::ldc2(Coprocessor coproc,
 }
void Assembler::stc(Coprocessor coproc,
CRegister crd,
const MemOperand& dst,
LFlag l,
Condition cond) {
addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
}
void Assembler::stc(Coprocessor coproc,
CRegister crd,
Register rn,
int option,
LFlag l,
Condition cond) {
// Unindexed addressing.
ASSERT(is_uint8(option));
emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
coproc*B8 | (option & 255));
}
void Assembler::stc2(Coprocessor coproc,
                     CRegister crd,
                     const MemOperand& dst,
                     LFlag l) {  // v5 and above
stc(coproc, crd, dst, l, kSpecialCondition);
}
void Assembler::stc2(Coprocessor coproc,
CRegister crd,
Register rn,
int option,
LFlag l) { // v5 and above
stc(coproc, crd, rn, option, l, kSpecialCondition);
}
// Support for VFP.

void Assembler::vldr(const DwVfpRegister dst,

@@ -1814,88 +2004,6 @@ void Assembler::vstr(const SwVfpRegister src,
 }
void Assembler::vldm(BlockAddrMode am,
Register base,
DwVfpRegister first,
DwVfpRegister last,
Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count * 2)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
void Assembler::vstm(BlockAddrMode am,
Register base,
DwVfpRegister first,
DwVfpRegister last,
Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
void Assembler::vldm(BlockAddrMode am,
Register base,
SwVfpRegister first,
SwVfpRegister last,
Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count/2)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
0xA*B8 | count);
}
void Assembler::vstm(BlockAddrMode am,
Register base,
SwVfpRegister first,
SwVfpRegister last,
Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count/2)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
0xA*B8 | count);
}
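The only encoding difference between the double and single variants above is the imm8 transfer-count field: each D register moves two words, each S register one, as the bit-layout comments state. In sketch form (derived from those comments, not V8 code):

// imm8 field for vldm/vstm of D registers first..last: two words per register.
int DoubleRangeImm8(int first_code, int last_code) {
  return (last_code - first_code + 1) * 2;
}

// imm8 field for vldm/vstm of S registers first..last: one word per register.
int SingleRangeImm8(int first_code, int last_code) {
  return last_code - first_code + 1;
}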
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
  uint64_t i;
  memcpy(&i, &d, 8);

@@ -2252,14 +2360,6 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
 }
void Assembler::vneg(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
0x5*B9 | B8 | B6 | src.code());
}
void Assembler::vabs(const DwVfpRegister dst,
                     const DwVfpRegister src,
                     const Condition cond) {

@@ -2408,6 +2508,11 @@ bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
 }
void Assembler::BlockConstPoolFor(int instructions) {
BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
}
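stop() above is the typical caller: it relies on this helper to keep a fixed-length two-word sequence unbroken. A sketch of that usage, written as a hypothetical Assembler member (so the protected emit() is reachable):

void Assembler::EmitStopPair(const char* msg, Condition cond) {  // hypothetical
  BlockConstPoolFor(2);  // no constant pool inside the next two words
  svc(kStopCode + kMaxStopCode, cond);
  emit(reinterpret_cast<Instr>(msg));  // the simulator reads the address here
}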
// Debugging.

void Assembler::RecordJSReturn() {
  positions_recorder()->WriteRecordedPositions();
@@ -2471,8 +2576,8 @@ void Assembler::GrowBuffer() {
   // to relocate any emitted relocation entries.

   // Relocate pending relocation entries.
-  for (int i = 0; i < num_pending_reloc_info_; i++) {
-    RelocInfo& rinfo = pending_reloc_info_[i];
+  for (int i = 0; i < num_prinfo_; i++) {
+    RelocInfo& rinfo = prinfo_[i];
     ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
            rinfo.rmode() != RelocInfo::POSITION);
     if (rinfo.rmode() != RelocInfo::JS_RETURN) {
@@ -2486,7 +2591,7 @@ void Assembler::db(uint8_t data) {
   // No relocation info should be pending while using db. db is used
   // to write pure data with no pointers and the constant pool should
   // be emitted before using db.
-  ASSERT(num_pending_reloc_info_ == 0);
+  ASSERT(num_prinfo_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint8_t*>(pc_) = data;
   pc_ += sizeof(uint8_t);
@@ -2497,7 +2602,7 @@ void Assembler::dd(uint32_t data) {
   // No relocation info should be pending while using dd. dd is used
   // to write pure data with no pointers and the constant pool should
   // be emitted before using dd.
-  ASSERT(num_pending_reloc_info_ == 0);
+  ASSERT(num_prinfo_ == 0);
   CheckBuffer();
   *reinterpret_cast<uint32_t*>(pc_) = data;
   pc_ += sizeof(uint32_t);
@@ -2514,14 +2619,11 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
            || RelocInfo::IsPosition(rmode));
     // These modes do not need an entry in the constant pool.
   } else {
-    ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
-    if (num_pending_reloc_info_ == 0) {
-      first_const_pool_use_ = pc_offset();
-    }
-    pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
+    ASSERT(num_prinfo_ < kMaxNumPRInfo);
+    prinfo_[num_prinfo_++] = rinfo;
     // Make sure the constant pool is not emitted in place of the next
     // instruction for which we just recorded relocation info.
-    BlockConstPoolFor(1);
+    BlockConstPoolBefore(pc_offset() + kInstrSize);
   }
   if (rinfo.rmode() != RelocInfo::NONE) {
     // Don't record external references unless the heap will be serialized.

@@ -2531,129 +2633,121 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
         Serializer::TooLateToEnableNow();
       }
 #endif
-    if (!Serializer::enabled() && !emit_debug_code()) {
+    if (!Serializer::enabled() && !FLAG_debug_code) {
       return;
     }
   }
   ASSERT(buffer_space() >= kMaxRelocSize);  // too late to grow buffer here
-  if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
-    ASSERT(ast_id_for_reloc_info_ != kNoASTId);
-    RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_);
-    ast_id_for_reloc_info_ = kNoASTId;
-    reloc_info_writer.Write(&reloc_info_with_ast_id);
-  } else {
-    reloc_info_writer.Write(&rinfo);
-  }
+  reloc_info_writer.Write(&rinfo);
   }
 }
-void Assembler::BlockConstPoolFor(int instructions) {
-  int pc_limit = pc_offset() + instructions * kInstrSize;
-  if (no_const_pool_before_ < pc_limit) {
-    // If there are some pending entries, the constant pool cannot be blocked
-    // further than first_const_pool_use_ + kMaxDistToPool.
-    ASSERT((num_pending_reloc_info_ == 0) ||
-           (pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
-    no_const_pool_before_ = pc_limit;
-  }
-
-  if (next_buffer_check_ < no_const_pool_before_) {
-    next_buffer_check_ = no_const_pool_before_;
-  }
-}
-
-void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
-  // Some short sequences of instructions must not be broken up by constant
-  // pool emission; such sequences are protected by calls to BlockConstPoolFor
-  // and BlockConstPoolScope.
-  if (is_const_pool_blocked()) {
-    // Something is wrong if emission is forced and blocked at the same time.
-    ASSERT(!force_emit);
-    return;
-  }
-
-  // There is nothing to do if there are no pending constant pool entries.
-  if (num_pending_reloc_info_ == 0) {
-    // Calculate the offset of the next check.
-    next_buffer_check_ = pc_offset() + kCheckPoolInterval;
-    return;
-  }
-
-  // We emit a constant pool when:
-  //  * requested to do so by parameter force_emit (e.g. after each function).
-  //  * the distance to the first instruction accessing the constant pool is
-  //    kAvgDistToPool or more.
-  //  * no jump is required and the distance to the first instruction accessing
-  //    the constant pool is at least kMaxDistToPool / 2.
-  ASSERT(first_const_pool_use_ >= 0);
-  int dist = pc_offset() - first_const_pool_use_;
-  if (!force_emit && dist < kAvgDistToPool &&
-      (require_jump || (dist < (kMaxDistToPool / 2)))) {
-    return;
-  }
-
-  // Check that the code buffer is large enough before emitting the constant
-  // pool (include the jump over the pool and the constant pool marker and
-  // the gap to the relocation information).
-  int jump_instr = require_jump ? kInstrSize : 0;
-  int needed_space = jump_instr + kInstrSize +
-                     num_pending_reloc_info_ * kInstrSize + kGap;
-  while (buffer_space() <= needed_space) GrowBuffer();
-
-  {
-    // Block recursive calls to CheckConstPool.
-    BlockConstPoolScope block_const_pool(this);
-
-    // Emit jump over constant pool if necessary.
-    Label after_pool;
-    if (require_jump) {
-      b(&after_pool);
-    }
-
-    RecordComment("[ Constant Pool");
-
-    // Put down constant pool marker "Undefined instruction" as specified by
-    // A5.6 (ARMv7) Instruction set encoding.
-    emit(kConstantPoolMarker | num_pending_reloc_info_);
-
-    // Emit constant pool entries.
-    for (int i = 0; i < num_pending_reloc_info_; i++) {
-      RelocInfo& rinfo = pending_reloc_info_[i];
-      ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
-             rinfo.rmode() != RelocInfo::POSITION &&
-             rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
-      Instr instr = instr_at(rinfo.pc());
-
-      // Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
-      ASSERT(IsLdrPcImmediateOffset(instr) &&
-             GetLdrRegisterImmediateOffset(instr) == 0);
-
-      int delta = pc_ - rinfo.pc() - kPcLoadDelta;
-      // 0 is the smallest delta:
-      //   ldr rd, [pc, #0]
-      //   constant pool marker
-      //   data
-      ASSERT(is_uint12(delta));
-
-      instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
-      emit(rinfo.data());
-    }
-
-    num_pending_reloc_info_ = 0;
-    first_const_pool_use_ = -1;
-
-    RecordComment("]");
-
-    if (after_pool.is_linked()) {
-      bind(&after_pool);
-    }
-  }
-
-  // Since a constant pool was just emitted, move the check offset forward by
-  // the standard interval.
-  next_buffer_check_ = pc_offset() + kCheckPoolInterval;
-}
+void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
+  // Calculate the offset of the next check. It will be overwritten
+  // when a const pool is generated or when const pools are being
+  // blocked for a specific range.
+  next_buffer_check_ = pc_offset() + kCheckConstInterval;
+
+  // There is nothing to do if there are no pending relocation info entries.
+  if (num_prinfo_ == 0) return;
+
+  // We emit a constant pool at regular intervals of about kDistBetweenPools
+  // or when requested by parameter force_emit (e.g. after each function).
+  // We prefer not to emit a jump unless the max distance is reached or if we
+  // are running low on slots, which can happen if a lot of constants are being
+  // emitted (e.g. --debug-code and many static references).
+  int dist = pc_offset() - last_const_pool_end_;
+  if (!force_emit && dist < kMaxDistBetweenPools &&
+      (require_jump || dist < kDistBetweenPools) &&
+      // TODO(1236125): Cleanup the "magic" number below. We know that
+      // the code generation will test every kCheckConstIntervalInst.
+      // Thus we are safe as long as we generate less than 7 constant
+      // entries per instruction.
+      (num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
+    return;
+  }
+
+  // If we did not return by now, we need to emit the constant pool soon.
+
+  // However, some small sequences of instructions must not be broken up by the
+  // insertion of a constant pool; such sequences are protected by setting
+  // either const_pool_blocked_nesting_ or no_const_pool_before_, which are
+  // both checked here. Also, recursive calls to CheckConstPool are blocked by
+  // no_const_pool_before_.
+  if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
+    // Emission is currently blocked; make sure we try again as soon as
+    // possible.
+    if (const_pool_blocked_nesting_ > 0) {
+      next_buffer_check_ = pc_offset() + kInstrSize;
+    } else {
+      next_buffer_check_ = no_const_pool_before_;
+    }
+
+    // Something is wrong if emission is forced and blocked at the same time.
+    ASSERT(!force_emit);
+    return;
+  }
+
+  int jump_instr = require_jump ? kInstrSize : 0;
+
+  // Check that the code buffer is large enough before emitting the constant
+  // pool and relocation information (include the jump over the pool and the
+  // constant pool marker).
+  int max_needed_space =
+      jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
+  while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
+
+  // Block recursive calls to CheckConstPool.
+  BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
+                       num_prinfo_*kInstrSize);
+  // Don't bother to check for the emit calls below.
+  next_buffer_check_ = no_const_pool_before_;
+
+  // Emit jump over constant pool if necessary.
+  Label after_pool;
+  if (require_jump) b(&after_pool);
+
+  RecordComment("[ Constant Pool");
+
+  // Put down constant pool marker "Undefined instruction" as specified by
+  // A3.1 Instruction set encoding.
+  emit(0x03000000 | num_prinfo_);
+
+  // Emit constant pool entries.
+  for (int i = 0; i < num_prinfo_; i++) {
+    RelocInfo& rinfo = prinfo_[i];
+    ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
+           rinfo.rmode() != RelocInfo::POSITION &&
+           rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
+    Instr instr = instr_at(rinfo.pc());
+
+    // Instruction to patch must be a ldr/str [pc, #offset].
+    // P and U set, B and W clear, Rn == pc, offset12 still 0.
+    ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
+           (2*B25 | P | U | pc.code()*B16));
+    int delta = pc_ - rinfo.pc() - 8;
+    ASSERT(delta >= -4);  // instr could be ldr pc, [pc, #-4] followed by targ32
+    if (delta < 0) {
+      instr &= ~U;
+      delta = -delta;
+    }
+    ASSERT(is_uint12(delta));
+    instr_at_put(rinfo.pc(), instr + delta);
+    emit(rinfo.data());
+  }
+  num_prinfo_ = 0;
+  last_const_pool_end_ = pc_offset();
+
+  RecordComment("]");
+
+  if (after_pool.is_linked()) {
+    bind(&after_pool);
+  }
+
+  // Since a constant pool was just emitted, move the check offset forward by
+  // the standard interval.
+  next_buffer_check_ = pc_offset() + kCheckConstInterval;
+}
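The constants driving this policy are declared in assembler-arm.h below. A back-of-envelope check (not part of the source) that they keep every pc-relative constant load inside the ldr instruction's 4KB offset range:

#include <cassert>

int main() {
  const int KB = 1024;
  const int kBufferCheckInterval = 1 * KB / 2;
  const int kDistBetweenPools = 1 * KB;                                // preferred spacing
  const int kMaxDistBetweenPools = 4 * KB - 2 * kBufferCheckInterval;  // 3KB cap
  // Even if emission slips by up to two check intervals past the cap,
  // the load stays within the 4KB ldr offset limit.
  assert(kMaxDistBetweenPools + 2 * kBufferCheckInterval <= 4 * KB);
  assert(kDistBetweenPools < kMaxDistBetweenPools);
  return 0;
}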

263  deps/v8/src/arm/assembler-arm.h

@@ -32,7 +32,7 @@
 // The original source code covered by the above license has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.

 // A light-weight ARM Assembler
 // Generates user mode instructions for the ARM architecture up to version 5
@@ -72,7 +72,6 @@ namespace internal {
 struct Register {
   static const int kNumRegisters = 16;
   static const int kNumAllocatableRegisters = 8;
-  static const int kSizeInBytes = 4;

   static int ToAllocationIndex(Register reg) {
     ASSERT(reg.code() < kNumAllocatableRegisters);

@@ -167,14 +166,13 @@ struct SwVfpRegister {
 // Double word VFP register.
 struct DwVfpRegister {
+  // d0 has been excluded from allocation. This is following ia32
+  // where xmm0 is excluded. This should be revisited.
+  // Currently d0 is used as a scratch register.
+  // d1 has also been excluded from allocation to be used as a scratch
+  // register as well.
   static const int kNumRegisters = 16;
-  // A few double registers are reserved: one as a scratch register and one to
-  // hold 0.0, that does not fit in the immediate field of vmov instructions.
-  //   d14: 0.0
-  //   d15: scratch register.
-  static const int kNumReservedRegisters = 2;
-  static const int kNumAllocatableRegisters = kNumRegisters -
-      kNumReservedRegisters;
+  static const int kNumAllocatableRegisters = 15;

   static int ToAllocationIndex(DwVfpRegister reg) {
     ASSERT(reg.code() != 0);

@@ -189,7 +187,6 @@ struct DwVfpRegister {
   static const char* AllocationIndexToString(int index) {
     ASSERT(index >= 0 && index < kNumAllocatableRegisters);
     const char* const names[] = {
-      "d0",
       "d1",
       "d2",
       "d3",

@@ -202,7 +199,9 @@ struct DwVfpRegister {
       "d10",
       "d11",
       "d12",
-      "d13"
+      "d13",
+      "d14",
+      "d15"
     };
     return names[index];
   }
@@ -303,11 +302,6 @@ const DwVfpRegister d13 = { 13 };
 const DwVfpRegister d14 = { 14 };
 const DwVfpRegister d15 = { 15 };

-// Aliases for double registers.
-const DwVfpRegister kFirstCalleeSavedDoubleReg = d8;
-const DwVfpRegister kLastCalleeSavedDoubleReg = d15;
-const DwVfpRegister kDoubleRegZero = d14;

 // Coprocessor register
 struct CRegister {

@@ -378,6 +372,7 @@ class Operand BASE_EMBEDDED {
   INLINE(explicit Operand(int32_t immediate,
          RelocInfo::Mode rmode = RelocInfo::NONE));
   INLINE(explicit Operand(const ExternalReference& f));
+  INLINE(explicit Operand(const char* s));
   explicit Operand(Handle<Object> handle);
   INLINE(explicit Operand(Smi* value));
@@ -394,11 +389,8 @@ class Operand BASE_EMBEDDED {
   INLINE(bool is_reg() const);

   // Return true if this operand fits in one instruction so that no
-  // 2-instruction solution with a load into the ip register is necessary. If
-  // the instruction this operand is used for is a MOV or MVN instruction the
-  // actual instruction to use is required for this calculation. For other
-  // instructions instr is ignored.
-  bool is_single_instruction(Instr instr = 0) const;
+  // 2-instruction solution with a load into the ip register is necessary.
+  bool is_single_instruction() const;
   bool must_use_constant_pool() const;

   inline int32_t immediate() const {
@@ -455,7 +447,6 @@ class MemOperand BASE_EMBEDDED {
   Register rn() const { return rn_; }
   Register rm() const { return rm_; }
-  AddrMode am() const { return am_; }

   bool OffsetIsUint12Encodable() const {
     return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
@@ -478,98 +469,43 @@ class CpuFeatures : public AllStatic {
  public:
   // Detect features of the target CPU. Set safe defaults if the serializer
   // is enabled (snapshots must be portable).
-  static void Probe();
+  static void Probe(bool portable);

   // Check whether a feature is supported by the target CPU.
   static bool IsSupported(CpuFeature f) {
-    ASSERT(initialized_);
     if (f == VFP3 && !FLAG_enable_vfp3) return false;
     return (supported_ & (1u << f)) != 0;
   }

-#ifdef DEBUG
   // Check whether a feature is currently enabled.
   static bool IsEnabled(CpuFeature f) {
-    ASSERT(initialized_);
-    Isolate* isolate = Isolate::UncheckedCurrent();
-    if (isolate == NULL) {
-      // When no isolate is available, work as if we're running in
-      // release mode.
-      return IsSupported(f);
-    }
-    unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
-    return (enabled & (1u << f)) != 0;
+    return (enabled_ & (1u << f)) != 0;
   }
-#endif

   // Enable a specified feature within a scope.
   class Scope BASE_EMBEDDED {
 #ifdef DEBUG
    public:
     explicit Scope(CpuFeature f) {
-      unsigned mask = 1u << f;
       ASSERT(CpuFeatures::IsSupported(f));
       ASSERT(!Serializer::enabled() ||
-             (CpuFeatures::found_by_runtime_probing_ & mask) == 0);
-      isolate_ = Isolate::UncheckedCurrent();
-      old_enabled_ = 0;
-      if (isolate_ != NULL) {
-        old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
-        isolate_->set_enabled_cpu_features(old_enabled_ | mask);
-      }
-    }
-    ~Scope() {
-      ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
-      if (isolate_ != NULL) {
-        isolate_->set_enabled_cpu_features(old_enabled_);
-      }
-    }
+             (found_by_runtime_probing_ & (1u << f)) == 0);
+      old_enabled_ = CpuFeatures::enabled_;
+      CpuFeatures::enabled_ |= 1u << f;
+    }
+    ~Scope() { CpuFeatures::enabled_ = old_enabled_; }

    private:
-    Isolate* isolate_;
     unsigned old_enabled_;
 #else
    public:
     explicit Scope(CpuFeature f) {}
 #endif
   };

-  class TryForceFeatureScope BASE_EMBEDDED {
-   public:
-    explicit TryForceFeatureScope(CpuFeature f)
-        : old_supported_(CpuFeatures::supported_) {
-      if (CanForce()) {
-        CpuFeatures::supported_ |= (1u << f);
-      }
-    }
-
-    ~TryForceFeatureScope() {
-      if (CanForce()) {
-        CpuFeatures::supported_ = old_supported_;
-      }
-    }
-
-   private:
-    static bool CanForce() {
-      // It's only safe to temporarily force support of CPU features
-      // when there's only a single isolate, which is guaranteed when
-      // the serializer is enabled.
-      return Serializer::enabled();
-    }
-
-    const unsigned old_supported_;
-  };
-
  private:
-#ifdef DEBUG
-  static bool initialized_;
-#endif
   static unsigned supported_;
+  static unsigned enabled_;
   static unsigned found_by_runtime_probing_;
-
-  DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
 };
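Typical use of the Scope class, following the pattern used throughout the ARM code generators (a sketch with a hypothetical function name): check IsSupported first, then open a Scope so that IsEnabled(f) holds for the emitted block in debug builds.

void GenerateVfpLoad(/* codegen context, hypothetical */) {
  if (CpuFeatures::IsSupported(VFP3)) {
    CpuFeatures::Scope scope(VFP3);  // enables VFP3 for this block
    // ... emit vldr/vstr/vcvt instructions here ...
  } else {
    // ... emit a core-register fallback sequence ...
  }
}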
@@ -597,7 +533,7 @@ extern const Instr kAndBicFlip;

-class Assembler : public AssemblerBase {
+class Assembler : public Malloced {
  public:
   // Create an assembler. Instructions and relocation information are emitted
   // into a buffer, with the instructions starting from the beginning and the

@@ -612,12 +548,9 @@ class Assembler : public AssemblerBase {
   // for code generation and assumes its size to be buffer_size. If the buffer
   // is too small, a fatal error occurs. No deallocation of the buffer is done
   // upon destruction of the assembler.
-  Assembler(Isolate* isolate, void* buffer, int buffer_size);
+  Assembler(void* buffer, int buffer_size);
   ~Assembler();

-  // Overrides the default provided by FLAG_debug_code.
-  void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
-
   // GetCode emits any pending (non-emitted) code and fills the descriptor
   // desc. GetCode() is idempotent; it returns the same result if no other
   // Assembler functions are invoked in between GetCode() calls.
@@ -956,6 +889,16 @@ class Assembler : public AssemblerBase {
   void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
             LFlag l = Short);  // v5 and above

+  void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+           LFlag l = Short, Condition cond = al);
+  void stc(Coprocessor coproc, CRegister crd, Register base, int option,
+           LFlag l = Short, Condition cond = al);
+  void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
+            LFlag l = Short);  // v5 and above
+  void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
+            LFlag l = Short);  // v5 and above
+
   // Support for VFP.
   // All these APIs support S0 to S31 and D0 to D15.
   // Currently these APIs do not support extended D registers, i.e, D16 to D31.
@@ -994,30 +937,6 @@ class Assembler : public AssemblerBase {
                       const MemOperand& dst,
                       const Condition cond = al);

-  void vldm(BlockAddrMode am,
-            Register base,
-            DwVfpRegister first,
-            DwVfpRegister last,
-            Condition cond = al);
-
-  void vstm(BlockAddrMode am,
-            Register base,
-            DwVfpRegister first,
-            DwVfpRegister last,
-            Condition cond = al);
-
-  void vldm(BlockAddrMode am,
-            Register base,
-            SwVfpRegister first,
-            SwVfpRegister last,
-            Condition cond = al);
-
-  void vstm(BlockAddrMode am,
-            Register base,
-            SwVfpRegister first,
-            SwVfpRegister last,
-            Condition cond = al);
-
   void vmov(const DwVfpRegister dst,
             double imm,
             const Condition cond = al);
@@ -1070,9 +989,6 @@ class Assembler : public AssemblerBase {
             VFPConversionMode mode = kDefaultRoundToZero,
             const Condition cond = al);

-  void vneg(const DwVfpRegister dst,
-            const DwVfpRegister src,
-            const Condition cond = al);
   void vabs(const DwVfpRegister dst,
             const DwVfpRegister src,
             const Condition cond = al);
@@ -1140,13 +1056,8 @@ class Assembler : public AssemblerBase {
   void jmp(Label* L) { b(L, al); }

   // Check the code size generated from label to here.
-  int SizeOfCodeGeneratedSince(Label* label) {
-    return pc_offset() - label->pos();
-  }
-
-  // Check the number of instructions generated from label to here.
-  int InstructionsGeneratedSince(Label* label) {
-    return SizeOfCodeGeneratedSince(label) / kInstrSize;
+  int InstructionsGeneratedSince(Label* l) {
+    return (pc_offset() - l->pos()) / kInstrSize;
   }

   // Check whether an immediate fits an addressing mode 1 instruction.
@@ -1168,6 +1079,10 @@ class Assembler : public AssemblerBase {
     DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
   };

+  // Postpone the generation of the constant pool for the specified number of
+  // instructions.
+  void BlockConstPoolFor(int instructions);
+
   // Debugging

   // Mark address of the ExitJSFrame code.
@@ -1176,10 +1091,6 @@ class Assembler : public AssemblerBase {
   // Mark address of a debug break slot.
   void RecordDebugBreakSlot();

-  // Record the AST id of the CallIC being compiled, so that it can be placed
-  // in the relocation information.
-  void RecordAstId(unsigned ast_id) { ast_id_for_reloc_info_ = ast_id; }
-
   // Record a comment relocation entry that can be used by a disassembler.
   // Use --code-comments to enable.
   void RecordComment(const char* msg);

@@ -1195,6 +1106,12 @@ class Assembler : public AssemblerBase {
   PositionsRecorder* positions_recorder() { return &positions_recorder_; }
bool can_peephole_optimize(int instructions) {
if (!allow_peephole_optimization_) return false;
if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
}
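The guard above reads as three independent safety conditions; restated as a standalone predicate (illustrative, not V8 code):

// The last `instructions` words may be rewritten only if peephole optimization
// is enabled, no label was bound inside the window (a jump target must keep
// its instruction), and no relocation record points into the window (patched
// instructions must not be moved or deleted).
bool CanRewriteWindow(int pc_offset, int last_bound_pos, int last_reloc_pc,
                      int instructions, bool allow_peephole) {
  const int kInstrSize = 4;
  int window_start = pc_offset - instructions * kInstrSize;
  return allow_peephole &&
         last_bound_pos <= window_start &&
         last_reloc_pc <= window_start;
}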
  // Read/patch instructions
  static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
  static void instr_at_put(byte* pc, Instr instr) {
@@ -1227,27 +1144,10 @@ class Assembler : public AssemblerBase {
   static int GetCmpImmediateRawImmediate(Instr instr);
   static bool IsNop(Instr instr, int type = NON_MARKING_NOP);

-  // Constants in pools are accessed via pc relative addressing, which can
-  // reach +/-4KB thereby defining a maximum distance between the instruction
-  // and the accessed constant.
-  static const int kMaxDistToPool = 4*KB;
-  static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
-
-  // Postpone the generation of the constant pool for the specified number of
-  // instructions.
-  void BlockConstPoolFor(int instructions);
-
-  // Check if it is time to emit a constant pool.
+  // Check if it is time to emit a constant pool for pending reloc info entries.
   void CheckConstPool(bool force_emit, bool require_jump);
 protected:
-  // Relocation for a type-recording IC has the AST id added to it. This
-  // member variable is a way to pass the information from the call site to
-  // the relocation info.
-  unsigned ast_id_for_reloc_info_;
-
-  bool emit_debug_code() const { return emit_debug_code_; }
-
   int buffer_space() const { return reloc_info_writer.pos() - pc_; }
   // Read/patch instructions

@@ -1262,37 +1162,18 @@ class Assembler : public AssemblerBase {
   // Patch branch instruction at pos to branch to given branch target pos
   void target_at_put(int pos, int target_pos);

-  // Prevent constant pool emission until EndBlockConstPool is called.
-  // Calls to this function can be nested but must be followed by an equal
-  // number of calls to EndBlockConstPool.
-  void StartBlockConstPool() {
-    if (const_pool_blocked_nesting_++ == 0) {
-      // Prevent constant pool checks happening by setting the next check to
-      // the biggest possible offset.
-      next_buffer_check_ = kMaxInt;
-    }
-  }
-
-  // Resume constant pool emission. Needs to be called as many times as
-  // StartBlockConstPool to have an effect.
-  void EndBlockConstPool() {
-    if (--const_pool_blocked_nesting_ == 0) {
-      // Check the constant pool hasn't been blocked for too long.
-      ASSERT((num_pending_reloc_info_ == 0) ||
-             (pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
-      // Two cases:
-      //  * no_const_pool_before_ >= next_buffer_check_ and the emission is
-      //    still blocked
-      //  * no_const_pool_before_ < next_buffer_check_ and the next emit will
-      //    trigger a check.
-      next_buffer_check_ = no_const_pool_before_;
-    }
-  }
-
-  bool is_const_pool_blocked() const {
-    return (const_pool_blocked_nesting_ > 0) ||
-           (pc_offset() < no_const_pool_before_);
-  }
+  // Block the emission of the constant pool before pc_offset.
+  void BlockConstPoolBefore(int pc_offset) {
+    if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
+  }
+
+  void StartBlockConstPool() {
+    const_pool_blocked_nesting_++;
+  }
+  void EndBlockConstPool() {
+    const_pool_blocked_nesting_--;
+  }
+  bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; }

 private:
   // Code buffer:

@@ -1302,6 +1183,9 @@ class Assembler : public AssemblerBase {
   // True if the assembler owns the buffer, false if buffer is external.
   bool own_buffer_;

+  // Buffer size and constant pool distance are checked together at regular
+  // intervals of kBufferCheckInterval emitted bytes.
+  static const int kBufferCheckInterval = 1*KB/2;
   int next_buffer_check_;  // pc offset of next buffer check
   // Code generation

@@ -1326,41 +1210,40 @@ class Assembler : public AssemblerBase {
   // expensive. By default we only check again once a number of instructions
   // has been generated. That also means that the sizing of the buffers is not
   // an exact science, and that we rely on some slop to not overrun buffers.
-  static const int kCheckPoolIntervalInst = 32;
-  static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
-
-  // Average distance between a constant pool and the first instruction
-  // accessing the constant pool. Longer distance should result in less I-cache
-  // pollution.
-  // In practice the distance will be smaller since constant pool emission is
-  // forced after function return and sometimes after unconditional branches.
-  static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
+  static const int kCheckConstIntervalInst = 32;
+  static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
+
+  // Pools are emitted after function return and in dead code at (more or less)
+  // regular intervals of kDistBetweenPools bytes.
+  static const int kDistBetweenPools = 1*KB;
+
+  // Constants in pools are accessed via pc relative addressing, which can
+  // reach +/-4KB thereby defining a maximum distance between the instruction
+  // and the accessed constant. We satisfy this constraint by limiting the
+  // distance between pools.
+  static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;

   // Emission of the constant pool may be blocked in some code sequences.
   int const_pool_blocked_nesting_;  // Block emission if this is not zero.
   int no_const_pool_before_;  // Block emission before this pc offset.

-  // Keep track of the first instruction requiring a constant pool entry
-  // since the previous constant pool was emitted.
-  int first_const_pool_use_;
+  // Keep track of the last emitted pool to guarantee a maximal distance.
+  int last_const_pool_end_;  // pc offset following the last constant pool

   // Relocation info generation
   // Each relocation is encoded as a variable size value
   static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
   RelocInfoWriter reloc_info_writer;

   // Relocation info records are also used during code generation as temporary
   // containers for constants and code target addresses until they are emitted
   // to the constant pool. These pending relocation info records are temporarily
   // stored in a separate buffer until a constant pool is emitted.
   // If every instruction in a long sequence is accessing the pool, we need one
   // pending relocation entry per instruction.
-  // the buffer of pending relocation info
-  RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
-  // number of pending reloc info entries in the buffer
-  int num_pending_reloc_info_;
+  static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
+  RelocInfo prinfo_[kMaxNumPRInfo];  // the buffer of pending relocation info
+  int num_prinfo_;  // number of pending reloc info entries in the buffer

   // The bound position, before this we cannot do instruction elimination.
   int last_bound_pos_;

@@ -1392,7 +1275,7 @@ class Assembler : public AssemblerBase {
   friend class BlockConstPoolScope;

   PositionsRecorder positions_recorder_;
-  bool emit_debug_code_;
+  bool allow_peephole_optimization_;
   friend class PositionsRecorder;
   friend class EnsureSpace;
 };

169  deps/v8/src/arm/builtins-arm.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:

@@ -29,7 +29,7 @@
 #if defined(V8_TARGET_ARCH_ARM)

-#include "codegen.h"
+#include "codegen-inl.h"
 #include "debug.h"
 #include "deoptimizer.h"
 #include "full-codegen.h"

@@ -68,7 +68,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
   // JumpToExternalReference expects r0 to contain the number of arguments
   // including the receiver and the extra arguments.
   __ add(r0, r0, Operand(num_extra_args + 1));
-  __ JumpToExternalReference(ExternalReference(id, masm->isolate()));
+  __ JumpToExternalReference(ExternalReference(id));
 }
@@ -310,7 +310,6 @@ static void AllocateJSArray(MacroAssembler* masm,
 // construct call and normal call.
 static void ArrayNativeCode(MacroAssembler* masm,
                             Label* call_generic_code) {
-  Counters* counters = masm->isolate()->counters();
   Label argc_one_or_more, argc_two_or_more;

   // Check for array construction with zero arguments or one.

@@ -326,7 +325,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
                       r5,
                       JSArray::kPreallocatedArrayElements,
                       call_generic_code);
-  __ IncrementCounter(counters->array_function_native(), 1, r3, r4);
+  __ IncrementCounter(&Counters::array_function_native, 1, r3, r4);
   // Setup return value, remove receiver from stack and return.
   __ mov(r0, r2);
   __ add(sp, sp, Operand(kPointerSize));

@@ -362,7 +361,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
                   r7,
                   true,
                   call_generic_code);
-  __ IncrementCounter(counters->array_function_native(), 1, r2, r4);
+  __ IncrementCounter(&Counters::array_function_native, 1, r2, r4);
   // Setup return value, remove receiver and argument from stack and return.
   __ mov(r0, r3);
   __ add(sp, sp, Operand(2 * kPointerSize));

@@ -386,7 +385,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
                   r7,
                   false,
                   call_generic_code);
-  __ IncrementCounter(counters->array_function_native(), 1, r2, r6);
+  __ IncrementCounter(&Counters::array_function_native, 1, r2, r6);

   // Fill arguments as array elements. Copy from the top of the stack (last
   // element) to the array backing store filling it backwards. Note:

@@ -429,7 +428,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
   GenerateLoadArrayFunction(masm, r1);

   if (FLAG_debug_code) {
-    // Initial map for the builtin Array functions should be maps.
+    // Initial map for the builtin Array function should be a map.
     __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
     __ tst(r2, Operand(kSmiTagMask));
     __ Assert(ne, "Unexpected initial map for Array function");

@@ -443,9 +442,8 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
   // Jump to the generic array code if the specialized code cannot handle
   // the construction.
   __ bind(&generic_array_code);
-  Handle<Code> array_code =
-      masm->isolate()->builtins()->ArrayCodeGeneric();
+  Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
+  Handle<Code> array_code(code);
   __ Jump(array_code, RelocInfo::CODE_TARGET);
 }
@@ -460,8 +458,11 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
   Label generic_constructor;

   if (FLAG_debug_code) {
-    // The array construct code is only set for the builtin and internal
-    // Array functions which always have a map.
+    // The array construct code is only set for the builtin Array function,
+    // which always has a map.
+    GenerateLoadArrayFunction(masm, r2);
+    __ cmp(r1, r2);
+    __ Assert(eq, "Unexpected Array function");
     // Initial map for the builtin Array function should be a map.
     __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
     __ tst(r2, Operand(kSmiTagMask));

@@ -476,8 +477,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
   // Jump to the generic construct code in case the specialized code cannot
   // handle the construction.
   __ bind(&generic_constructor);
-  Handle<Code> generic_construct_stub =
-      masm->isolate()->builtins()->JSConstructStubGeneric();
+  Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
+  Handle<Code> generic_construct_stub(code);
   __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
 }
@@ -490,8 +491,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   //  -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
   //  -- sp[argc * 4]           : receiver
   // -----------------------------------
-  Counters* counters = masm->isolate()->counters();
-  __ IncrementCounter(counters->string_ctor_calls(), 1, r2, r3);
+  __ IncrementCounter(&Counters::string_ctor_calls, 1, r2, r3);

   Register function = r1;
   if (FLAG_debug_code) {

@@ -521,7 +521,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
                                  r5,  // Scratch.
                                  false,  // Is it a Smi?
                                  &not_cached);
-  __ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
+  __ IncrementCounter(&Counters::string_ctor_cached_number, 1, r3, r4);
   __ bind(&argument_is_string);

   // ----------- S t a t e -------------

@@ -575,16 +575,16 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   __ tst(r3, Operand(kIsNotStringMask));
   __ b(ne, &convert_argument);
   __ mov(argument, r0);
-  __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
+  __ IncrementCounter(&Counters::string_ctor_conversions, 1, r3, r4);
   __ b(&argument_is_string);

   // Invoke the conversion builtin and put the result into r2.
   __ bind(&convert_argument);
   __ push(function);  // Preserve the function.
-  __ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
+  __ IncrementCounter(&Counters::string_ctor_conversions, 1, r3, r4);
   __ EnterInternalFrame();
   __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
+  __ InvokeBuiltin(Builtins::TO_STRING, CALL_JS);
   __ LeaveInternalFrame();
   __ pop(function);
   __ mov(argument, r0);

@@ -600,7 +600,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
   // At this point the argument is already a string. Call runtime to
   // create a string wrapper.
   __ bind(&gc_required);
-  __ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
+  __ IncrementCounter(&Counters::string_ctor_gc_required, 1, r3, r4);
   __ EnterInternalFrame();
   __ push(argument);
   __ CallRuntime(Runtime::kNewStringWrapper, 1);
@@ -619,7 +619,8 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
   Label non_function_call;
   // Check that the function is not a smi.
-  __ JumpIfSmi(r1, &non_function_call);
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &non_function_call);
   // Check that the function is a JSFunction.
   __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
   __ b(ne, &non_function_call);

@@ -635,8 +636,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
   // Set expected number of arguments to zero (not changing r0).
   __ mov(r2, Operand(0, RelocInfo::NONE));
   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
-  __ SetCallKind(r5, CALL_AS_METHOD);
-  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+  __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
           RelocInfo::CODE_TARGET);
 }
@ -647,8 +647,6 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Should never count constructions for api objects. // Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions); ASSERT(!is_api_function || !count_constructions);
Isolate* isolate = masm->isolate();
// Enter a construct frame. // Enter a construct frame.
__ EnterConstructFrame(); __ EnterConstructFrame();
@ -664,7 +662,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Label undo_allocation; Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT #ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp = ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address(isolate); ExternalReference::debug_step_in_fp_address();
__ mov(r2, Operand(debug_step_in_fp)); __ mov(r2, Operand(debug_step_in_fp));
__ ldr(r2, MemOperand(r2)); __ ldr(r2, MemOperand(r2));
__ tst(r2, r2); __ tst(r2, r2);
@ -674,7 +672,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Load the initial map and verify that it is in fact a map. // Load the initial map and verify that it is in fact a map.
// r1: constructor function // r1: constructor function
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ JumpIfSmi(r2, &rt_call); __ tst(r2, Operand(kSmiTagMask));
__ b(eq, &rt_call);
__ CompareObjectType(r2, r3, r4, MAP_TYPE); __ CompareObjectType(r2, r3, r4, MAP_TYPE);
__ b(ne, &rt_call); __ b(ne, &rt_call);
@ -909,15 +908,14 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function // r1: constructor function
if (is_api_function) { if (is_api_function) {
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
Handle<Code> code = Handle<Code> code = Handle<Code>(
masm->isolate()->builtins()->HandleApiCallConstruct(); Builtins::builtin(Builtins::HandleApiCallConstruct));
ParameterCount expected(0); ParameterCount expected(0);
__ InvokeCode(code, expected, expected, __ InvokeCode(code, expected, expected,
RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD); RelocInfo::CODE_TARGET, CALL_FUNCTION);
} else { } else {
ParameterCount actual(r0); ParameterCount actual(r0);
__ InvokeFunction(r1, actual, CALL_FUNCTION, __ InvokeFunction(r1, actual, CALL_FUNCTION);
NullCallWrapper(), CALL_AS_METHOD);
} }
// Pop the function from the stack. // Pop the function from the stack.
@ -944,11 +942,12 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// sp[0]: receiver (newly allocated object) // sp[0]: receiver (newly allocated object)
// sp[1]: constructor function // sp[1]: constructor function
// sp[2]: number of arguments (smi-tagged) // sp[2]: number of arguments (smi-tagged)
__ JumpIfSmi(r0, &use_receiver); __ tst(r0, Operand(kSmiTagMask));
__ b(eq, &use_receiver);
// If the type of the result (stored in its map) is less than // If the type of the result (stored in its map) is less than
// FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense. // FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
__ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE); __ CompareObjectType(r0, r3, r3, FIRST_JS_OBJECT_TYPE);
__ b(ge, &exit); __ b(ge, &exit);
// Throw away the result of the constructor invocation and use the // Throw away the result of the constructor invocation and use the
@ -967,7 +966,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ LeaveConstructFrame(); __ LeaveConstructFrame();
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1)); __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize)); __ add(sp, sp, Operand(kPointerSize));
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2); __ IncrementCounter(&Counters::constructed_objects, 1, r1, r2);
__ Jump(lr); __ Jump(lr);
} }
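The IncrementCounter changes in this helper all follow one pattern: the isolate-scoped counter lookup goes away and the 3.1 static counter table comes back. A self-contained analogue of the two designs (plain C++, not V8 code; all names here are invented for illustration):

// 3.1 style: one process-wide table, addressed statically.
// 3.2 style: one table per Isolate, so independent VMs count independently,
// at the cost of loading the isolate pointer in generated code.
struct StatsCounter { int count = 0; };
struct CounterTable { StatsCounter constructed_objects; };

CounterTable static_counters;               // analogue of &Counters::x
struct Isolate { CounterTable counters; };  // analogue of isolate->counters()

int main() {
  static_counters.constructed_objects.count++;
  Isolate isolate;
  isolate.counters.constructed_objects.count++;
  return 0;
}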
@@ -1007,8 +1006,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
   // Set up the roots register.
-  ExternalReference roots_address =
-      ExternalReference::roots_address(masm->isolate());
+  ExternalReference roots_address = ExternalReference::roots_address();
   __ mov(r10, Operand(roots_address));
   // Push the function and the receiver onto the stack.
@@ -1044,11 +1042,11 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
   // Invoke the code and pass argc as r0.
   __ mov(r0, Operand(r3));
   if (is_construct) {
-    __ Call(masm->isolate()->builtins()->JSConstructCall());
+    __ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
+            RelocInfo::CODE_TARGET);
   } else {
     ParameterCount actual(r0);
-    __ InvokeFunction(r1, actual, CALL_FUNCTION,
-                      NullCallWrapper(), CALL_AS_METHOD);
+    __ InvokeFunction(r1, actual, CALL_FUNCTION);
   }
   // Exit the JS frame and remove the parameters (except function), and return.
@@ -1076,17 +1074,12 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
   // Preserve the function.
   __ push(r1);
-  // Push call kind information.
-  __ push(r5);
   // Push the function on the stack as the argument to the runtime function.
   __ push(r1);
   __ CallRuntime(Runtime::kLazyCompile, 1);
   // Calculate the entry point.
   __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  // Restore call kind information.
-  __ pop(r5);
   // Restore saved function.
   __ pop(r1);
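The r5 save/restore removed here, like the SetCallKind(r5, CALL_AS_METHOD) lines removed elsewhere in this file, belongs to the newer tree's convention of threading a call-kind flag through r5. A sketch of that helper, reconstructed from the call sites visible in this diff (the exact smi encoding is an assumption):

// The call kind rides in r5 as a smi across the trampolines, which is why the
// 3.2 version of Generate_LazyCompile must preserve r5 around the runtime
// call while the 3.1 version has nothing to preserve.
void MacroAssembler::SetCallKind(Register dst, CallKind call_kind) {
  ASSERT(dst.is(r5));  // The calling convention fixes the register.
  if (call_kind == CALL_AS_FUNCTION) {
    mov(dst, Operand(Smi::FromInt(1)));  // Assumed encoding.
  } else {
    mov(dst, Operand(Smi::FromInt(0)));  // CALL_AS_METHOD, assumed.
  }
}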
@@ -1104,17 +1097,12 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
   // Preserve the function.
   __ push(r1);
-  // Push call kind information.
-  __ push(r5);
   // Push the function on the stack as the argument to the runtime function.
   __ push(r1);
   __ CallRuntime(Runtime::kLazyRecompile, 1);
   // Calculate the entry point.
   __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
-  // Restore call kind information.
-  __ pop(r5);
   // Restore saved function.
   __ pop(r1);
@@ -1182,11 +1170,9 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
 void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
-  CpuFeatures::TryForceFeatureScope scope(VFP3);
-  if (!CpuFeatures::IsSupported(VFP3)) {
-    __ Abort("Unreachable code: Cannot optimize without VFP3 support.");
-    return;
-  }
+  // Probe the CPU to set the supported features, because this builtin
+  // may be called before the initialization performs CPU setup.
+  CpuFeatures::Probe(false);
   // Lookup the function in the JavaScript frame and push it as an
   // argument to the on-stack replacement function.
@@ -1232,7 +1218,8 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // r0: actual number of arguments
   Label non_function;
   __ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
-  __ JumpIfSmi(r1, &non_function);
+  __ tst(r1, Operand(kSmiTagMask));
+  __ b(eq, &non_function);
   __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
   __ b(ne, &non_function);
@@ -1246,33 +1233,31 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // Do not transform the receiver for strict mode functions.
   __ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
-  __ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
-  __ tst(r3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+  __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
+  __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
                            kSmiTagSize)));
   __ b(ne, &shift_arguments);
-  // Do not transform the receiver for native (Compilerhints already in r3).
-  __ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-  __ b(ne, &shift_arguments);
   // Compute the receiver in non-strict mode.
   __ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
   __ ldr(r2, MemOperand(r2, -kPointerSize));
   // r0: actual number of arguments
   // r1: function
   // r2: first argument
-  __ JumpIfSmi(r2, &convert_to_object);
+  __ tst(r2, Operand(kSmiTagMask));
+  __ b(eq, &convert_to_object);
-  __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
+  __ LoadRoot(r3, Heap::kNullValueRootIndex);
   __ cmp(r2, r3);
   __ b(eq, &use_global_receiver);
-  __ LoadRoot(r3, Heap::kNullValueRootIndex);
+  __ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
   __ cmp(r2, r3);
   __ b(eq, &use_global_receiver);
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ CompareObjectType(r2, r3, r3, FIRST_SPEC_OBJECT_TYPE);
-  __ b(ge, &shift_arguments);
+  __ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE);
+  __ b(lt, &convert_to_object);
+  __ cmp(r3, Operand(LAST_JS_OBJECT_TYPE));
+  __ b(le, &shift_arguments);
   __ bind(&convert_to_object);
   __ EnterInternalFrame();  // In order to preserve argument count.
@@ -1280,7 +1265,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   __ push(r0);
   __ push(r2);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
   __ mov(r2, r0);
   __ pop(r0);
@@ -1350,8 +1335,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
   // Expected number of arguments is 0 for CALL_NON_FUNCTION.
   __ mov(r2, Operand(0, RelocInfo::NONE));
   __ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
-  __ SetCallKind(r5, CALL_AS_METHOD);
-  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
+  __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
           RelocInfo::CODE_TARGET);
   __ bind(&function);
 }
@@ -1366,15 +1350,12 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
          FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
   __ mov(r2, Operand(r2, ASR, kSmiTagSize));
   __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
-  __ SetCallKind(r5, CALL_AS_METHOD);
   __ cmp(r2, r0);  // Check formal and actual parameter counts.
-  __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
-          RelocInfo::CODE_TARGET,
-          ne);
+  __ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+          RelocInfo::CODE_TARGET, ne);
   ParameterCount expected(0);
-  __ InvokeCode(r3, expected, expected, JUMP_FUNCTION,
-                NullCallWrapper(), CALL_AS_METHOD);
+  __ InvokeCode(r3, expected, expected, JUMP_FUNCTION);
 }
@@ -1391,7 +1372,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   __ push(r0);
   __ ldr(r0, MemOperand(fp, kArgsOffset));  // get the args array
   __ push(r0);
-  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
+  __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS);
   // Check the stack for overflow. We are not trying need to catch
   // interruptions (e.g. debug break and preemption) here, so the "real stack
@@ -1409,7 +1390,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   __ ldr(r1, MemOperand(fp, kFunctionOffset));
   __ push(r1);
   __ push(r0);
-  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
+  __ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_JS);
   // End of stack check.
   // Push current limit and index.
@@ -1429,17 +1410,14 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   __ ldr(r0, MemOperand(fp, kRecvOffset));
   // Do not transform the receiver for strict mode functions.
-  __ ldr(r2, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
-  __ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
+  __ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
+  __ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
                            kSmiTagSize)));
   __ b(ne, &push_receiver);
-  // Do not transform the receiver for strict mode functions.
-  __ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
-  __ b(ne, &push_receiver);
   // Compute the receiver in non-strict mode.
-  __ JumpIfSmi(r0, &call_to_object);
+  __ tst(r0, Operand(kSmiTagMask));
+  __ b(eq, &call_to_object);
   __ LoadRoot(r1, Heap::kNullValueRootIndex);
   __ cmp(r0, r1);
   __ b(eq, &use_global_receiver);
@@ -1449,15 +1427,16 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   // Check if the receiver is already a JavaScript object.
   // r0: receiver
-  STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
-  __ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
-  __ b(ge, &push_receiver);
+  __ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
+  __ b(lt, &call_to_object);
+  __ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
+  __ b(le, &push_receiver);
   // Convert the receiver to a regular object.
   // r0: receiver
   __ bind(&call_to_object);
   __ push(r0);
-  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
+  __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
   __ b(&push_receiver);
   // Use the current global receiver object as the receiver.
@@ -1507,8 +1486,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
   ParameterCount actual(r0);
   __ mov(r0, Operand(r0, ASR, kSmiTagSize));
   __ ldr(r1, MemOperand(fp, kFunctionOffset));
-  __ InvokeFunction(r1, actual, CALL_FUNCTION,
-                    NullCallWrapper(), CALL_AS_METHOD);
+  __ InvokeFunction(r1, actual, CALL_FUNCTION);
   // Tear down the internal frame and remove function, receiver and args.
   __ LeaveInternalFrame();
@@ -1545,7 +1523,6 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
   //  -- r1 : function (passed through to callee)
   //  -- r2 : expected number of arguments
   //  -- r3 : code entry to call
-  //  -- r5 : call kind information
   // -----------------------------------
   Label invoke, dont_adapt_arguments;

3478
deps/v8/src/arm/code-stubs-arm.cc

File diff suppressed because it is too large

607
deps/v8/src/arm/code-stubs-arm.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -38,120 +38,202 @@ namespace internal {
 // TranscendentalCache runtime function.
 class TranscendentalCacheStub: public CodeStub {
  public:
-  enum ArgumentType {
-    TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
-    UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
-  };
-  TranscendentalCacheStub(TranscendentalCache::Type type,
-                          ArgumentType argument_type)
-      : type_(type), argument_type_(argument_type) { }
+  explicit TranscendentalCacheStub(TranscendentalCache::Type type)
+      : type_(type) {}
   void Generate(MacroAssembler* masm);
  private:
   TranscendentalCache::Type type_;
-  ArgumentType argument_type_;
-  void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
   Major MajorKey() { return TranscendentalCache; }
-  int MinorKey() { return type_ | argument_type_; }
+  int MinorKey() { return type_; }
   Runtime::FunctionId RuntimeFunction();
};
-class UnaryOpStub: public CodeStub {
- public:
-  UnaryOpStub(Token::Value op,
-              UnaryOverwriteMode mode,
-              UnaryOpIC::TypeInfo operand_type = UnaryOpIC::UNINITIALIZED)
-      : op_(op),
-        mode_(mode),
-        operand_type_(operand_type),
-        name_(NULL) {
-  }
-
- private:
-  Token::Value op_;
-  UnaryOverwriteMode mode_;
-
-  // Operand type information determined at runtime.
-  UnaryOpIC::TypeInfo operand_type_;
-
-  char* name_;
-
-  const char* GetName();
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("UnaryOpStub %d (op %s), (mode %d, runtime_type_info %s)\n",
-           MinorKey(),
-           Token::String(op_),
-           static_cast<int>(mode_),
-           UnaryOpIC::GetName(operand_type_));
-  }
-#endif
-
-  class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
-  class OpBits: public BitField<Token::Value, 1, 7> {};
-  class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
-
-  Major MajorKey() { return UnaryOp; }
-  int MinorKey() {
-    return ModeBits::encode(mode_)
-           | OpBits::encode(op_)
-           | OperandTypeInfoBits::encode(operand_type_);
-  }
-
-  // Note: A lot of the helper functions below will vanish when we use virtual
-  // function instead of switch more often.
-  void Generate(MacroAssembler* masm);
-  void GenerateTypeTransition(MacroAssembler* masm);
-
-  void GenerateSmiStub(MacroAssembler* masm);
-  void GenerateSmiStubSub(MacroAssembler* masm);
-  void GenerateSmiStubBitNot(MacroAssembler* masm);
-  void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
-  void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
-
-  void GenerateHeapNumberStub(MacroAssembler* masm);
-  void GenerateHeapNumberStubSub(MacroAssembler* masm);
-  void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
-  void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
-  void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
-
-  void GenerateGenericStub(MacroAssembler* masm);
-  void GenerateGenericStubSub(MacroAssembler* masm);
-  void GenerateGenericStubBitNot(MacroAssembler* masm);
-  void GenerateGenericCodeFallback(MacroAssembler* masm);
-
-  virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
-
-  virtual InlineCacheState GetICState() {
-    return UnaryOpIC::ToState(operand_type_);
-  }
-
-  virtual void FinishCode(Code* code) {
-    code->set_unary_op_type(operand_type_);
-  }
-};
+class ToBooleanStub: public CodeStub {
+ public:
+  explicit ToBooleanStub(Register tos) : tos_(tos) { }
+
+  void Generate(MacroAssembler* masm);
+
+ private:
+  Register tos_;
+  Major MajorKey() { return ToBoolean; }
+  int MinorKey() { return tos_.code(); }
+};
+
+
+class GenericBinaryOpStub : public CodeStub {
+ public:
+  static const int kUnknownIntValue = -1;
+
+  GenericBinaryOpStub(Token::Value op,
+                      OverwriteMode mode,
+                      Register lhs,
+                      Register rhs,
+                      int constant_rhs = kUnknownIntValue)
+      : op_(op),
+        mode_(mode),
+        lhs_(lhs),
+        rhs_(rhs),
+        constant_rhs_(constant_rhs),
+        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
+        runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
+        name_(NULL) { }
+
+  GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
+      : op_(OpBits::decode(key)),
+        mode_(ModeBits::decode(key)),
+        lhs_(LhsRegister(RegisterBits::decode(key))),
+        rhs_(RhsRegister(RegisterBits::decode(key))),
+        constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
+        specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
+        runtime_operands_type_(type_info),
+        name_(NULL) { }
+
+ private:
+  Token::Value op_;
+  OverwriteMode mode_;
+  Register lhs_;
+  Register rhs_;
+  int constant_rhs_;
+  bool specialized_on_rhs_;
+  BinaryOpIC::TypeInfo runtime_operands_type_;
+  char* name_;
+
+  static const int kMaxKnownRhs = 0x40000000;
+  static const int kKnownRhsKeyBits = 6;
+
+  // Minor key encoding in 17 bits.
+  class ModeBits: public BitField<OverwriteMode, 0, 2> {};
+  class OpBits: public BitField<Token::Value, 2, 6> {};
+  class TypeInfoBits: public BitField<int, 8, 3> {};
+  class RegisterBits: public BitField<bool, 11, 1> {};
+  class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
+
+  Major MajorKey() { return GenericBinaryOp; }
+  int MinorKey() {
+    ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
+           (lhs_.is(r1) && rhs_.is(r0)));
+    // Encode the parameters in a unique 18 bit value.
+    return OpBits::encode(op_)
+           | ModeBits::encode(mode_)
+           | KnownIntBits::encode(MinorKeyForKnownInt())
+           | TypeInfoBits::encode(runtime_operands_type_)
+           | RegisterBits::encode(lhs_.is(r0));
+  }
+
+  void Generate(MacroAssembler* masm);
+  void HandleNonSmiBitwiseOp(MacroAssembler* masm,
+                             Register lhs,
+                             Register rhs);
+  void HandleBinaryOpSlowCases(MacroAssembler* masm,
+                               Label* not_smi,
+                               Register lhs,
+                               Register rhs,
+                               const Builtins::JavaScript& builtin);
+  void GenerateTypeTransition(MacroAssembler* masm);
+
+  static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
+    if (constant_rhs == kUnknownIntValue) return false;
+    if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
+    if (op == Token::MOD) {
+      if (constant_rhs <= 1) return false;
+      if (constant_rhs <= 10) return true;
+      if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
+      return false;
+    }
+    return false;
+  }
+
+  int MinorKeyForKnownInt() {
+    if (!specialized_on_rhs_) return 0;
+    if (constant_rhs_ <= 10) return constant_rhs_ + 1;
+    ASSERT(IsPowerOf2(constant_rhs_));
+    int key = 12;
+    int d = constant_rhs_;
+    while ((d & 1) == 0) {
+      key++;
+      d >>= 1;
+    }
+    ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
+    return key;
+  }
+
+  int KnownBitsForMinorKey(int key) {
+    if (!key) return 0;
+    if (key <= 11) return key - 1;
+    int d = 1;
+    while (key != 12) {
+      key--;
+      d <<= 1;
+    }
+    return d;
+  }
+
+  Register LhsRegister(bool lhs_is_r0) {
+    return lhs_is_r0 ? r0 : r1;
+  }
+
+  Register RhsRegister(bool lhs_is_r0) {
+    return lhs_is_r0 ? r1 : r0;
+  }
+
+  bool HasSmiSmiFastPath() {
+    return op_ != Token::DIV;
+  }
+
+  bool ShouldGenerateSmiCode() {
+    return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
+        runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
+        runtime_operands_type_ != BinaryOpIC::STRINGS;
+  }
+
+  bool ShouldGenerateFPCode() {
+    return runtime_operands_type_ != BinaryOpIC::STRINGS;
+  }
+
+  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+
+  virtual InlineCacheState GetICState() {
+    return BinaryOpIC::ToState(runtime_operands_type_);
+  }
+
+  const char* GetName();
+
+  virtual void FinishCode(Code* code) {
+    code->set_binary_op_type(runtime_operands_type_);
+  }
+
+#ifdef DEBUG
+  void Print() {
+    if (!specialized_on_rhs_) {
+      PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
+    } else {
+      PrintF("GenericBinaryOpStub (%s by %d)\n",
+             Token::String(op_),
+             constant_rhs_);
+    }
+  }
+#endif
+};
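GenericBinaryOpStub's constant-rhs encoding above is compact enough to misread: keys 3 through 11 cover the small divisors 2..10 directly, while keys from 12 up store 12 + log2 of a power-of-two divisor. The following standalone program (plain C++, not V8 code; the specialized_on_rhs_ guard and the ASSERTs are dropped) checks that KnownBitsForMinorKey inverts MinorKeyForKnownInt over exactly the values the stub specializes on:

#include <cassert>

int MinorKeyForKnownInt(int constant_rhs) {
  if (constant_rhs <= 10) return constant_rhs + 1;
  int key = 12;
  int d = constant_rhs;
  while ((d & 1) == 0) {  // Count the trailing zero bits of a power of two.
    key++;
    d >>= 1;
  }
  return key;
}

int KnownBitsForMinorKey(int key) {
  if (!key) return 0;
  if (key <= 11) return key - 1;
  int d = 1;
  while (key != 12) {  // Rebuild 2^(key - 12).
    key--;
    d <<= 1;
  }
  return d;
}

int main() {
  for (int rhs = 2; rhs <= 10; rhs++) {
    assert(KnownBitsForMinorKey(MinorKeyForKnownInt(rhs)) == rhs);
  }
  for (long long rhs = 16; rhs <= 0x40000000LL; rhs <<= 1) {
    assert(KnownBitsForMinorKey(MinorKeyForKnownInt(static_cast<int>(rhs))) == rhs);
  }
  return 0;
}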
-class BinaryOpStub: public CodeStub {
+class TypeRecordingBinaryOpStub: public CodeStub {
  public:
-  BinaryOpStub(Token::Value op, OverwriteMode mode)
+  TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
       : op_(op),
         mode_(mode),
-        operands_type_(BinaryOpIC::UNINITIALIZED),
-        result_type_(BinaryOpIC::UNINITIALIZED),
+        operands_type_(TRBinaryOpIC::UNINITIALIZED),
+        result_type_(TRBinaryOpIC::UNINITIALIZED),
         name_(NULL) {
     use_vfp3_ = CpuFeatures::IsSupported(VFP3);
     ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
   }

-  BinaryOpStub(
+  TypeRecordingBinaryOpStub(
       int key,
-      BinaryOpIC::TypeInfo operands_type,
-      BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
+      TRBinaryOpIC::TypeInfo operands_type,
+      TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
       : op_(OpBits::decode(key)),
         mode_(ModeBits::decode(key)),
         use_vfp3_(VFP3Bits::decode(key)),
@@ -170,8 +252,8 @@ class BinaryOpStub: public CodeStub {
   bool use_vfp3_;

   // Operand type information determined at runtime.
-  BinaryOpIC::TypeInfo operands_type_;
-  BinaryOpIC::TypeInfo result_type_;
+  TRBinaryOpIC::TypeInfo operands_type_;
+  TRBinaryOpIC::TypeInfo result_type_;

   char* name_;
@@ -179,12 +261,12 @@ class BinaryOpStub: public CodeStub {
 #ifdef DEBUG
   void Print() {
-    PrintF("BinaryOpStub %d (op %s), "
+    PrintF("TypeRecordingBinaryOpStub %d (op %s), "
            "(mode %d, runtime_type_info %s)\n",
            MinorKey(),
            Token::String(op_),
            static_cast<int>(mode_),
-           BinaryOpIC::GetName(operands_type_));
+           TRBinaryOpIC::GetName(operands_type_));
   }
 #endif
@@ -192,10 +274,10 @@ class BinaryOpStub: public CodeStub {
   class ModeBits: public BitField<OverwriteMode, 0, 2> {};
   class OpBits: public BitField<Token::Value, 2, 7> {};
   class VFP3Bits: public BitField<bool, 9, 1> {};
-  class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
-  class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
+  class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
+  class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};

-  Major MajorKey() { return BinaryOp; }
+  Major MajorKey() { return TypeRecordingBinaryOp; }
   int MinorKey() {
     return OpBits::encode(op_)
            | ModeBits::encode(mode_)
@@ -212,7 +294,6 @@ class BinaryOpStub: public CodeStub {
                             Label* not_numbers,
                             Label* gc_required);
   void GenerateSmiCode(MacroAssembler* masm,
-                       Label* use_runtime,
                        Label* gc_required,
                        SmiCodeGenerateHeapNumberResults heapnumber_results);
   void GenerateLoadArguments(MacroAssembler* masm);
@@ -223,7 +304,6 @@ class BinaryOpStub: public CodeStub {
   void GenerateHeapNumberStub(MacroAssembler* masm);
   void GenerateOddballStub(MacroAssembler* masm);
   void GenerateStringStub(MacroAssembler* masm);
-  void GenerateBothStringStub(MacroAssembler* masm);
   void GenerateGenericStub(MacroAssembler* masm);
   void GenerateAddStrings(MacroAssembler* masm);
   void GenerateCallRuntime(MacroAssembler* masm);
@@ -238,15 +318,15 @@ class BinaryOpStub: public CodeStub {
   void GenerateTypeTransition(MacroAssembler* masm);
   void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);

-  virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
+  virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }

   virtual InlineCacheState GetICState() {
-    return BinaryOpIC::ToState(operands_type_);
+    return TRBinaryOpIC::ToState(operands_type_);
   }

   virtual void FinishCode(Code* code) {
-    code->set_binary_op_type(operands_type_);
-    code->set_binary_op_result_type(result_type_);
+    code->set_type_recording_binary_op_type(operands_type_);
+    code->set_type_recording_binary_op_result_type(result_type_);
   }

   friend class CodeGenerator;
@@ -306,7 +386,8 @@ class StringCompareStub: public CodeStub {
  public:
   StringCompareStub() { }

-  // Compares two flat ASCII strings and returns result in r0.
+  // Compare two flat ASCII strings and returns result in r0.
+  // Does not use the stack.
   static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
                                               Register left,
                                               Register right,
@@ -315,27 +396,107 @@ class StringCompareStub: public CodeStub {
                                               Register scratch3,
                                               Register scratch4);
-  // Compares two flat ASCII strings for equality and returns result
-  // in r0.
-  static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
-                                            Register left,
-                                            Register right,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Register scratch3);
-
- private:
-  virtual Major MajorKey() { return StringCompare; }
-  virtual int MinorKey() { return 0; }
-
-  virtual void Generate(MacroAssembler* masm);
-
-  static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
-                                            Register left,
-                                            Register right,
-                                            Register length,
-                                            Register scratch1,
-                                            Register scratch2,
-                                            Label* chars_not_equal);
-};
+ private:
+  Major MajorKey() { return StringCompare; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
+// This stub can do a fast mod operation without using fp.
+// It is tail called from the GenericBinaryOpStub and it always
+// returns an answer.  It never causes GC so it doesn't need a real frame.
+//
+// The inputs are always positive Smis.  This is never called
+// where the denominator is a power of 2.  We handle that separately.
+//
+// If we consider the denominator as an odd number multiplied by a power of 2,
+// then:
+// * The exponent (power of 2) is in the shift_distance register.
+// * The odd number is in the odd_number register.  It is always in the range
+//   of 3 to 25.
+// * The bits from the numerator that are to be copied to the answer (there are
+//   shift_distance of them) are in the mask_bits register.
+// * The other bits of the numerator have been shifted down and are in the lhs
+//   register.
+class IntegerModStub : public CodeStub {
+ public:
+  IntegerModStub(Register result,
+                 Register shift_distance,
+                 Register odd_number,
+                 Register mask_bits,
+                 Register lhs,
+                 Register scratch)
+      : result_(result),
+        shift_distance_(shift_distance),
+        odd_number_(odd_number),
+        mask_bits_(mask_bits),
+        lhs_(lhs),
+        scratch_(scratch) {
+    // We don't code these in the minor key, so they should always be the same.
+    // We don't really want to fix that since this stub is rather large and we
+    // don't want many copies of it.
+    ASSERT(shift_distance_.is(r9));
+    ASSERT(odd_number_.is(r4));
+    ASSERT(mask_bits_.is(r3));
+    ASSERT(scratch_.is(r5));
+  }
+
+ private:
+  Register result_;
+  Register shift_distance_;
+  Register odd_number_;
+  Register mask_bits_;
+  Register lhs_;
+  Register scratch_;
+
+  // Minor key encoding in 16 bits.
+  class ResultRegisterBits: public BitField<int, 0, 4> {};
+  class LhsRegisterBits: public BitField<int, 4, 4> {};
+
+  Major MajorKey() { return IntegerMod; }
+  int MinorKey() {
+    // Encode the parameters in a unique 16 bit value.
+    return ResultRegisterBits::encode(result_.code())
+           | LhsRegisterBits::encode(lhs_.code());
+  }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName() { return "IntegerModStub"; }
+
+  // Utility functions.
+  void DigitSum(MacroAssembler* masm,
+                Register lhs,
+                int mask,
+                int shift,
+                Label* entry);
+  void DigitSum(MacroAssembler* masm,
+                Register lhs,
+                Register scratch,
+                int mask,
+                int shift1,
+                int shift2,
+                Label* entry);
+  void ModGetInRangeBySubtraction(MacroAssembler* masm,
+                                  Register lhs,
+                                  int shift,
+                                  int rhs);
+  void ModReduce(MacroAssembler* masm,
+                 Register lhs,
+                 int max,
+                 int denominator);
+  void ModAnswer(MacroAssembler* masm,
+                 Register result,
+                 Register shift_distance,
+                 Register mask_bits,
+                 Register sum_of_digits);
+
+#ifdef DEBUG
+  void Print() { PrintF("IntegerModStub\n"); }
+#endif
+};
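The register roles in the comment above encode a simple arithmetic identity: for a denominator d = odd * 2^k, the k low bits of the numerator pass straight through to the remainder, and only the shifted-down part needs a mod by the odd factor. A standalone check of that identity (plain C++, not the generated ARM code):

#include <cassert>

// For d = odd * 2^k:  n mod d == (((n >> k) mod odd) << k) | (n & (2^k - 1)).
// "odd" plays the odd_number role, k the shift_distance role, and the low k
// bits of n are the mask_bits the comment above describes.
int IntegerModByParts(int n, int odd, int k) {
  int mask_bits = n & ((1 << k) - 1);  // Copied unchanged into the answer.
  int lhs = n >> k;                    // The remaining numerator bits.
  return ((lhs % odd) << k) | mask_bits;
}

int main() {
  for (int n = 0; n < 100000; n++) {
    assert(IntegerModByParts(n, 25, 3) == n % (25 << 3));  // d = 200.
  }
  return 0;
}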
@@ -419,9 +580,6 @@ class RegExpCEntryStub: public CodeStub {
  private:
   Major MajorKey() { return RegExpCEntry; }
   int MinorKey() { return 0; }
-  bool NeedsImmovableCode() { return true; }
   const char* GetName() { return "RegExpCEntryStub"; }
 };
@@ -441,210 +599,59 @@ class DirectCEntryStub: public CodeStub {
  private:
   Major MajorKey() { return DirectCEntry; }
   int MinorKey() { return 0; }
-  bool NeedsImmovableCode() { return true; }
   const char* GetName() { return "DirectCEntryStub"; }
 };

-class FloatingPointHelper : public AllStatic {
- public:
-  enum Destination {
-    kVFPRegisters,
-    kCoreRegisters
-  };
-
-  // Loads smis from r0 and r1 (right and left in binary operations) into
-  // floating point registers. Depending on the destination the values ends up
-  // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
-  // floating point registers VFP3 must be supported. If core registers are
-  // requested when VFP3 is supported d6 and d7 will be scratched.
-  static void LoadSmis(MacroAssembler* masm,
-                       Destination destination,
-                       Register scratch1,
-                       Register scratch2);
-
-  // Loads objects from r0 and r1 (right and left in binary operations) into
-  // floating point registers. Depending on the destination the values ends up
-  // either d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination is
-  // floating point registers VFP3 must be supported. If core registers are
-  // requested when VFP3 is supported d6 and d7 will still be scratched. If
-  // either r0 or r1 is not a number (not smi and not heap number object) the
-  // not_number label is jumped to with r0 and r1 intact.
-  static void LoadOperands(MacroAssembler* masm,
-                           FloatingPointHelper::Destination destination,
-                           Register heap_number_map,
-                           Register scratch1,
-                           Register scratch2,
-                           Label* not_number);
-
-  // Convert the smi or heap number in object to an int32 using the rules
-  // for ToInt32 as described in ECMAScript 9.5.: the value is truncated
-  // and brought into the range -2^31 .. +2^31 - 1.
-  static void ConvertNumberToInt32(MacroAssembler* masm,
-                                   Register object,
-                                   Register dst,
-                                   Register heap_number_map,
-                                   Register scratch1,
-                                   Register scratch2,
-                                   Register scratch3,
-                                   DwVfpRegister double_scratch,
-                                   Label* not_int32);
-
-  // Converts the integer (untagged smi) in |int_scratch| to a double, storing
-  // the result either in |double_dst| or |dst2:dst1|, depending on
-  // |destination|.
-  // Warning: The value in |int_scratch| will be changed in the process!
-  static void ConvertIntToDouble(MacroAssembler* masm,
-                                 Register int_scratch,
-                                 Destination destination,
-                                 DwVfpRegister double_dst,
-                                 Register dst1,
-                                 Register dst2,
-                                 Register scratch2,
-                                 SwVfpRegister single_scratch);
-
-  // Load the number from object into double_dst in the double format.
-  // Control will jump to not_int32 if the value cannot be exactly represented
-  // by a 32-bit integer.
-  // Floating point value in the 32-bit integer range that are not exact integer
-  // won't be loaded.
-  static void LoadNumberAsInt32Double(MacroAssembler* masm,
-                                      Register object,
-                                      Destination destination,
-                                      DwVfpRegister double_dst,
-                                      Register dst1,
-                                      Register dst2,
-                                      Register heap_number_map,
-                                      Register scratch1,
-                                      Register scratch2,
-                                      SwVfpRegister single_scratch,
-                                      Label* not_int32);
-
-  // Loads the number from object into dst as a 32-bit integer.
-  // Control will jump to not_int32 if the object cannot be exactly represented
-  // by a 32-bit integer.
-  // Floating point value in the 32-bit integer range that are not exact integer
-  // won't be converted.
-  // scratch3 is not used when VFP3 is supported.
-  static void LoadNumberAsInt32(MacroAssembler* masm,
-                                Register object,
-                                Register dst,
-                                Register heap_number_map,
-                                Register scratch1,
-                                Register scratch2,
-                                Register scratch3,
-                                DwVfpRegister double_scratch,
-                                Label* not_int32);
-
-  // Generate non VFP3 code to check if a double can be exactly represented by a
-  // 32-bit integer. This does not check for 0 or -0, which need
-  // to be checked for separately.
-  // Control jumps to not_int32 if the value is not a 32-bit integer, and falls
-  // through otherwise.
-  // src1 and src2 will be cloberred.
-  //
-  // Expected input:
-  // - src1: higher (exponent) part of the double value.
-  // - src2: lower (mantissa) part of the double value.
-  // Output status:
-  // - dst: 32 higher bits of the mantissa. (mantissa[51:20])
-  // - src2: contains 1.
-  // - other registers are clobbered.
-  static void DoubleIs32BitInteger(MacroAssembler* masm,
-                                   Register src1,
-                                   Register src2,
-                                   Register dst,
-                                   Register scratch,
-                                   Label* not_int32);
-
-  // Generates code to call a C function to do a double operation using core
-  // registers. (Used when VFP3 is not supported.)
-  // This code never falls through, but returns with a heap number containing
-  // the result in r0.
-  // Register heapnumber_result must be a heap number in which the
-  // result of the operation will be stored.
-  // Requires the following layout on entry:
-  // r0: Left value (least significant part of mantissa).
-  // r1: Left value (sign, exponent, top of mantissa).
-  // r2: Right value (least significant part of mantissa).
-  // r3: Right value (sign, exponent, top of mantissa).
-  static void CallCCodeForDoubleOperation(MacroAssembler* masm,
-                                          Token::Value op,
-                                          Register heap_number_result,
-                                          Register scratch);
-
- private:
-  static void LoadNumber(MacroAssembler* masm,
-                         FloatingPointHelper::Destination destination,
-                         Register object,
-                         DwVfpRegister dst,
-                         Register dst1,
-                         Register dst2,
-                         Register heap_number_map,
-                         Register scratch1,
-                         Register scratch2,
-                         Label* not_number);
-};
-
-
-class StringDictionaryLookupStub: public CodeStub {
- public:
-  enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
-
-  explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
-
-  void Generate(MacroAssembler* masm);
-
-  MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
-      MacroAssembler* masm,
-      Label* miss,
-      Label* done,
-      Register receiver,
-      Register properties,
-      String* name,
-      Register scratch0);
-
-  static void GeneratePositiveLookup(MacroAssembler* masm,
-                                     Label* miss,
-                                     Label* done,
-                                     Register elements,
-                                     Register name,
-                                     Register r0,
-                                     Register r1);
-
- private:
-  static const int kInlinedProbes = 4;
-  static const int kTotalProbes = 20;
-
-  static const int kCapacityOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kCapacityIndex * kPointerSize;
-
-  static const int kElementsStartOffset =
-      StringDictionary::kHeaderSize +
-      StringDictionary::kElementsStartIndex * kPointerSize;
-
-#ifdef DEBUG
-  void Print() {
-    PrintF("StringDictionaryLookupStub\n");
-  }
-#endif
-
-  Major MajorKey() { return StringDictionaryNegativeLookup; }
-
-  int MinorKey() {
-    return LookupModeBits::encode(mode_);
-  }
-
-  class LookupModeBits: public BitField<LookupMode, 0, 1> {};
-
-  LookupMode mode_;
-};
+
+// Generate code to load an element from a pixel array. The receiver is assumed
+// to not be a smi and to have elements, the caller must guarantee this
+// precondition. If key is not a smi, then the generated code branches to
+// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
+// check has already been performed on key so that the smi check is not
+// generated. If key is not a valid index within the bounds of the pixel array,
+// the generated code jumps to out_of_range. receiver, key and elements are
+// unchanged throughout the generated code sequence.
+void GenerateFastPixelArrayLoad(MacroAssembler* masm,
+                                Register receiver,
+                                Register key,
+                                Register elements_map,
+                                Register elements,
+                                Register scratch1,
+                                Register scratch2,
+                                Register result,
+                                Label* not_pixel_array,
+                                Label* key_not_smi,
+                                Label* out_of_range);
+
+// Generate code to store an element into a pixel array, clamping values between
+// [0..255]. The receiver is assumed to not be a smi and to have elements, the
+// caller must guarantee this precondition. If key is not a smi, then the
+// generated code branches to key_not_smi. Callers can specify NULL for
+// key_not_smi to signal that a smi check has already been performed on key so
+// that the smi check is not generated. If value is not a smi, the generated
+// code will branch to value_not_smi. If the receiver doesn't have pixel array
+// elements, the generated code will branch to not_pixel_array, unless
+// not_pixel_array is NULL, in which case the caller must ensure that the
+// receiver has pixel array elements. If key is not a valid index within the
+// bounds of the pixel array, the generated code jumps to out_of_range. If
+// load_elements_from_receiver is true, then the elements of receiver is loaded
+// into elements, otherwise elements is assumed to already be the receiver's
+// elements. If load_elements_map_from_elements is true, elements_map is loaded
+// from elements, otherwise it is assumed to already contain the element map.
+void GenerateFastPixelArrayStore(MacroAssembler* masm,
+                                 Register receiver,
+                                 Register key,
+                                 Register value,
+                                 Register elements,
+                                 Register elements_map,
+                                 Register scratch1,
+                                 Register scratch2,
+                                 bool load_elements_from_receiver,
+                                 bool load_elements_map_from_elements,
+                                 Label* key_not_smi,
+                                 Label* value_not_smi,
+                                 Label* not_pixel_array,
+                                 Label* out_of_range);

 } }  // namespace v8::internal
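For reference, the [0..255] clamping that the GenerateFastPixelArrayStore comment promises is ordinary pixel saturation; in scalar form it is just (a self-contained illustration, not the emitted ARM sequence):

#include <cstdint>

// Values below the pixel range stick at 0, values above stick at 255.
uint8_t ClampToPixel(int value) {
  if (value < 0) return 0;
  if (value > 255) return 255;
  return static_cast<uint8_t>(value);
}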

48
deps/v8/src/arm/codegen-arm-inl.h

@@ -0,0 +1,48 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM_CODEGEN_ARM_INL_H_
#define V8_ARM_CODEGEN_ARM_INL_H_
#include "virtual-frame-arm.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
// Platform-specific inline functions.
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); }
#undef __
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_INL_H_

7360
deps/v8/src/arm/codegen-arm.cc

File diff suppressed because it is too large

512
deps/v8/src/arm/codegen-arm.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -37,8 +37,162 @@ namespace internal {
 // Forward declarations
 class CompilationInfo;
+class DeferredCode;
+class JumpTarget;
+class RegisterAllocator;
+class RegisterFile;
+
+enum InitState { CONST_INIT, NOT_CONST_INIT };
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
+enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
+enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
// -------------------------------------------------------------------------
// Reference support
// A reference is a C++ stack-allocated object that puts a
// reference on the virtual frame. The reference may be consumed
// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
// When the lifetime (scope) of a valid reference ends, it must have
// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
public:
// The values of the types is important, see size().
enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
Reference(CodeGenerator* cgen,
Expression* expression,
bool persist_after_get = false);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
ASSERT_EQ(ILLEGAL, type_);
type_ = value;
}
void set_unloaded() {
ASSERT_NE(ILLEGAL, type_);
ASSERT_NE(UNLOADED, type_);
type_ = UNLOADED;
}
// The size the reference takes up on the stack.
int size() const {
return (type_ < SLOT) ? 0 : type_;
}
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is consumed by the call unless the
// reference is for a compound assignment.
// If the reference is not consumed, it is left in place under its value.
void GetValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
// on the expression stack. The value is stored in the location specified
// by the reference, and is left on top of the stack, after the reference
// is popped from beneath it (unloaded).
void SetValue(InitState init_state, WriteBarrierCharacter wb);
// This is in preparation for something that uses the reference on the stack.
// If we need this reference afterwards get then dup it now. Otherwise mark
// it as used.
inline void DupIfPersist();
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
// Keep the reference on the stack after get, so it can be used by set later.
bool persist_after_get_;
};
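A minimal usage sketch of the lifecycle the comment above prescribes; the visitor name is hypothetical, while Load, SetValue, NOT_CONST_INIT and LIKELY_SMI are all declared in this header. The point is the contract: a Reference must leave its scope consumed, i.e. in state UNLOADED.

// Hypothetical caller, assuming a simple non-compound assignment.
void CodeGenerator::VisitSimpleAssignment(Assignment* node) {
  Reference target(this, node->target());       // Puts the reference on the frame.
  Load(node->value());                          // The value ends up on top of it.
  target.SetValue(NOT_CONST_INIT, LIKELY_SMI);  // Consumes and unloads it.
}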
// -------------------------------------------------------------------------
// Code generation state
// The state is passed down the AST by the code generator (and back up, in
// the form of the state of the label pair). It is threaded through the
// call stack. Constructing a state implicitly pushes it on the owning code
// generator's stack of states, and destroying one implicitly pops it.
class CodeGenState BASE_EMBEDDED {
public:
// Create an initial code generator state. Destroying the initial state
// leaves the code generator with a NULL state.
explicit CodeGenState(CodeGenerator* owner);
// Destroy a code generator state and restore the owning code generator's
// previous state.
virtual ~CodeGenState();
virtual JumpTarget* true_target() const { return NULL; }
virtual JumpTarget* false_target() const { return NULL; }
protected:
inline CodeGenerator* owner() { return owner_; }
inline CodeGenState* previous() const { return previous_; }
private:
CodeGenerator* owner_;
CodeGenState* previous_;
};
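The push/pop behaviour described above amounts to an RAII-managed linked list threaded through CodeGenerator::state(). A sketch of how the two special members plausibly look (the real bodies live in the suppressed codegen-arm.cc diff):

CodeGenState::CodeGenState(CodeGenerator* owner)
    : owner_(owner), previous_(owner->state()) {
  owner->set_state(this);           // Constructing implicitly pushes.
}

CodeGenState::~CodeGenState() {
  ASSERT(owner_->state() == this);  // States must nest like the call stack.
  owner_->set_state(previous_);     // Destroying implicitly pops.
}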
class ConditionCodeGenState : public CodeGenState {
public:
// Create a code generator state based on a code generator's current
// state. The new state has its own pair of branch labels.
ConditionCodeGenState(CodeGenerator* owner,
JumpTarget* true_target,
JumpTarget* false_target);
virtual JumpTarget* true_target() const { return true_target_; }
virtual JumpTarget* false_target() const { return false_target_; }
private:
JumpTarget* true_target_;
JumpTarget* false_target_;
};
class TypeInfoCodeGenState : public CodeGenState {
public:
TypeInfoCodeGenState(CodeGenerator* owner,
Slot* slot_number,
TypeInfo info);
~TypeInfoCodeGenState();
virtual JumpTarget* true_target() const { return previous()->true_target(); }
virtual JumpTarget* false_target() const {
return previous()->false_target();
}
private:
Slot* slot_;
TypeInfo old_type_info_;
};
// -------------------------------------------------------------------------
// Arguments allocation mode
enum ArgumentsAllocationMode {
NO_ARGUMENTS_ALLOCATION,
EAGER_ARGUMENTS_ALLOCATION,
LAZY_ARGUMENTS_ALLOCATION
};
// -------------------------------------------------------------------------
// CodeGenerator

@@ -71,17 +225,367 @@ class CodeGenerator: public AstVisitor {
                                        int pos,
                                        bool right_here = false);
// Accessors
MacroAssembler* masm() { return masm_; }
VirtualFrame* frame() const { return frame_; }
inline Handle<Script> script();
bool has_valid_frame() const { return frame_ != NULL; }
// Set the virtual frame to be new_frame, with non-frame register
// reference counts given by non_frame_registers. The non-frame
// register reference counts of the old frame are returned in
// non_frame_registers.
void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
void DeleteFrame();
RegisterAllocator* allocator() const { return allocator_; }
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
TypeInfo type_info(Slot* slot) {
int index = NumberOfSlot(slot);
if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
return (*type_info_)[index];
}
TypeInfo set_type_info(Slot* slot, TypeInfo info) {
int index = NumberOfSlot(slot);
ASSERT(index >= kInvalidSlotNumber);
if (index != kInvalidSlotNumber) {
TypeInfo previous_value = (*type_info_)[index];
(*type_info_)[index] = info;
return previous_value;
}
return TypeInfo::Unknown();
}
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
   // Constants related to patching of inlined load/store.
   static int GetInlinedKeyedLoadInstructionsAfterPatch() {
     return FLAG_debug_code ? 32 : 13;
   }
-  static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
+  static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
   static int GetInlinedNamedStoreInstructionsAfterPatch() {
-    ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
-    return Isolate::Current()->inlined_write_barrier_size() + 4;
+    ASSERT(inlined_write_barrier_size_ != -1);
+    return inlined_write_barrier_size_ + 4;
   }

 private:
// Type of a member function that generates inline code for a native function.
typedef void (CodeGenerator::*InlineFunctionGenerator)
(ZoneList<Expression*>*);
static const InlineFunctionGenerator kInlineFunctionGenerators[];
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
// Accessors
inline bool is_eval();
inline Scope* scope();
inline StrictModeFlag strict_mode_flag();
// Generating deferred code.
void ProcessDeferred();
static const int kInvalidSlotNumber = -1;
int NumberOfSlot(Slot* slot);
// State
bool has_cc() const { return cc_reg_ != al; }
JumpTarget* true_target() const { return state_->true_target(); }
JumpTarget* false_target() const { return state_->false_target(); }
// Track loop nesting level.
int loop_nesting() const { return loop_nesting_; }
void IncrementLoopNesting() { loop_nesting_++; }
void DecrementLoopNesting() { loop_nesting_--; }
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitSlot(Slot* node);
#define DEF_VISIT(type) \
virtual void Visit##type(type* node);
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Main code generation function
void Generate(CompilationInfo* info);
// Generate the return sequence code. Should be called no more than
// once per compiled function, immediately after binding the return
// target (which can not be done more than once). The return value should
// be in r0.
void GenerateReturnSequence();
// Returns the arguments allocation mode.
ArgumentsAllocationMode ArgumentsMode();
// Store the arguments object and allocate it if necessary.
void StoreArgumentsObject(bool initial);
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
MemOperand SlotOperand(Slot* slot, Register tmp);
  MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
                                               Register tmp,
                                               Register tmp2,
                                               JumpTarget* slow);

  // Expressions
  void LoadCondition(Expression* x,
                     JumpTarget* true_target,
                     JumpTarget* false_target,
                     bool force_cc);
  void Load(Expression* expr);
  void LoadGlobal();
  void LoadGlobalReceiver(Register scratch);

  // Read a value from a slot and leave it on top of the expression stack.
  void LoadFromSlot(Slot* slot, TypeofState typeof_state);
  void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);

  // Store the value on top of the stack to a slot.
  void StoreToSlot(Slot* slot, InitState init_state);

  // Support for compiling assignment expressions.
  void EmitSlotAssignment(Assignment* node);
  void EmitNamedPropertyAssignment(Assignment* node);
  void EmitKeyedPropertyAssignment(Assignment* node);

  // Load a named property, returning it in r0. The receiver is passed on the
  // stack, and remains there.
  void EmitNamedLoad(Handle<String> name, bool is_contextual);

  // Store to a named property. If the store is contextual, value is passed on
  // the frame and consumed. Otherwise, receiver and value are passed on the
  // frame and consumed. The result is returned in r0.
  void EmitNamedStore(Handle<String> name, bool is_contextual);

  // Load a keyed property, leaving it in r0. The receiver and key are
  // passed on the stack, and remain there.
  void EmitKeyedLoad();

  // Store a keyed property. Key and receiver are on the stack and the value is
  // in r0. Result is returned in r0.
  void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);

  void LoadFromGlobalSlotCheckExtensions(Slot* slot,
                                         TypeofState typeof_state,
                                         JumpTarget* slow);

  // Support for loading from local/global variables and arguments
  // whose location is known unless they are shadowed by
  // eval-introduced bindings. Generates no code for unsupported slot
  // types and therefore expects to fall through to the slow jump target.
  void EmitDynamicLoadFromSlotFastCase(Slot* slot,
                                       TypeofState typeof_state,
                                       JumpTarget* slow,
                                       JumpTarget* done);

  // Special code for typeof expressions: Unfortunately, we must
  // be careful when loading the expression in 'typeof'
  // expressions. We are not allowed to throw reference errors for
  // non-existing properties of the global object, so we must make it
  // look like an explicit property access, instead of an access
  // through the context chain.
  void LoadTypeofExpression(Expression* x);

  void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);

  // Generate code that computes a shortcutting logical operation.
  void GenerateLogicalBooleanOperation(BinaryOperation* node);

  void GenericBinaryOperation(Token::Value op,
                              OverwriteMode overwrite_mode,
                              GenerateInlineSmi inline_smi,
                              int known_rhs =
                                  GenericBinaryOpStub::kUnknownIntValue);
  void Comparison(Condition cc,
                  Expression* left,
                  Expression* right,
                  bool strict = false);

  void SmiOperation(Token::Value op,
                    Handle<Object> value,
                    bool reversed,
                    OverwriteMode mode);

  void CallWithArguments(ZoneList<Expression*>* arguments,
                         CallFunctionFlags flags,
                         int position);

  // An optimized implementation of expressions of the form
  // x.apply(y, arguments). We call x the applicand and y the receiver.
  // The optimization avoids allocating an arguments object if possible.
  void CallApplyLazy(Expression* applicand,
                     Expression* receiver,
                     VariableProxy* arguments,
                     int position);

  // Control flow
  void Branch(bool if_true, JumpTarget* target);
  void CheckStack();

  bool CheckForInlineRuntimeCall(CallRuntime* node);

  static Handle<Code> ComputeLazyCompile(int argc);
  void ProcessDeclarations(ZoneList<Declaration*>* declarations);

  // Declare global variables and functions in the given array of
  // name/value pairs.
  void DeclareGlobals(Handle<FixedArray> pairs);

  // Instantiate the function based on the shared function info.
  void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
                           bool pretenure);

  // Support for type checks.
  void GenerateIsSmi(ZoneList<Expression*>* args);
  void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
  void GenerateIsArray(ZoneList<Expression*>* args);
  void GenerateIsRegExp(ZoneList<Expression*>* args);
  void GenerateIsObject(ZoneList<Expression*>* args);
  void GenerateIsSpecObject(ZoneList<Expression*>* args);
  void GenerateIsFunction(ZoneList<Expression*>* args);
  void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
  void GenerateIsStringWrapperSafeForDefaultValueOf(
      ZoneList<Expression*>* args);

  // Support for construct call checks.
  void GenerateIsConstructCall(ZoneList<Expression*>* args);

  // Support for arguments.length and arguments[?].
  void GenerateArgumentsLength(ZoneList<Expression*>* args);
  void GenerateArguments(ZoneList<Expression*>* args);

  // Support for accessing the class and value fields of an object.
  void GenerateClassOf(ZoneList<Expression*>* args);
  void GenerateValueOf(ZoneList<Expression*>* args);
  void GenerateSetValueOf(ZoneList<Expression*>* args);

  // Fast support for charCodeAt(n).
  void GenerateStringCharCodeAt(ZoneList<Expression*>* args);

  // Fast support for string.charAt(n) and string[n].
  void GenerateStringCharFromCode(ZoneList<Expression*>* args);

  // Fast support for string.charAt(n) and string[n].
  void GenerateStringCharAt(ZoneList<Expression*>* args);

  // Fast support for object equality testing.
  void GenerateObjectEquals(ZoneList<Expression*>* args);

  void GenerateLog(ZoneList<Expression*>* args);

  // Fast support for Math.random().
  void GenerateRandomHeapNumber(ZoneList<Expression*>* args);

  // Fast support for StringAdd.
  void GenerateStringAdd(ZoneList<Expression*>* args);

  // Fast support for SubString.
  void GenerateSubString(ZoneList<Expression*>* args);

  // Fast support for StringCompare.
  void GenerateStringCompare(ZoneList<Expression*>* args);

  // Support for direct calls from JavaScript to native RegExp code.
  void GenerateRegExpExec(ZoneList<Expression*>* args);

  void GenerateRegExpConstructResult(ZoneList<Expression*>* args);

  // Support for fast native caches.
  void GenerateGetFromCache(ZoneList<Expression*>* args);

  // Fast support for number to string.
  void GenerateNumberToString(ZoneList<Expression*>* args);

  // Fast swapping of elements.
  void GenerateSwapElements(ZoneList<Expression*>* args);

  // Fast call for custom callbacks.
  void GenerateCallFunction(ZoneList<Expression*>* args);

  // Fast call to math functions.
  void GenerateMathPow(ZoneList<Expression*>* args);
  void GenerateMathSin(ZoneList<Expression*>* args);
  void GenerateMathCos(ZoneList<Expression*>* args);
  void GenerateMathSqrt(ZoneList<Expression*>* args);
  void GenerateMathLog(ZoneList<Expression*>* args);

  void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);

  void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
  void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
  void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);

  // Simple condition analysis.
  enum ConditionAnalysis {
    ALWAYS_TRUE,
    ALWAYS_FALSE,
    DONT_KNOW
  };
  ConditionAnalysis AnalyzeCondition(Expression* cond);

  // Methods used to indicate which source code is generated for. Source
  // positions are collected by the assembler and emitted with the relocation
  // information.
  void CodeForFunctionPosition(FunctionLiteral* fun);
  void CodeForReturnPosition(FunctionLiteral* fun);
  void CodeForStatementPosition(Statement* node);
  void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
  void CodeForSourcePosition(int pos);

#ifdef DEBUG
  // True if the registers are valid for entry to a block.
  bool HasValidEntryRegisters();
#endif

  List<DeferredCode*> deferred_;

  // Assembler
  MacroAssembler* masm_;  // to generate code

  CompilationInfo* info_;

  // Code generation state
  VirtualFrame* frame_;
  RegisterAllocator* allocator_;
  Condition cc_reg_;
  CodeGenState* state_;
  int loop_nesting_;

  Vector<TypeInfo>* type_info_;

  // Jump targets
  BreakTarget function_return_;

  // True if the function return is shadowed (ie, jumping to the target
  // function_return_ does not jump to the true function return, but rather
  // to some unlinking code).
  bool function_return_is_shadowed_;

  // Size of inlined write barriers generated by EmitNamedStore.
  static int inlined_write_barrier_size_;

  friend class VirtualFrame;
  friend class JumpTarget;
  friend class Reference;
  friend class FastCodeGenerator;
  friend class FullCodeGenerator;
  friend class FullCodeGenSyntaxChecker;
  friend class LCodeGen;

  DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};

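The ConditionAnalysis block above is the classic code generator's constant folding of branch conditions. As a hedged, standalone illustration (toy names and a toy string-based analyzer, not V8 code), deciding a condition up front lets the generator emit only the reachable arm:

```cpp
#include <cstdio>
#include <cstring>

enum ConditionAnalysis { ALWAYS_TRUE, ALWAYS_FALSE, DONT_KNOW };

// Toy analyzer: only boolean literals are statically decidable.
ConditionAnalysis Analyze(const char* cond) {
  if (strcmp(cond, "true") == 0) return ALWAYS_TRUE;
  if (strcmp(cond, "false") == 0) return ALWAYS_FALSE;
  return DONT_KNOW;
}

void EmitIf(const char* cond) {
  switch (Analyze(cond)) {
    case ALWAYS_TRUE:  printf("emit then-branch only\n"); break;
    case ALWAYS_FALSE: printf("emit else-branch only\n"); break;
    case DONT_KNOW:    printf("emit test, then both branches\n"); break;
  }
}

int main() {
  EmitIf("true");    // condition statically known: no test emitted
  EmitIf("x < 10");  // unknown: full test plus both branches
}
```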
24
deps/v8/src/arm/constants-arm.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,9 +28,12 @@
 #ifndef V8_ARM_CONSTANTS_ARM_H_
 #define V8_ARM_CONSTANTS_ARM_H_

-// ARM EABI is required.
-#if defined(__arm__) && !defined(__ARM_EABI__)
-#error ARM EABI support is required.
+// The simulator emulates the EABI so we define the USE_ARM_EABI macro if we
+// are not running on real ARM hardware. One reason for this is that the
+// old ABI uses fp registers in the calling convention and the simulator does
+// not simulate fp registers or coroutine instructions.
+#if defined(__ARM_EABI__) || !defined(__arm__)
+# define USE_ARM_EABI 1
 #endif

 // This means that interwork-compatible jump instructions are generated. We
@@ -86,11 +89,6 @@
 namespace v8 {
 namespace internal {

-// Constant pool marker.
-static const int kConstantPoolMarkerMask = 0xffe00000;
-static const int kConstantPoolMarker = 0x0c000000;
-static const int kConstantPoolLengthMask = 0x001ffff;
-
 // Number of registers in normal ARM mode.
 static const int kNumRegisters = 16;
@@ -343,9 +341,7 @@ enum BlockAddrMode {
   da_x = (0|0|0) << 21,  // Decrement after.
   ia_x = (0|4|0) << 21,  // Increment after.
   db_x = (8|0|0) << 21,  // Decrement before.
-  ib_x = (8|4|0) << 21,  // Increment before.
-
-  kBlockAddrModeMask = (8|4|1) << 21
+  ib_x = (8|4|0) << 21   // Increment before.
 };
@@ -392,11 +388,9 @@ enum VFPConversionMode {
 // This mask does not include the "inexact" or "input denormal" cumulative
 // exceptions flags, because we usually don't want to check for it.
 static const uint32_t kVFPExceptionMask = 0xf;
-static const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
-static const uint32_t kVFPOverflowExceptionBit = 1 << 2;
-static const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
 static const uint32_t kVFPInexactExceptionBit = 1 << 4;
 static const uint32_t kVFPFlushToZeroMask = 1 << 24;
+static const uint32_t kVFPInvalidExceptionBit = 1;
 static const uint32_t kVFPNConditionFlagBit = 1 << 31;
 static const uint32_t kVFPZConditionFlagBit = 1 << 30;

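For orientation: the three kConstantPool* constants deleted above are consumed by Decoder::IsConstantPoolAt / ConstantPoolSizeAt, which this same commit deletes from disasm-arm.cc further down. A standalone sketch of that encoding, using the same masks with a toy driver:

```cpp
#include <cstdint>
#include <cstdio>

// Same constants as in the removed lines of constants-arm.h above.
static const uint32_t kConstantPoolMarkerMask = 0xffe00000;
static const uint32_t kConstantPoolMarker = 0x0c000000;
static const uint32_t kConstantPoolLengthMask = 0x001ffff;

// An instruction word is a pool marker when its high bits match the marker
// pattern; the low bits then encode the pool length.
bool IsConstantPoolAt(uint32_t instruction_bits) {
  return (instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker;
}

int ConstantPoolSizeAt(uint32_t instruction_bits) {
  return IsConstantPoolAt(instruction_bits)
             ? static_cast<int>(instruction_bits & kConstantPoolLengthMask)
             : -1;
}

int main() {
  uint32_t marker = kConstantPoolMarker | 8;         // pool of 8 entries
  printf("size = %d\n", ConstantPoolSizeAt(marker));  // prints: size = 8
}
```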
39
deps/v8/src/arm/cpu-arm.cc

@@ -42,12 +42,10 @@ namespace v8 {
 namespace internal {

 void CPU::Setup() {
-  CpuFeatures::Probe();
-}
-
-
-bool CPU::SupportsCrankshaft() {
-  return CpuFeatures::IsSupported(VFP3);
+  CpuFeatures::Probe(true);
+  if (!CpuFeatures::IsSupported(VFP3) || Serializer::enabled()) {
+    V8::DisableCrankshaft();
+  }
 }
@@ -63,7 +61,7 @@ void CPU::FlushICache(void* start, size_t size) {
   // that the Icache was flushed.
   // None of this code ends up in the snapshot so there are no issues
   // around whether or not to generate the code when building snapshots.
-  Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
+  Simulator::FlushICache(start, size);
 #else
   // Ideally, we would call
   //   syscall(__ARM_NR_cacheflush, start,
@@ -75,6 +73,7 @@ void CPU::FlushICache(void* start, size_t size) {
   register uint32_t end asm("a2") =
       reinterpret_cast<uint32_t>(start) + size;
   register uint32_t flg asm("a3") = 0;
+#ifdef __ARM_EABI__
 #if defined (__arm__) && !defined(__thumb__)
   // __arm__ may be defined in thumb mode.
   register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
@@ -103,6 +102,34 @@ void CPU::FlushICache(void* start, size_t size) {
       : "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
       : "r3");
 #endif
+#else
+#if defined (__arm__) && !defined(__thumb__)
+  // __arm__ may be defined in thumb mode.
+  asm volatile(
+      "svc %1"
+      : "=r" (beg)
+      : "i" (__ARM_NR_cacheflush), "0" (beg), "r" (end), "r" (flg));
+#else
+  // Do not use the value of __ARM_NR_cacheflush in the inline assembly
+  // below, because the thumb mode value would be used, which would be
+  // wrong, since we switch to ARM mode before executing the svc instruction
+  asm volatile(
+      "@ Enter ARM Mode \n\t"
+      "adr r3, 1f \n\t"
+      "bx r3 \n\t"
+      ".ALIGN 4 \n\t"
+      ".ARM \n"
+      "1: svc 0x9f0002 \n"
+      "@ Enter THUMB Mode\n\t"
+      "adr r3, 2f+1 \n\t"
+      "bx r3 \n\t"
+      ".THUMB \n"
+      "2: \n\t"
+      : "=r" (beg)
+      : "0" (beg), "r" (end), "r" (flg)
+      : "r3");
+#endif
+#endif
 #endif
 }

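The FlushICache comment above alludes to the syscall the code would ideally make. A hedged sketch of that call (ARM Linux only; the real code hand-rolls the SVC in inline assembly precisely because the libc path is unreliable across EABI/old-ABI and ARM/Thumb builds):

```cpp
#include <stddef.h>
#include <unistd.h>      // syscall()
#include <asm/unistd.h>  // __ARM_NR_cacheflush; exists only on ARM Linux

// Sketch of the "ideal" instruction-cache flush: pass the begin and end
// addresses of the freshly written code region, with flags = 0.
void FlushICacheViaSyscall(void* start, size_t size) {
  char* beg = static_cast<char*>(start);
  char* end = beg + size;
  syscall(__ARM_NR_cacheflush, beg, end, 0 /* flags */);
}
```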
14
deps/v8/src/arm/debug-arm.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,7 @@
 #if defined(V8_TARGET_ARCH_ARM)

-#include "codegen.h"
+#include "codegen-inl.h"
 #include "debug.h"

 namespace v8 {
@@ -65,7 +65,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
   patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
   patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
 #endif
-  patcher.Emit(Isolate::Current()->debug()->debug_break_return()->entry());
+  patcher.Emit(Debug::debug_break_return()->entry());
   patcher.masm()->bkpt(0);
 }
@@ -115,7 +115,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
   patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
   patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
 #endif
-  patcher.Emit(Isolate::Current()->debug()->debug_break_slot()->entry());
+  patcher.Emit(Debug::debug_break_slot()->entry());
 }
@@ -159,7 +159,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
   __ RecordComment("// Calling from debug break to runtime - come in - over");
 #endif
   __ mov(r0, Operand(0, RelocInfo::NONE));  // no arguments
-  __ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
+  __ mov(r1, Operand(ExternalReference::debug_break()));

   CEntryStub ceb(1);
   __ CallStub(&ceb);
@@ -185,9 +185,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
   // Now that the break point has been handled, resume normal execution by
   // jumping to the target address intended by the caller and that was
   // overwritten by the address of DebugBreakXXX.
-  ExternalReference after_break_target =
-      ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
-  __ mov(ip, Operand(after_break_target));
+  __ mov(ip, Operand(ExternalReference(Debug_Address::AfterBreakTarget())));
   __ ldr(ip, MemOperand(ip));
   __ Jump(ip);
 }

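Every change in this file follows the same shape: the isolate-era accessor chain Isolate::Current()->debug()->... reverts to a static Debug::... call. A standalone toy model of the two access styles (illustrative names, not the real V8 classes):

```cpp
#include <cstdio>

// 3.1.8 style being restored: per-process static state.
struct DebugOld {
  static const char* debug_break_return() { return "stub@old"; }
};

// Newer style being reverted: the same state hangs off an Isolate instance,
// reached through a Current() accessor.
struct DebugNew {
  const char* debug_break_return() const { return "stub@new"; }
};
struct Isolate {
  static Isolate* Current() { static Isolate isolate; return &isolate; }
  DebugNew* debug() { return &debug_; }
  DebugNew debug_;
};

int main() {
  printf("%s\n", DebugOld::debug_break_return());                      // old
  printf("%s\n", Isolate::Current()->debug()->debug_break_return());  // new
}
```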
110
deps/v8/src/arm/deoptimizer-arm.cc

@@ -51,7 +51,6 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {

 void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
-  HandleScope scope;
   AssertNoAllocation no_allocation;

   if (!function->IsOptimized()) return;
@@ -75,6 +74,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
     int deoptimization_index = safepoint_entry.deoptimization_index();
     int gap_code_size = safepoint_entry.gap_code_size();
     // Check that we did not shoot past next safepoint.
+    // TODO(srdjan): How do we guarantee that safepoint code does not
+    // overlap other safepoint patching code?
     CHECK(pc_offset >= last_pc_offset);
 #ifdef DEBUG
     // Destroy the code which is not supposed to be run again.
@@ -111,9 +112,8 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {

   // Add the deoptimizing code to the list.
   DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
-  DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
-  node->set_next(data->deoptimizing_code_list_);
-  data->deoptimizing_code_list_ = node;
+  node->set_next(deoptimizing_code_list_);
+  deoptimizing_code_list_ = node;

   // Set the code for the function to non-optimized version.
   function->ReplaceCode(function->shared()->code());
@@ -122,11 +122,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
     PrintF("[forced deoptimization: ");
     function->PrintName();
     PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
-#ifdef DEBUG
-    if (FLAG_print_code) {
-      code->PrintLn();
-    }
-#endif
   }
 }
@@ -267,9 +262,6 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
   output_ = new FrameDescription*[1];
   output_[0] = new(output_frame_size) FrameDescription(
       output_frame_size, function_);
-#ifdef DEBUG
-  output_[0]->SetKind(Code::OPTIMIZED_FUNCTION);
-#endif

   // Clear the incoming parameters in the optimized frame to avoid
   // confusing the garbage collector.
@@ -291,33 +283,14 @@ void Deoptimizer::DoComputeOsrOutputFrame() {

   // There are no translation commands for the caller's pc and fp, the
   // context, and the function. Set them up explicitly.
-  for (int i = StandardFrameConstants::kCallerPCOffset;
-       ok && i >= StandardFrameConstants::kMarkerOffset;
-       i -= kPointerSize) {
+  for (int i = 0; ok && i < 4; i++) {
     uint32_t input_value = input_->GetFrameSlot(input_offset);
     if (FLAG_trace_osr) {
-      const char* name = "UNKNOWN";
-      switch (i) {
-        case StandardFrameConstants::kCallerPCOffset:
-          name = "caller's pc";
-          break;
-        case StandardFrameConstants::kCallerFPOffset:
-          name = "fp";
-          break;
-        case StandardFrameConstants::kContextOffset:
-          name = "context";
-          break;
-        case StandardFrameConstants::kMarkerOffset:
-          name = "function";
-          break;
-      }
-      PrintF("    [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
+      PrintF("    [sp + %d] <- 0x%08x ; [sp + %d] (fixed part)\n",
              output_offset,
              input_value,
-             input_offset,
-             name);
+             input_offset);
     }
     output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
     input_offset -= kPointerSize;
     output_offset -= kPointerSize;
@@ -343,7 +316,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
         optimized_code_->entry() + pc_offset);
     output_[0]->SetPc(pc);
   }
-  Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
+  Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
   output_[0]->SetContinuation(
       reinterpret_cast<uint32_t>(continuation->entry()));
@@ -385,9 +358,6 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
   // Allocate and store the output frame description.
   FrameDescription* output_frame =
       new(output_frame_size) FrameDescription(output_frame_size, function);
-#ifdef DEBUG
-  output_frame->SetKind(Code::FUNCTION);
-#endif

   bool is_bottommost = (0 == frame_index);
   bool is_topmost = (output_count_ - 1 == frame_index);
@@ -520,13 +490,11 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
       FullCodeGenerator::StateField::decode(pc_and_state);
   output_frame->SetState(Smi::FromInt(state));

   // Set the continuation for the topmost frame.
-  if (is_topmost && bailout_type_ != DEBUGGER) {
-    Builtins* builtins = isolate_->builtins();
+  if (is_topmost) {
     Code* continuation = (bailout_type_ == EAGER)
-        ? builtins->builtin(Builtins::kNotifyDeoptimized)
-        : builtins->builtin(Builtins::kNotifyLazyDeoptimized);
+        ? Builtins::builtin(Builtins::NotifyDeoptimized)
+        : Builtins::builtin(Builtins::NotifyLazyDeoptimized);
     output_frame->SetContinuation(
         reinterpret_cast<uint32_t>(continuation->entry()));
   }
@@ -535,36 +503,13 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
 }

-void Deoptimizer::FillInputFrame(Address tos, JavaScriptFrame* frame) {
-  // Set the register values. The values are not important as there are no
-  // callee saved registers in JavaScript frames, so all registers are
-  // spilled. Registers fp and sp are set to the correct values though.
-
-  for (int i = 0; i < Register::kNumRegisters; i++) {
-    input_->SetRegister(i, i * 4);
-  }
-  input_->SetRegister(sp.code(), reinterpret_cast<intptr_t>(frame->sp()));
-  input_->SetRegister(fp.code(), reinterpret_cast<intptr_t>(frame->fp()));
-  for (int i = 0; i < DoubleRegister::kNumAllocatableRegisters; i++) {
-    input_->SetDoubleRegister(i, 0.0);
-  }
-
-  // Fill the frame content from the actual data on the frame.
-  for (unsigned i = 0; i < input_->GetFrameSize(); i += kPointerSize) {
-    input_->SetFrameSlot(i, Memory::uint32_at(tos + i));
-  }
-}
-
-
 #define __ masm()->

 // This code tries to be close to ia32 code so that any changes can be
 // easily ported.
 void Deoptimizer::EntryGenerator::Generate() {
   GeneratePrologue();
-  Isolate* isolate = masm()->isolate();

   CpuFeatures::Scope scope(VFP3);
   // Save all general purpose registers before messing with them.
   const int kNumberOfRegisters = Register::kNumRegisters;
@@ -575,21 +520,13 @@ void Deoptimizer::EntryGenerator::Generate() {
   const int kDoubleRegsSize =
       kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;

-  // Save all VFP registers before messing with them.
-  DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
-  DwVfpRegister last =
-      DwVfpRegister::FromAllocationIndex(
-          DwVfpRegister::kNumAllocatableRegisters - 1);
-  ASSERT(last.code() > first.code());
-  ASSERT((last.code() - first.code()) ==
-         (DwVfpRegister::kNumAllocatableRegisters - 1));
-#ifdef DEBUG
-  for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
-    ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
-           (DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
-  }
-#endif
-  __ vstm(db_w, sp, first, last);
+  // Save all general purpose registers before messing with them.
+  __ sub(sp, sp, Operand(kDoubleRegsSize));
+  for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
+    DwVfpRegister vfp_reg = DwVfpRegister::FromAllocationIndex(i);
+    int offset = i * kDoubleSize;
+    __ vstr(vfp_reg, sp, offset);
+  }

   // Push all 16 registers (needed to populate FrameDescription::registers_).
   __ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
@@ -620,16 +557,14 @@ void Deoptimizer::EntryGenerator::Generate() {
   // Allocate a new deoptimizer object.
   // Pass four arguments in r0 to r3 and fifth argument on stack.
-  __ PrepareCallCFunction(6, r5);
+  __ PrepareCallCFunction(5, r5);
   __ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
   __ mov(r1, Operand(type()));  // bailout type,
   // r2: bailout id already loaded.
   // r3: code address or 0 already loaded.
   __ str(r4, MemOperand(sp, 0 * kPointerSize));  // Fp-to-sp delta.
-  __ mov(r5, Operand(ExternalReference::isolate_address()));
-  __ str(r5, MemOperand(sp, 1 * kPointerSize));  // Isolate.
   // Call Deoptimizer::New().
-  __ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
+  __ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);

   // Preserve "deoptimizer" object in register r0 and get the input
   // frame descriptor pointer to r1 (deoptimizer->input_);
@@ -683,8 +618,7 @@ void Deoptimizer::EntryGenerator::Generate() {
   // r0: deoptimizer object; r1: scratch.
   __ PrepareCallCFunction(1, r1);
   // Call Deoptimizer::ComputeOutputFrames().
-  __ CallCFunction(
-      ExternalReference::compute_output_frames_function(isolate), 1);
+  __ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
   __ pop(r0);  // Restore deoptimizer object (class Deoptimizer).

   // Replace the current (input) frame with the output frames.
@@ -734,7 +668,7 @@ void Deoptimizer::EntryGenerator::Generate() {
   __ pop(ip);  // remove lr

   // Set up the roots register.
-  ExternalReference roots_address = ExternalReference::roots_address(isolate);
+  ExternalReference roots_address = ExternalReference::roots_address();
   __ mov(r10, Operand(roots_address));

   __ pop(ip);  // remove pc

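FillInputFrame, removed above, snapshots the optimized frame's raw stack memory so the deoptimizer can rebuild unoptimized frames from it. A standalone sketch of the frame-content copy at its core (kPointerSize = 4 on ARM; toy types, not V8 code):

```cpp
#include <cstdint>
#include <cstring>
#include <vector>

static const int kPointerSize = 4;

// Copy the frame slot by slot, from top-of-stack (tos) upward, mirroring
// the removed loop: input_->SetFrameSlot(i, Memory::uint32_at(tos + i)).
std::vector<uint32_t> CaptureFrame(const uint8_t* tos, size_t frame_size) {
  std::vector<uint32_t> slots(frame_size / kPointerSize);
  for (size_t i = 0; i < frame_size; i += kPointerSize) {
    uint32_t value;
    memcpy(&value, tos + i, sizeof(value));  // unaligned-safe 32-bit read
    slots[i / kPointerSize] = value;
  }
  return slots;
}
```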
133
deps/v8/src/arm/disasm-arm.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -89,9 +89,6 @@ class Decoder {
   // Returns the length of the disassembled machine instruction in bytes.
   int InstructionDecode(byte* instruction);

-  static bool IsConstantPoolAt(byte* instr_ptr);
-  static int ConstantPoolSizeAt(byte* instr_ptr);
-
  private:
   // Bottleneck functions to print into the out_buffer.
   void PrintChar(const char ch);
@@ -371,34 +368,25 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
 int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
   ASSERT((format[0] == 'S') || (format[0] == 'D'));

-  VFPRegPrecision precision =
-      format[0] == 'D' ? kDoublePrecision : kSinglePrecision;
-
-  int retval = 2;
-  int reg = -1;
   if (format[1] == 'n') {
-    reg = instr->VFPNRegValue(precision);
+    int reg = instr->VnValue();
+    if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NValue()));
+    if (format[0] == 'D') PrintDRegister(reg);
+    return 2;
   } else if (format[1] == 'm') {
-    reg = instr->VFPMRegValue(precision);
+    int reg = instr->VmValue();
+    if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MValue()));
+    if (format[0] == 'D') PrintDRegister(reg);
+    return 2;
   } else if (format[1] == 'd') {
-    reg = instr->VFPDRegValue(precision);
-    if (format[2] == '+') {
-      int immed8 = instr->Immed8Value();
-      if (format[0] == 'S') reg += immed8 - 1;
-      if (format[0] == 'D') reg += (immed8 / 2 - 1);
-    }
-    if (format[2] == '+') retval = 3;
-  } else {
-    UNREACHABLE();
-  }
-
-  if (precision == kSinglePrecision) {
-    PrintSRegister(reg);
-  } else {
-    PrintDRegister(reg);
+    int reg = instr->VdValue();
+    if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DValue()));
+    if (format[0] == 'D') PrintDRegister(reg);
+    return 2;
   }

-  return retval;
+  UNREACHABLE();
+  return -1;
 }
@@ -502,16 +490,13 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
       ASSERT(STRING_STARTS_WITH(format, "memop"));
       if (instr->HasL()) {
         Print("ldr");
-      } else {
-        if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0) &&
-            (instr->Bits(7, 6) == 3) && (instr->Bit(4) == 1)) {
-          if (instr->Bit(5) == 1) {
-            Print("strd");
-          } else {
-            Print("ldrd");
-          }
-          return 5;
+      } else if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0)) {
+        if (instr->Bits(7, 4) == 0xf) {
+          Print("strd");
+        } else {
+          Print("ldrd");
         }
+      } else {
         Print("str");
       }
       return 5;
@@ -914,7 +899,6 @@ void Decoder::DecodeType2(Instruction* instr) {
     case da_x: {
       if (instr->HasW()) {
         Unknown(instr);  // not used in V8
-        return;
       }
       Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
       break;
@@ -922,7 +906,6 @@ void Decoder::DecodeType2(Instruction* instr) {
     case ia_x: {
       if (instr->HasW()) {
         Unknown(instr);  // not used in V8
-        return;
       }
       Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
       break;
@@ -1009,17 +992,13 @@ void Decoder::DecodeType3(Instruction* instr) {

 void Decoder::DecodeType4(Instruction* instr) {
-  if (instr->Bit(22) != 0) {
-    // Privileged mode currently not supported.
-    Unknown(instr);
-  } else {
+  ASSERT(instr->Bit(22) == 0);  // Privileged mode currently not supported.
   if (instr->HasL()) {
     Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
   } else {
     Format(instr, "stm'cond'pu 'rn'w, 'rlist");
   }
-  }
 }
@@ -1063,8 +1042,6 @@ int Decoder::DecodeType7(Instruction* instr) {
 // vmov: Rt = Sn
 // vcvt: Dd = Sm
 // vcvt: Sd = Dm
-// Dd = vabs(Dm)
-// Dd = vneg(Dm)
 // Dd = vadd(Dn, Dm)
 // Dd = vsub(Dn, Dm)
 // Dd = vmul(Dn, Dm)
@@ -1089,10 +1066,7 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
       }
     } else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
       // vabs
-      Format(instr, "vabs.f64'cond 'Dd, 'Dm");
-    } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
-      // vneg
-      Format(instr, "vneg.f64'cond 'Dd, 'Dm");
+      Format(instr, "vabs'cond 'Dd, 'Dm");
     } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
       DecodeVCVTBetweenDoubleAndSingle(instr);
     } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
@@ -1285,22 +1259,9 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
           Format(instr, "vstr'cond 'Sd, ['rn + 4*'imm08@00]");
         }
         break;
-      case 0x4:
-      case 0x5:
-      case 0x6:
-      case 0x7:
-      case 0x9:
-      case 0xB: {
-        bool to_vfp_register = (instr->VLValue() == 0x1);
-        if (to_vfp_register) {
-          Format(instr, "vldm'cond'pu 'rn'w, {'Sd-'Sd+}");
-        } else {
-          Format(instr, "vstm'cond'pu 'rn'w, {'Sd-'Sd+}");
-        }
-        break;
-      }
       default:
         Unknown(instr);  // Not used by V8.
-        break;
     }
   } else if (instr->CoprocessorValue() == 0xB) {
     switch (instr->OpcodeValue()) {
@@ -1328,38 +1289,12 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
           Format(instr, "vstr'cond 'Dd, ['rn + 4*'imm08@00]");
         }
         break;
-      case 0x4:
-      case 0x5:
-      case 0x9: {
-        bool to_vfp_register = (instr->VLValue() == 0x1);
-        if (to_vfp_register) {
-          Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}");
-        } else {
-          Format(instr, "vstm'cond'pu 'rn'w, {'Dd-'Dd+}");
-        }
-        break;
-      }
       default:
         Unknown(instr);  // Not used by V8.
-        break;
     }
   } else {
-    Unknown(instr);  // Not used by V8.
-  }
-}
-
-
-bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
-  int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
-  return (instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker;
-}
-
-
-int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
-  if (IsConstantPoolAt(instr_ptr)) {
-    int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
-    return instruction_bits & kConstantPoolLengthMask;
-  } else {
-    return -1;
+    UNIMPLEMENTED();  // Not used by V8.
   }
 }
@@ -1372,15 +1307,7 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
                                    "%08x  ",
                                    instr->InstructionBits());
   if (instr->ConditionField() == kSpecialCondition) {
-    Unknown(instr);
-    return Instruction::kInstrSize;
-  }
-  int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
-  if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
-    out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
-                                    "constant pool begin (length %d)",
-                                    instruction_bits &
-                                    kConstantPoolLengthMask);
+    UNIMPLEMENTED();
     return Instruction::kInstrSize;
   }
   switch (instr->TypeValue()) {
@@ -1432,8 +1359,9 @@ namespace disasm {

 const char* NameConverter::NameOfAddress(byte* addr) const {
-  v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
-  return tmp_buffer_.start();
+  static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
+  v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
+  return tmp_buffer.start();
 }
@@ -1483,7 +1411,12 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,

 int Disassembler::ConstantPoolSizeAt(byte* instruction) {
-  return v8::internal::Decoder::ConstantPoolSizeAt(instruction);
+  int instruction_bits = *(reinterpret_cast<int*>(instruction));
+  if ((instruction_bits & 0xfff00000) == 0x03000000) {
+    return instruction_bits & 0x0000ffff;
+  } else {
+    return -1;
+  }
 }

5
deps/v8/src/arm/frames-arm.h

@@ -72,9 +72,6 @@ static const RegList kCalleeSaved =

 static const int kNumCalleeSaved = 7 + kR9Available;

-// Double registers d8 to d15 are callee-saved.
-static const int kNumDoubleCalleeSaved = 8;
-
 // Number of registers for which space is reserved in safepoints. Must be a
 // multiple of 8.
@@ -139,7 +136,7 @@ class JavaScriptFrameConstants : public AllStatic {
  public:
   // FP-relative.
   static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
-  static const int kLastParameterOffset = +2 * kPointerSize;
+  static const int kSavedRegistersOffset = +2 * kPointerSize;
   static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;

   // Caller SP-relative.

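Frame constants such as kSavedRegistersOffset restored above are consumed as plain fp-relative loads (compare "__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset))" in deoptimizer-arm.cc earlier). A standalone sketch, assuming little-endian layout as on ARM here:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

static const int kPointerSize = 4;
static const int kSavedRegistersOffset = +2 * kPointerSize;  // from the diff

// A frame slot access is just a 32-bit load at fp + byte_offset.
uint32_t LoadFrameSlot(const uint8_t* fp, int byte_offset) {
  uint32_t value;
  memcpy(&value, fp + byte_offset, sizeof(value));
  return value;
}

int main() {
  uint8_t fake_frame[64] = {0};
  fake_frame[kSavedRegistersOffset] = 42;  // low byte (little-endian)
  printf("%u\n", LoadFrameSlot(fake_frame, kSavedRegistersOffset));  // 42
}
```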
1110
deps/v8/src/arm/full-codegen-arm.cc

File diff suppressed because it is too large

676
deps/v8/src/arm/ic-arm.cc

File diff suppressed because it is too large

174
deps/v8/src/arm/jump-target-arm.cc

@@ -0,0 +1,174 @@
+// Copyright 2008 the V8 project authors. All rights reserved.
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+//       notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+//       copyright notice, this list of conditions and the following
+//       disclaimer in the documentation and/or other materials provided
+//       with the distribution.
+//     * Neither the name of Google Inc. nor the names of its
+//       contributors may be used to endorse or promote products derived
+//       from this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include "v8.h"
+
+#if defined(V8_TARGET_ARCH_ARM)
+
+#include "codegen-inl.h"
+#include "jump-target-inl.h"
+#include "register-allocator-inl.h"
+#include "virtual-frame-inl.h"
+
+namespace v8 {
+namespace internal {
+
+// -------------------------------------------------------------------------
+// JumpTarget implementation.
+
+#define __ ACCESS_MASM(cgen()->masm())
+
+void JumpTarget::DoJump() {
+  ASSERT(cgen()->has_valid_frame());
+  // Live non-frame registers are not allowed at unconditional jumps
+  // because we have no way of invalidating the corresponding results
+  // which are still live in the C++ code.
+  ASSERT(cgen()->HasValidEntryRegisters());
+
+  if (entry_frame_set_) {
+    if (entry_label_.is_bound()) {
+      // If we already bound and generated code at the destination then it
+      // is too late to ask for less optimistic type assumptions.
+      ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
+    }
+    // There already a frame expectation at the target.
+    cgen()->frame()->MergeTo(&entry_frame_);
+    cgen()->DeleteFrame();
+  } else {
+    // Clone the current frame to use as the expected one at the target.
+    set_entry_frame(cgen()->frame());
+    // Zap the fall-through frame since the jump was unconditional.
+    RegisterFile empty;
+    cgen()->SetFrame(NULL, &empty);
+  }
+  if (entry_label_.is_bound()) {
+    // You can't jump backwards to an already bound label unless you admitted
+    // up front that this was a bidirectional jump target. Bidirectional jump
+    // targets will zap their type info when bound in case some later virtual
+    // frame with less precise type info branches to them.
+    ASSERT(direction_ != FORWARD_ONLY);
+  }
+  __ jmp(&entry_label_);
+}
+
+
+void JumpTarget::DoBranch(Condition cond, Hint ignored) {
+  ASSERT(cgen()->has_valid_frame());
+
+  if (entry_frame_set_) {
+    if (entry_label_.is_bound()) {
+      // If we already bound and generated code at the destination then it
+      // is too late to ask for less optimistic type assumptions.
+      ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
+    }
+    // We have an expected frame to merge to on the backward edge.
+    cgen()->frame()->MergeTo(&entry_frame_, cond);
+  } else {
+    // Clone the current frame to use as the expected one at the target.
+    set_entry_frame(cgen()->frame());
+  }
+  if (entry_label_.is_bound()) {
+    // You can't branch backwards to an already bound label unless you admitted
+    // up front that this was a bidirectional jump target. Bidirectional jump
+    // targets will zap their type info when bound in case some later virtual
+    // frame with less precise type info branches to them.
+    ASSERT(direction_ != FORWARD_ONLY);
+  }
+  __ b(cond, &entry_label_);
+  if (cond == al) {
+    cgen()->DeleteFrame();
+  }
+}
+
+
+void JumpTarget::Call() {
+  // Call is used to push the address of the catch block on the stack as
+  // a return address when compiling try/catch and try/finally. We
+  // fully spill the frame before making the call. The expected frame
+  // at the label (which should be the only one) is the spilled current
+  // frame plus an in-memory return address. The "fall-through" frame
+  // at the return site is the spilled current frame.
+  ASSERT(cgen()->has_valid_frame());
+  // There are no non-frame references across the call.
+  ASSERT(cgen()->HasValidEntryRegisters());
+  ASSERT(!is_linked());
+
+  // Calls are always 'forward' so we use a copy of the current frame (plus
+  // one for a return address) as the expected frame.
+  ASSERT(!entry_frame_set_);
+  VirtualFrame target_frame = *cgen()->frame();
+  target_frame.Adjust(1);
+  set_entry_frame(&target_frame);
+
+  __ bl(&entry_label_);
+}
+
+
+void JumpTarget::DoBind() {
+  ASSERT(!is_bound());
+
+  // Live non-frame registers are not allowed at the start of a basic
+  // block.
+  ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
+
+  if (cgen()->has_valid_frame()) {
+    if (direction_ != FORWARD_ONLY) cgen()->frame()->ForgetTypeInfo();
+    // If there is a current frame we can use it on the fall through.
+    if (!entry_frame_set_) {
+      entry_frame_ = *cgen()->frame();
+      entry_frame_set_ = true;
+    } else {
+      cgen()->frame()->MergeTo(&entry_frame_);
+      // On fall through we may have to merge both ways.
+      if (direction_ != FORWARD_ONLY) {
+        // This will not need to adjust the virtual frame entries that are
+        // register allocated since that was done above and they now match.
+        // But it does need to adjust the entry_frame_ of this jump target
+        // to make it potentially less optimistic. Later code can branch back
+        // to this jump target and we need to assert that that code does not
+        // have weaker assumptions about types.
+        entry_frame_.MergeTo(cgen()->frame());
+      }
+    }
+  } else {
+    // If there is no current frame we must have an entry frame which we can
+    // copy.
+    ASSERT(entry_frame_set_);
+    RegisterFile empty;
+    cgen()->SetFrame(new VirtualFrame(&entry_frame_), &empty);
+  }
+
+  __ bind(&entry_label_);
+}
+
+#undef __
+
+} }  // namespace v8::internal
+
+#endif  // V8_TARGET_ARCH_ARM

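A hedged model of the JumpTarget protocol shown above: the first jump to a target records the expected virtual-frame state, and later jumps as well as the final bind must present a compatible frame. The real code merges frames and handles registers and type info; this standalone toy only tracks and checks stack height:

```cpp
#include <cassert>
#include <optional>

// Toy stand-in for the virtual frame: only the stack height matters here.
struct VirtualFrame { int height = 0; };

struct JumpTargetModel {
  std::optional<VirtualFrame> entry_frame_;  // expected state at the label

  // Unconditional jump: record (or check) the expectation, kill fall-through.
  void Jump(std::optional<VirtualFrame>& frame) {
    assert(frame.has_value());                 // need a live frame to jump from
    if (!entry_frame_) entry_frame_ = *frame;  // first jump sets the target
    else assert(frame->height == entry_frame_->height);  // must be mergeable
    frame.reset();                             // zap the fall-through frame
  }

  // Bind the label: merge the fall-through frame, or restore from the target.
  void Bind(std::optional<VirtualFrame>& frame) {
    if (frame) {
      if (!entry_frame_) entry_frame_ = *frame;
      else assert(frame->height == entry_frame_->height);
    } else {
      assert(entry_frame_.has_value());        // copy the expected frame
      frame = entry_frame_;
    }
  }
};

int main() {
  JumpTargetModel target;
  std::optional<VirtualFrame> frame = VirtualFrame{2};
  target.Jump(frame);   // records expected height 2, kills fall-through
  target.Bind(frame);   // no fall-through: frame restored from the target
  return frame->height == 2 ? 0 : 1;
}
```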
659
deps/v8/src/arm/lithium-arm.cc

File diff suppressed because it is too large

638
deps/v8/src/arm/lithium-arm.h

File diff suppressed because it is too large

1884
deps/v8/src/arm/lithium-codegen-arm.cc

File diff suppressed because it is too large

52
deps/v8/src/arm/lithium-codegen-arm.h

@@ -51,10 +51,9 @@ class LCodeGen BASE_EMBEDDED {
         current_instruction_(-1),
         instructions_(chunk->instructions()),
         deoptimizations_(4),
-        deopt_jump_table_(4),
         deoptimization_literals_(8),
         inlined_function_count_(0),
-        scope_(info->scope()),
+        scope_(chunk->graph()->info()->scope()),
         status_(UNUSED),
         deferred_(8),
         osr_pc_offset_(-1),
@@ -66,10 +65,6 @@ class LCodeGen BASE_EMBEDDED {

   // Simple accessors.
   MacroAssembler* masm() const { return masm_; }
-  CompilationInfo* info() const { return info_; }
-  Isolate* isolate() const { return info_->isolate(); }
-  Factory* factory() const { return isolate()->factory(); }
-  Heap* heap() const { return isolate()->heap(); }

   // Support for converting LOperands to assembler types.
   // LOperand must be a register.
@@ -108,15 +103,13 @@ class LCodeGen BASE_EMBEDDED {
   void DoDeferredNumberTagI(LNumberTagI* instr);
   void DoDeferredTaggedToI(LTaggedToI* instr);
   void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
-  void DoDeferredStackCheck(LStackCheck* instr);
+  void DoDeferredStackCheck(LGoto* instr);
   void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
-  void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
   void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
                                         Label* map_check);

   // Parallel move support.
   void DoParallelMove(LParallelMove* move);
-  void DoGap(LGap* instr);

   // Emit frame translation commands for an environment.
   void WriteTranslation(LEnvironment* environment, Translation* translation);
@@ -140,7 +133,7 @@ class LCodeGen BASE_EMBEDDED {
   bool is_aborted() const { return status_ == ABORTED; }

   int strict_mode_flag() const {
-    return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
+    return info_->is_strict() ? kStrictMode : kNonStrictMode;
   }

   LChunk* chunk() const { return chunk_; }
@@ -148,7 +141,7 @@ class LCodeGen BASE_EMBEDDED {
   HGraph* graph() const { return chunk_->graph(); }

   Register scratch0() { return r9; }
-  DwVfpRegister double_scratch0() { return d15; }
+  DwVfpRegister double_scratch0() { return d0; }

   int GetNextEmittedBlock(int block);
   LInstruction* GetNextInstruction();
@@ -160,8 +153,8 @@ class LCodeGen BASE_EMBEDDED {
                        Register temporary,
                        Register temporary2);

-  int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
-  int GetParameterCount() const { return scope()->num_parameters(); }
+  int StackSlotCount() const { return chunk()->spill_slot_count(); }
+  int ParameterCount() const { return scope()->num_parameters(); }

   void Abort(const char* format, ...);
   void Comment(const char* format, ...);
@@ -173,7 +166,6 @@ class LCodeGen BASE_EMBEDDED {
   bool GeneratePrologue();
   bool GenerateBody();
   bool GenerateDeferredCode();
-  bool GenerateDeoptJumpTable();
   bool GenerateSafepointTable();

   enum SafepointMode {
@@ -190,14 +182,14 @@ class LCodeGen BASE_EMBEDDED {
                        LInstruction* instr,
                        SafepointMode safepoint_mode);

-  void CallRuntime(const Runtime::Function* function,
+  void CallRuntime(Runtime::Function* function,
                    int num_arguments,
                    LInstruction* instr);

   void CallRuntime(Runtime::FunctionId id,
                    int num_arguments,
                    LInstruction* instr) {
-    const Runtime::Function* function = Runtime::FunctionForId(id);
+    Runtime::Function* function = Runtime::FunctionForId(id);
     CallRuntime(function, num_arguments, instr);
   }
@@ -209,8 +201,7 @@ class LCodeGen BASE_EMBEDDED {
   // to be in edi.
   void CallKnownFunction(Handle<JSFunction> function,
                          int arity,
-                         LInstruction* instr,
-                         CallKind call_kind);
+                         LInstruction* instr);

   void LoadHeapObject(Register result, Handle<HeapObject> object);
@@ -237,10 +228,6 @@ class LCodeGen BASE_EMBEDDED {
   void DoMathFloor(LUnaryMathOperation* instr);
   void DoMathRound(LUnaryMathOperation* instr);
   void DoMathSqrt(LUnaryMathOperation* instr);
-  void DoMathPowHalf(LUnaryMathOperation* instr);
-  void DoMathLog(LUnaryMathOperation* instr);
-  void DoMathCos(LUnaryMathOperation* instr);
-  void DoMathSin(LUnaryMathOperation* instr);

   // Support for recording safepoint and position information.
   void RecordSafepoint(LPointerMap* pointers,
@@ -256,17 +243,13 @@ class LCodeGen BASE_EMBEDDED {
                                     int arguments,
                                     int deoptimization_index);
   void RecordPosition(int position);
-  int LastSafepointEnd() {
-    return static_cast<int>(safepoints_.GetPcAfterGap());
-  }

   static Condition TokenToCondition(Token::Value op, bool is_unsigned);
-  void EmitGoto(int block);
+  void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
   void EmitBranch(int left_block, int right_block, Condition cc);
   void EmitCmpI(LOperand* left, LOperand* right);
   void EmitNumberUntagD(Register input,
                         DoubleRegister result,
-                        bool deoptimize_on_undefined,
                         LEnvironment* env);

   // Emits optimized code for typeof x == "y". Modifies input register.
@@ -280,6 +263,7 @@ class LCodeGen BASE_EMBEDDED {
   // true and false label should be made, to optimize fallthrough.
   Condition EmitIsObject(Register input,
                          Register temp1,
+                         Register temp2,
                          Label* is_not_object,
                          Label* is_object);
@@ -287,19 +271,6 @@ class LCodeGen BASE_EMBEDDED {
   // Caller should branch on equal condition.
   void EmitIsConstructCall(Register temp1, Register temp2);

-  void EmitLoadFieldOrConstantFunction(Register result,
-                                       Register object,
-                                       Handle<Map> type,
-                                       Handle<String> name);
-
-  struct JumpTableEntry {
-    explicit inline JumpTableEntry(Address entry)
-        : label(),
-          address(entry) { }
-    Label label;
-    Address address;
-  };
-
   LChunk* const chunk_;
   MacroAssembler* const masm_;
   CompilationInfo* const info_;
@@ -308,7 +279,6 @@ class LCodeGen BASE_EMBEDDED {
   int current_instruction_;
   const ZoneList<LInstruction*>* instructions_;
   ZoneList<LEnvironment*> deoptimizations_;
-  ZoneList<JumpTableEntry> deopt_jump_table_;
   ZoneList<Handle<Object> > deoptimization_literals_;
   int inlined_function_count_;
   Scope* const scope_;

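For contrast with 3.1.8: the JumpTableEntry / deopt_jump_table_ members removed above implement a per-code-object deopt jump table, so each deopt check branches to one shared stub per target address instead of materializing a 32-bit address at every check. A standalone sketch of that bookkeeping, with a toy "assembler":

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

struct JumpTableEntry {
  explicit JumpTableEntry(uintptr_t entry) : address(entry) {}
  int label = -1;       // stand-in for the assembler Label that gets bound
  uintptr_t address;    // deoptimizer entry point this stub jumps to
};

int main() {
  std::vector<JumpTableEntry> deopt_jump_table;
  // Each DeoptimizeIf-style check would append an entry and emit a
  // conditional branch to its (not yet bound) label.
  deopt_jump_table.push_back(JumpTableEntry(0x1000));
  deopt_jump_table.push_back(JumpTableEntry(0x2000));

  // GenerateDeoptJumpTable-style pass: bind each label after the body and
  // emit a single jump per entry.
  for (size_t i = 0; i < deopt_jump_table.size(); ++i) {
    deopt_jump_table[i].label = static_cast<int>(i);  // "bind" the label
    printf("stub %d: jmp %#lx\n", deopt_jump_table[i].label,
           static_cast<unsigned long>(deopt_jump_table[i].address));
  }
}
```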
2
deps/v8/src/arm/lithium-gap-resolver-arm.cc

@@ -25,8 +25,6 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-#include "v8.h"
-
 #include "arm/lithium-gap-resolver-arm.h"
 #include "arm/lithium-codegen-arm.h"

765
deps/v8/src/arm/macro-assembler-arm.cc

File diff suppressed because it is too large

207
deps/v8/src/arm/macro-assembler-arm.h

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved. // Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@ -29,11 +29,13 @@
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_ #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "assembler.h" #include "assembler.h"
#include "v8globals.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
// Forward declaration.
class PostCallGenerator;
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Static helper functions // Static helper functions
@ -53,6 +55,12 @@ static inline Operand SmiUntagOperand(Register object) {
const Register cp = { 8 }; // JavaScript context pointer const Register cp = { 8 }; // JavaScript context pointer
const Register roots = { 10 }; // Roots array pointer. const Register roots = { 10 }; // Roots array pointer.
enum InvokeJSFlags {
CALL_JS,
JUMP_JS
};
// Flags used for the AllocateInNewSpace functions. // Flags used for the AllocateInNewSpace functions.
enum AllocationFlags { enum AllocationFlags {
// No special flags. // No special flags.
@ -82,28 +90,15 @@ enum ObjectToDoubleFlags {
// MacroAssembler implements a collection of frequently used macros. // MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler { class MacroAssembler: public Assembler {
public: public:
// The isolate parameter can be NULL if the macro assembler should MacroAssembler(void* buffer, int size);
// not use isolate-dependent functionality. In this case, it's the
// responsibility of the caller to never invoke such function on the
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
// Jump, Call, and Ret pseudo instructions implementing inter-working. // Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al); void Jump(Register target, Condition cond = al);
void Jump(Address target, RelocInfo::Mode rmode, Condition cond = al); void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al); void Call(Register target, Condition cond = al);
int CallSize(Address target, RelocInfo::Mode rmode, Condition cond = al); void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Address target, RelocInfo::Mode rmode, Condition cond = al); void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId,
Condition cond = al);
void Call(Handle<Code> code,
RelocInfo::Mode rmode = RelocInfo::CODE_TARGET,
unsigned ast_id = kNoASTId,
Condition cond = al);
void Ret(Condition cond = al); void Ret(Condition cond = al);
// Emit code to discard a non-negative number of pointer-sized elements // Emit code to discard a non-negative number of pointer-sized elements
@@ -140,12 +135,11 @@ class MacroAssembler: public Assembler {
                Condition cond = al);

   void Call(Label* target);
-  // Register move. May do nothing if the registers are identical.
   void Move(Register dst, Handle<Object> value);
-  void Move(Register dst, Register src, Condition cond = al);
-  void Move(DoubleRegister dst, DoubleRegister src);
+  // May do nothing if the registers are identical.
+  void Move(Register dst, Register src);
+  // Jumps to the label at the index given by the Smi in "index".
+  void SmiJumpTable(Register index, Vector<Label*> targets);
   // Load an object from the root table.
   void LoadRoot(Register destination,
                 Heap::RootListIndex index,
@@ -190,9 +184,6 @@ class MacroAssembler: public Assembler {
               Register address,
               Register scratch);

-  // Push a handle.
-  void Push(Handle<Object> handle);
-
   // Push two registers. Pushes leftmost register first (to highest address).
   void Push(Register src1, Register src2, Condition cond = al) {
     ASSERT(!src1.is(src2));
@@ -312,10 +303,6 @@ class MacroAssembler: public Assembler {
             const Register fpscr_flags,
             const Condition cond = al);

-  void Vmov(const DwVfpRegister dst,
-            const double imm,
-            const Condition cond = al);
-
   // ---------------------------------------------------------------------------
   // Activation frames
@@ -351,38 +338,29 @@ class MacroAssembler: public Assembler {
   // ---------------------------------------------------------------------------
   // JavaScript invokes

-  // Setup call kind marking in ecx. The method takes ecx as an
-  // explicit first parameter to make the code more readable at the
-  // call sites.
-  void SetCallKind(Register dst, CallKind kind);
-
   // Invoke the JavaScript function code by either calling or jumping.
   void InvokeCode(Register code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   InvokeFlag flag,
-                  const CallWrapper& call_wrapper,
-                  CallKind call_kind);
+                  PostCallGenerator* post_call_generator = NULL);

   void InvokeCode(Handle<Code> code,
                   const ParameterCount& expected,
                   const ParameterCount& actual,
                   RelocInfo::Mode rmode,
-                  InvokeFlag flag,
-                  CallKind call_kind);
+                  InvokeFlag flag);

   // Invoke the JavaScript function in the given register. Changes the
   // current context to the context in the function before invoking.
   void InvokeFunction(Register function,
                       const ParameterCount& actual,
                       InvokeFlag flag,
-                      const CallWrapper& call_wrapper,
-                      CallKind call_kind);
+                      PostCallGenerator* post_call_generator = NULL);

   void InvokeFunction(JSFunction* function,
                       const ParameterCount& actual,
-                      InvokeFlag flag,
-                      CallKind call_kind);
+                      InvokeFlag flag);

   void IsObjectJSObjectType(Register heap_object,
                             Register map,
@@ -582,12 +560,6 @@ class MacroAssembler: public Assembler {
                                  InstanceType type);

-  // Check if a map for a JSObject indicates that the object has fast elements.
-  // Jump to the specified label if it does not.
-  void CheckFastElements(Register map,
-                         Register scratch,
-                         Label* fail);
-
   // Check if the map of an object is equal to a specified map (either
   // given directly or as an index into the root list) and branch to
   // label if not. Skip the smi check if not required (object is known
@@ -596,29 +568,13 @@ class MacroAssembler: public Assembler {
                 Register scratch,
                 Handle<Map> map,
                 Label* fail,
-                SmiCheckType smi_check_type);
+                bool is_heap_object);

   void CheckMap(Register obj,
                 Register scratch,
                 Heap::RootListIndex index,
                 Label* fail,
-                SmiCheckType smi_check_type);
-
-  // Check if the map of an object is equal to a specified map and branch to a
-  // specified target if equal. Skip the smi check if not required (object is
-  // known to be a heap object)
-  void DispatchMap(Register obj,
-                   Register scratch,
-                   Handle<Map> map,
-                   Handle<Code> success,
-                   SmiCheckType smi_check_type);
-
-  // Compare the object in a register to a value from the root list.
-  // Uses the ip register as scratch.
-  void CompareRoot(Register obj, Heap::RootListIndex index);
+                bool is_heap_object);

   // Load and check the instance type of an object for being a string.
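The CheckMap signature reverts from the SmiCheckType enum to a plain bool. A hedged sketch of the call-site difference (registers, map handle, and labels are illustrative):

  // Newer V8 (removed above): the enum spells out the smi check.
  //   __ CheckMap(r0, r1, map, &miss, DO_SMI_CHECK);
  // 3.1.8.25 (restored): pass false when the value may still be a smi,
  // true when it is already known to be a heap object.
  __ CheckMap(r0, r1, map, &miss, false);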
@@ -698,27 +654,6 @@ class MacroAssembler: public Assembler {
                              CheckForInexactConversion check
                                  = kDontCheckForInexactConversion);

-  // Helper for EmitECMATruncate.
-  // This will truncate a floating-point value outside of the signed 32bit
-  // integer range to a 32bit signed integer.
-  // Expects the double value loaded in input_high and input_low.
-  // Exits with the answer in 'result'.
-  // Note that this code does not work for values in the 32bit range!
-  void EmitOutOfInt32RangeTruncate(Register result,
-                                   Register input_high,
-                                   Register input_low,
-                                   Register scratch);
-
-  // Performs a truncating conversion of a floating point number as used by
-  // the JS bitwise operations. See ECMA-262 9.5: ToInt32.
-  // Exits with 'result' holding the answer and all other registers clobbered.
-  void EmitECMATruncate(Register result,
-                        DwVfpRegister double_input,
-                        SwVfpRegister single_scratch,
-                        Register scratch,
-                        Register scratch2,
-                        Register scratch3);
-
   // Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
   // instruction. On pre-ARM5 hardware this routine gives the wrong answer
   // for 0 (31 instead of 32). Source and scratch can be the same in which case
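EmitECMATruncate (removed above) implemented ECMA-262 9.5 ToInt32 in generated code; the downgraded tree falls back to the plain VFP conversion helpers that remain. For reference, a host-side C++ sketch of what ToInt32 computes (this follows the spec wording, not V8's register-level code path):

  #include <cmath>
  #include <cstdint>

  // ECMA-262 9.5 ToInt32: truncate toward zero, wrap modulo 2^32, then
  // reinterpret the low 32 bits as signed. NaN, +/-Inf and 0 map to 0.
  int32_t ToInt32(double value) {
    if (std::isnan(value) || std::isinf(value) || value == 0) return 0;
    double truncated = std::trunc(value);
    double wrapped = std::fmod(truncated, 4294967296.0);  // 2^32
    if (wrapped < 0) wrapped += 4294967296.0;             // into [0, 2^32)
    return static_cast<int32_t>(static_cast<uint32_t>(wrapped));
  }

For example, ToInt32(2147483648.0) yields -2147483648, which is why the removed helper needed a dedicated out-of-range path.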
@@ -734,11 +669,6 @@ class MacroAssembler: public Assembler {
   // Call a code stub.
   void CallStub(CodeStub* stub, Condition cond = al);

-  // Call a code stub and return the code object called. Try to generate
-  // the code if necessary. Do not perform a GC but instead return a retry
-  // after GC failure.
-  MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub, Condition cond = al);
-
   // Call a code stub.
   void TailCallStub(CodeStub* stub, Condition cond = al);
@@ -749,7 +679,7 @@ class MacroAssembler: public Assembler {
                     Condition cond = al);

   // Call a runtime routine.
-  void CallRuntime(const Runtime::Function* f, int num_arguments);
+  void CallRuntime(Runtime::Function* f, int num_arguments);
   void CallRuntimeSaveDoubles(Runtime::FunctionId id);

   // Convenience function: Same as above, but takes the fid instead.
@@ -777,32 +707,15 @@ class MacroAssembler: public Assembler {
                                  int num_arguments,
                                  int result_size);

-  int CalculateStackPassedWords(int num_reg_arguments,
-                                int num_double_arguments);
-
   // Before calling a C-function from generated code, align arguments on stack.
   // After aligning the frame, non-register arguments must be stored in
   // sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
-  // are word sized. If double arguments are used, this function assumes that
-  // all double arguments are stored before core registers; otherwise the
-  // correct alignment of the double values is not guaranteed.
+  // are word sized.
   // Some compilers/platforms require the stack to be aligned when calling
   // C++ code.
   // Needs a scratch register to do some arithmetic. This register will be
   // trashed.
-  void PrepareCallCFunction(int num_reg_arguments,
-                            int num_double_registers,
-                            Register scratch);
-  void PrepareCallCFunction(int num_reg_arguments,
-                            Register scratch);
-
-  // There are two ways of passing double arguments on ARM, depending on
-  // whether soft or hard floating point ABI is used. These functions
-  // abstract parameter passing for the three different ways we call
-  // C functions from generated code.
-  void SetCallCDoubleArguments(DoubleRegister dreg);
-  void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
-  void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
+  void PrepareCallCFunction(int num_arguments, Register scratch);

   // Calls a C function and cleans up the space for arguments allocated
   // by PrepareCallCFunction. The called function is not allowed to trigger a
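The C-call protocol shrinks back to the two-step word-argument form: align the stack, stage arguments in r0..r3 (and sp slots beyond four), then call. A hedged sketch of the idiom, modeled on the regexp call sites later in this diff (register choices are illustrative):

  static const int kNumArguments = 2;
  __ PrepareCallCFunction(kNumArguments, r5);  // aligns sp; trashes r5
  __ mov(r0, backtrack_stackpointer());        // first word-sized argument
  __ add(r1, frame_pointer(), Operand(kStackHighEnd));  // second argument
  __ CallCFunction(ExternalReference::re_grow_stack(), kNumArguments);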
@@ -810,13 +723,7 @@ class MacroAssembler: public Assembler {
   // return address (unless this is somehow accounted for by the called
   // function).
   void CallCFunction(ExternalReference function, int num_arguments);
-  void CallCFunction(Register function, Register scratch, int num_arguments);
-  void CallCFunction(ExternalReference function,
-                     int num_reg_arguments,
-                     int num_double_arguments);
-  void CallCFunction(Register function, Register scratch,
-                     int num_reg_arguments,
-                     int num_double_arguments);
+  void CallCFunction(Register function, int num_arguments);

   void GetCFunctionDoubleResult(const DoubleRegister dst);
@@ -835,8 +742,8 @@ class MacroAssembler: public Assembler {
   // Invoke specified builtin JavaScript function. Adds an entry to
   // the unresolved list if the name does not resolve.
   void InvokeBuiltin(Builtins::JavaScript id,
-                     InvokeFlag flag,
-                     const CallWrapper& call_wrapper = NullCallWrapper());
+                     InvokeJSFlags flags,
+                     PostCallGenerator* post_call_generator = NULL);

   // Store the code object for the given builtin in the target register and
   // setup the function in r1.
@@ -845,10 +752,7 @@ class MacroAssembler: public Assembler {
   // Store the function for the given builtin in the target register.
   void GetBuiltinFunction(Register target, Builtins::JavaScript id);

-  Handle<Object> CodeObject() {
-    ASSERT(!code_object_.is_null());
-    return code_object_;
-  }
+  Handle<Object> CodeObject() { return code_object_; }

   // ---------------------------------------------------------------------------
@@ -883,15 +787,6 @@ class MacroAssembler: public Assembler {
   void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
   bool allow_stub_calls() { return allow_stub_calls_; }

-  // EABI variant for double arguments in use.
-  bool use_eabi_hardfloat() {
-#if USE_EABI_HARDFLOAT
-    return true;
-#else
-    return false;
-#endif
-  }
-
   // ---------------------------------------------------------------------------
   // Number utilities
@@ -902,16 +797,6 @@ class MacroAssembler: public Assembler {
   void JumpIfNotPowerOfTwoOrZero(Register reg,
                                  Register scratch,
                                  Label* not_power_of_two_or_zero);
-  // Check whether the value of reg is a power of two and not zero.
-  // Control falls through if it is, with scratch containing the mask
-  // value (reg - 1).
-  // Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
-  // zero or negative, or jumps to the 'not_power_of_two' label if the value is
-  // strictly positive but not a power of two.
-  void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
-                                       Register scratch,
-                                       Label* zero_and_neg,
-                                       Label* not_power_of_two);

   // ---------------------------------------------------------------------------
   // Smi utilities
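The removed JumpIfNotPowerOfTwoOrZeroAndNeg leaned on the usual reg - 1 mask trick; its branch structure restated as plain C++ (a host-side illustration, not V8 code):

  #include <cstdint>

  // Falls through (returns true) only for a strictly positive power of
  // two, leaving mask = x - 1, exactly as the removed helper documented.
  bool IsPowerOfTwoNotZero(int32_t x, uint32_t* mask) {
    if (x <= 0) return false;                // 'zero_and_neg' path
    uint32_t ux = static_cast<uint32_t>(x);
    if ((ux & (ux - 1)) != 0) return false;  // 'not_power_of_two' path
    *mask = ux - 1;                          // scratch value on fall-through
    return true;
  }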
@@ -1019,23 +904,9 @@ class MacroAssembler: public Assembler {
                                        Register result);

-  void ClampUint8(Register output_reg, Register input_reg);
-
-  void ClampDoubleToUint8(Register result_reg,
-                          DoubleRegister input_reg,
-                          DoubleRegister temp_double_reg);
-
-  void LoadInstanceDescriptors(Register map, Register descriptors);
-
  private:
-  void CallCFunctionHelper(Register function,
-                           ExternalReference function_reference,
-                           Register scratch,
-                           int num_reg_arguments,
-                           int num_double_arguments);
-
   void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
-  void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);

   // Helper functions for generating invokes.
   void InvokePrologue(const ParameterCount& expected,
@@ -1044,8 +915,7 @@ class MacroAssembler: public Assembler {
                       Register code_reg,
                       Label* done,
                       InvokeFlag flag,
-                      const CallWrapper& call_wrapper,
-                      CallKind call_kind);
+                      PostCallGenerator* post_call_generator = NULL);

   // Activation support.
   void EnterFrame(StackFrame::Type type);
@@ -1106,6 +976,17 @@ class CodePatcher {
 #endif  // ENABLE_DEBUGGER_SUPPORT

+// Helper class for generating code or data associated with the code
+// right after a call instruction. As an example this can be used to
+// generate safepoint data after calls for crankshaft.
+class PostCallGenerator {
+ public:
+  PostCallGenerator() { }
+  virtual ~PostCallGenerator() { }
+  virtual void Generate() = 0;
+};
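PostCallGenerator, restored above, is the hook the old tree used to emit data right after a call instruction (e.g. safepoints for crankshaft). A hedged sketch of a typical subclass; the RecordSafepoint call stands in for whatever the code generator records and is illustrative, not copied from lithium-codegen:

  class SafepointGenerator : public PostCallGenerator {
   public:
    explicit SafepointGenerator(LCodeGen* codegen) : codegen_(codegen) { }
    virtual ~SafepointGenerator() { }
    // Runs immediately after the call instruction has been emitted.
    virtual void Generate() {
      codegen_->RecordSafepoint();  // illustrative recording call
    }
   private:
    LCodeGen* codegen_;
  };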
 // -----------------------------------------------------------------------------
 // Static helper functions.

45
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@@ -60,7 +60,6 @@ namespace internal {
  * Each call to a public method should retain this convention.
  *
  * The stack will have the following structure:
- *       - fp[52]  Isolate* isolate   (Address of the current isolate)
  *       - fp[48]  direct_call  (if 1, direct call from JavaScript code,
  *                               if 0, call through the runtime system).
  *       - fp[44]  stack_area_base (High end of the memory area to use as
@@ -116,7 +115,7 @@ namespace internal {
 RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
     Mode mode,
     int registers_to_save)
-    : masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
+    : masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
       mode_(mode),
       num_registers_(registers_to_save),
       num_saved_registers_(registers_to_save),
@@ -347,7 +346,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
     __ sub(current_input_offset(), r2, end_of_input_address());
   } else {
     ASSERT(mode_ == UC16);
-    int argument_count = 4;
+    int argument_count = 3;
     __ PrepareCallCFunction(argument_count, r2);

     // r0 - offset of start of capture
@@ -358,7 +357,6 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
     //   r0: Address byte_offset1 - Address captured substring's start.
     //   r1: Address byte_offset2 - Address of current character position.
     //   r2: size_t byte_length - length of capture in bytes(!)
-    //   r3: Isolate* isolate

     // Address of start of capture.
     __ add(r0, r0, Operand(end_of_input_address()));
@@ -368,11 +366,9 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
     __ mov(r4, Operand(r1));
     // Address of current input position.
     __ add(r1, current_input_offset(), Operand(end_of_input_address()));
-    // Isolate.
-    __ mov(r3, Operand(ExternalReference::isolate_address()));

     ExternalReference function =
-        ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
+        ExternalReference::re_case_insensitive_compare_uc16();
     __ CallCFunction(function, argument_count);

     // Check if function returned non-zero for success or zero for failure.
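For reference, the C function behind this call site: after the downgrade it takes exactly the three word-sized arguments described by the r0-r2 comments above and no Isolate. A hedged sketch of its shape (the name follows NativeRegExpMacroAssembler's helper; treat the exact signature as an assumption):

  // Returns non-zero on a case-insensitive match and zero otherwise,
  // matching the result check emitted right after CallCFunction.
  extern "C" int CaseInsensitiveCompareUC16(Address byte_offset1,  // capture start
                                            Address byte_offset2,  // current position
                                            size_t byte_length);   // capture size in bytes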
@@ -605,7 +601,7 @@ void RegExpMacroAssemblerARM::Fail() {
 }

-Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
+Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   // Finalize code - write the entry point code now we know how many
   // registers we need.
@@ -630,7 +626,7 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
   Label stack_ok;

   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(masm_->isolate());
+      ExternalReference::address_of_stack_limit();
   __ mov(r0, Operand(stack_limit));
   __ ldr(r0, MemOperand(r0));
   __ sub(r0, sp, r0, SetCC);
@@ -781,13 +777,12 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
     Label grow_failed;

     // Call GrowStack(backtrack_stackpointer(), &stack_base)
-    static const int num_arguments = 3;
+    static const int num_arguments = 2;
     __ PrepareCallCFunction(num_arguments, r0);
     __ mov(r0, backtrack_stackpointer());
     __ add(r1, frame_pointer(), Operand(kStackHighEnd));
-    __ mov(r2, Operand(ExternalReference::isolate_address()));
     ExternalReference grow_stack =
-        ExternalReference::re_grow_stack(masm_->isolate());
+        ExternalReference::re_grow_stack();
     __ CallCFunction(grow_stack, num_arguments);
     // If return NULL, we have failed to grow the stack, and
     // must exit with a stack-overflow exception.
@@ -809,11 +804,11 @@ Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {

   CodeDesc code_desc;
   masm_->GetCode(&code_desc);
-  Handle<Code> code = FACTORY->NewCode(code_desc,
+  Handle<Code> code = Factory::NewCode(code_desc,
                                        Code::ComputeFlags(Code::REGEXP),
                                        masm_->CodeObject());
-  PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
-  return Handle<HeapObject>::cast(code);
+  PROFILE(RegExpCodeCreateEvent(*code, *source));
+  return Handle<Object>::cast(code);
 }

@@ -899,12 +894,13 @@ void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
         constant_offset - offset_of_pc_register_read;
     ASSERT(pc_offset_of_constant < 0);
     if (is_valid_memory_offset(pc_offset_of_constant)) {
-      Assembler::BlockConstPoolScope block_const_pool(masm_);
+      masm_->BlockConstPoolBefore(masm_->pc_offset() + Assembler::kInstrSize);
       __ ldr(r0, MemOperand(pc, pc_offset_of_constant));
     } else {
       // Not a 12-bit offset, so it needs to be loaded from the constant
       // pool.
-      Assembler::BlockConstPoolScope block_const_pool(masm_);
+      masm_->BlockConstPoolBefore(
+          masm_->pc_offset() + 2 * Assembler::kInstrSize);
       __ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
       __ ldr(r0, MemOperand(pc, r0));
     }
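Both branches exist because an ARM ldr with a pc-relative immediate encodes only a 12-bit offset magnitude; anything larger must first be materialized in a register, and the constant pool has to be blocked so it cannot land between the address computation and the load. A one-line restatement of the reachability test (illustrative, not the actual is_valid_memory_offset helper):

  // ARM addressing mode 2 carries a 12-bit magnitude plus a sign bit,
  // so any offset with |offset| < 4096 bytes fits in the instruction.
  inline bool FitsLdrImmediateOffset(int offset) {
    return offset > -4096 && offset < 4096;
  }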
@@ -1002,7 +998,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
   __ mov(r1, Operand(masm_->CodeObject()));
   // r0 becomes return address pointer.
   ExternalReference stack_guard_check =
-      ExternalReference::re_check_stack_guard_state(masm_->isolate());
+      ExternalReference::re_check_stack_guard_state();
   CallCFunctionUsingStub(stack_guard_check, num_arguments);
 }

@@ -1017,10 +1013,8 @@ static T& frame_entry(Address re_frame, int frame_offset) {
 int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
                                                   Code* re_code,
                                                   Address re_frame) {
-  Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
-  ASSERT(isolate == Isolate::Current());
-  if (isolate->stack_guard()->IsStackOverflow()) {
-    isolate->StackOverflow();
+  if (StackGuard::IsStackOverflow()) {
+    Top::StackOverflow();
     return EXCEPTION;
   }

@@ -1164,7 +1158,7 @@ void RegExpMacroAssemblerARM::Pop(Register target) {
 void RegExpMacroAssemblerARM::CheckPreemption() {
   // Check for preemption.
   ExternalReference stack_limit =
-      ExternalReference::address_of_stack_limit(masm_->isolate());
+      ExternalReference::address_of_stack_limit();
   __ mov(r0, Operand(stack_limit));
   __ ldr(r0, MemOperand(r0));
   __ cmp(sp, r0);
@@ -1174,7 +1168,7 @@ void RegExpMacroAssemblerARM::CheckPreemption() {
 void RegExpMacroAssemblerARM::CheckStackLimit() {
   ExternalReference stack_limit =
-      ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
+      ExternalReference::address_of_regexp_stack_limit();
   __ mov(r0, Operand(stack_limit));
   __ ldr(r0, MemOperand(r0));
   __ cmp(backtrack_stackpointer(), Operand(r0));
@@ -1184,7 +1178,8 @@ void RegExpMacroAssemblerARM::CheckStackLimit() {
 void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
   __ CheckConstPool(false, false);
-  Assembler::BlockConstPoolScope block_const_pool(masm_);
+  __ BlockConstPoolBefore(
+      masm_->pc_offset() + kBacktrackConstantPoolSize * Assembler::kInstrSize);
   backtrack_constant_pool_offset_ = masm_->pc_offset();
   for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
     __ emit(0);

3
deps/v8/src/arm/regexp-macro-assembler-arm.h

@@ -82,7 +82,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
   virtual bool CheckSpecialCharacterClass(uc16 type,
                                           Label* on_no_match);
   virtual void Fail();
-  virtual Handle<HeapObject> GetCode(Handle<String> source);
+  virtual Handle<Object> GetCode(Handle<String> source);
   virtual void GoTo(Label* label);
   virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
   virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
@@ -127,7 +127,6 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
   static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
   static const int kStackHighEnd = kRegisterOutput + kPointerSize;
   static const int kDirectCall = kStackHighEnd + kPointerSize;
-  static const int kIsolate = kDirectCall + kPointerSize;

   // Below the frame pointer.
   // Register parameters stored by setup code.

86
deps/v8/src/extensions/experimental/collator.h → deps/v8/src/arm/register-allocator-arm-inl.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,44 +25,76 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-#ifndef V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H
-#define V8_EXTENSIONS_EXPERIMENTAL_COLLATOR_H_
+#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
+#define V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_

-#include "include/v8.h"
+#include "v8.h"

-#include "unicode/uversion.h"
+namespace v8 {
+namespace internal {

-namespace U_ICU_NAMESPACE {
-class Collator;
-class UnicodeString;
-}
+// -------------------------------------------------------------------------
+// RegisterAllocator implementation.

-namespace v8 {
-namespace internal {
+bool RegisterAllocator::IsReserved(Register reg) {
+  return reg.is(cp) || reg.is(fp) || reg.is(sp) || reg.is(pc);
+}

-class Collator {
- public:
-  static v8::Handle<v8::Value> JSCollator(const v8::Arguments& args);
+// The register allocator uses small integers to represent the
+// non-reserved assembler registers. The mapping is:
+//
+// r0 <-> 0
+// r1 <-> 1
+// r2 <-> 2
+// r3 <-> 3
+// r4 <-> 4
+// r5 <-> 5
+// r6 <-> 6
+// r7 <-> 7
+// r9 <-> 8
+// r10 <-> 9
+// ip <-> 10
+// lr <-> 11

-  // Helper methods for various bindings.
+int RegisterAllocator::ToNumber(Register reg) {
+  ASSERT(reg.is_valid() && !IsReserved(reg));
+  const int kNumbers[] = {
+    0,   // r0
+    1,   // r1
+    2,   // r2
+    3,   // r3
+    4,   // r4
+    5,   // r5
+    6,   // r6
+    7,   // r7
+    -1,  // cp
+    8,   // r9
+    9,   // r10
+    -1,  // fp
+    10,  // ip
+    -1,  // sp
+    11,  // lr
+    -1   // pc
+  };
+  return kNumbers[reg.code()];
+}

-  // Unpacks collator object from corresponding JavaScript object.
-  static icu::Collator* UnpackCollator(v8::Handle<v8::Object> obj);
+Register RegisterAllocator::ToRegister(int num) {
+  ASSERT(num >= 0 && num < kNumRegisters);
+  const Register kRegisters[] =
+      { r0, r1, r2, r3, r4, r5, r6, r7, r9, r10, ip, lr };
+  return kRegisters[num];
+}

-  // Release memory we allocated for the Collator once the JS object that
-  // holds the pointer gets garbage collected.
-  static void DeleteCollator(v8::Persistent<v8::Value> object, void* param);
-
-  // Compare two strings and returns -1, 0 and 1 depending on
-  // whether string1 is smaller than, equal to or larger than string2.
-  static v8::Handle<v8::Value> CollatorCompare(const v8::Arguments& args);
-
- private:
-  Collator() {}
-
-  static v8::Persistent<v8::FunctionTemplate> collator_template_;
-};
+void RegisterAllocator::Initialize() {
+  Reset();
+}

 } }  // namespace v8::internal

-#endif  // V8_EXTENSIONS_EXPERIMENTAL_COLLATOR
+#endif  // V8_ARM_REGISTER_ALLOCATOR_ARM_INL_H_
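The kNumbers and kRegisters tables above are inverse mappings over the twelve allocatable registers, with the reserved ones (cp, fp, sp, pc) pinned to -1. A small host-side sanity sketch of that invariant (test scaffolding only):

  // For every allocatable number n, ToRegister and ToNumber round-trip,
  // and no reserved register ever appears in the table.
  void CheckRegisterMappingRoundTrip() {
    for (int n = 0; n < 12; n++) {  // 12 allocatable registers on ARM
      Register reg = RegisterAllocator::ToRegister(n);
      ASSERT(!RegisterAllocator::IsReserved(reg));
      ASSERT(RegisterAllocator::ToNumber(reg) == n);
    }
  }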

63
deps/v8/src/arm/register-allocator-arm.cc

@@ -0,0 +1,63 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen-inl.h"
#include "register-allocator-inl.h"
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// Result implementation.
void Result::ToRegister() {
UNIMPLEMENTED();
}
void Result::ToRegister(Register target) {
UNIMPLEMENTED();
}
// -------------------------------------------------------------------------
// RegisterAllocator implementation.
Result RegisterAllocator::AllocateByteRegisterWithoutSpilling() {
// No byte registers on ARM.
UNREACHABLE();
return Result();
}
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

44
deps/v8/src/arm/register-allocator-arm.h

@@ -0,0 +1,44 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM_REGISTER_ALLOCATOR_ARM_H_
#define V8_ARM_REGISTER_ALLOCATOR_ARM_H_
namespace v8 {
namespace internal {
class RegisterAllocatorConstants : public AllStatic {
public:
// No registers are currently managed by the register allocator on ARM.
static const int kNumRegisters = 0;
static const int kInvalidRegister = -1;
};
} } // namespace v8::internal
#endif // V8_ARM_REGISTER_ALLOCATOR_ARM_H_

515
deps/v8/src/arm/simulator-arm.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,12 +49,12 @@ namespace internal {
 // Windows C Run-Time Library does not provide vsscanf.
 #define SScanF sscanf  // NOLINT

-// The ArmDebugger class is used by the simulator while debugging simulated ARM
+// The Debugger class is used by the simulator while debugging simulated ARM
 // code.
-class ArmDebugger {
+class Debugger {
  public:
-  explicit ArmDebugger(Simulator* sim);
-  ~ArmDebugger();
+  explicit Debugger(Simulator* sim);
+  ~Debugger();

   void Stop(Instruction* instr);
   void Debug();
@@ -67,7 +67,6 @@ class ArmDebugger {
   Simulator* sim_;

   int32_t GetRegisterValue(int regnum);
-  double GetRegisterPairDoubleValue(int regnum);
   double GetVFPDoubleRegisterValue(int regnum);
   bool GetValue(const char* desc, int32_t* value);
   bool GetVFPSingleValue(const char* desc, float* value);
@@ -84,12 +83,12 @@ class ArmDebugger {
 };

-ArmDebugger::ArmDebugger(Simulator* sim) {
+Debugger::Debugger(Simulator* sim) {
   sim_ = sim;
 }

-ArmDebugger::~ArmDebugger() {
+Debugger::~Debugger() {
 }

@@ -106,7 +105,7 @@ static void InitializeCoverage() {
 }

-void ArmDebugger::Stop(Instruction* instr) {
+void Debugger::Stop(Instruction* instr) {
   // Get the stop code.
   uint32_t code = instr->SvcValue() & kStopCodeMask;
   // Retrieve the encoded address, which comes just after this stop.
@@ -138,7 +137,7 @@ static void InitializeCoverage() {
 }

-void ArmDebugger::Stop(Instruction* instr) {
+void Debugger::Stop(Instruction* instr) {
   // Get the stop code.
   uint32_t code = instr->SvcValue() & kStopCodeMask;
   // Retrieve the encoded address, which comes just after this stop.
@@ -160,7 +159,7 @@ void ArmDebugger::Stop(Instruction* instr) {
 #endif

-int32_t ArmDebugger::GetRegisterValue(int regnum) {
+int32_t Debugger::GetRegisterValue(int regnum) {
   if (regnum == kPCRegister) {
     return sim_->get_pc();
   } else {
@@ -169,17 +168,12 @@ int32_t ArmDebugger::GetRegisterValue(int regnum) {
 }

-double ArmDebugger::GetRegisterPairDoubleValue(int regnum) {
-  return sim_->get_double_from_register_pair(regnum);
-}
-
-
-double ArmDebugger::GetVFPDoubleRegisterValue(int regnum) {
+double Debugger::GetVFPDoubleRegisterValue(int regnum) {
   return sim_->get_double_from_d_register(regnum);
 }

-bool ArmDebugger::GetValue(const char* desc, int32_t* value) {
+bool Debugger::GetValue(const char* desc, int32_t* value) {
   int regnum = Registers::Number(desc);
   if (regnum != kNoRegister) {
     *value = GetRegisterValue(regnum);
@@ -195,7 +189,7 @@ bool ArmDebugger::GetValue(const char* desc, int32_t* value) {
 }

-bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) {
+bool Debugger::GetVFPSingleValue(const char* desc, float* value) {
   bool is_double;
   int regnum = VFPRegisters::Number(desc, &is_double);
   if (regnum != kNoRegister && !is_double) {
@@ -206,7 +200,7 @@ bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) {
 }

-bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) {
+bool Debugger::GetVFPDoubleValue(const char* desc, double* value) {
   bool is_double;
   int regnum = VFPRegisters::Number(desc, &is_double);
   if (regnum != kNoRegister && is_double) {
@@ -217,7 +211,7 @@ bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) {
 }

-bool ArmDebugger::SetBreakpoint(Instruction* breakpc) {
+bool Debugger::SetBreakpoint(Instruction* breakpc) {
   // Check if a breakpoint can be set. If not return without any side-effects.
   if (sim_->break_pc_ != NULL) {
     return false;
@@ -232,7 +226,7 @@ bool ArmDebugger::SetBreakpoint(Instruction* breakpc) {
 }

-bool ArmDebugger::DeleteBreakpoint(Instruction* breakpc) {
+bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
   if (sim_->break_pc_ != NULL) {
     sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
   }
@@ -243,21 +237,21 @@ bool ArmDebugger::DeleteBreakpoint(Instruction* breakpc) {
 }

-void ArmDebugger::UndoBreakpoints() {
+void Debugger::UndoBreakpoints() {
   if (sim_->break_pc_ != NULL) {
     sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
   }
 }

-void ArmDebugger::RedoBreakpoints() {
+void Debugger::RedoBreakpoints() {
   if (sim_->break_pc_ != NULL) {
     sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
   }
 }

-void ArmDebugger::Debug() {
+void Debugger::Debug() {
   intptr_t last_pc = -1;
   bool done = false;
@@ -311,45 +305,27 @@ void ArmDebugger::Debug() {
         // Leave the debugger shell.
         done = true;
       } else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
-        if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
+        if (argc == 2) {
           int32_t value;
           float svalue;
           double dvalue;
           if (strcmp(arg1, "all") == 0) {
             for (int i = 0; i < kNumRegisters; i++) {
               value = GetRegisterValue(i);
-              PrintF("%3s: 0x%08x %10d", Registers::Name(i), value, value);
-              if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
-                  i < 8 &&
-                  (i % 2) == 0) {
-                dvalue = GetRegisterPairDoubleValue(i);
-                PrintF(" (%f)\n", dvalue);
-              } else {
-                PrintF("\n");
-              }
+              PrintF("%3s: 0x%08x %10d\n", Registers::Name(i), value, value);
             }
             for (int i = 0; i < kNumVFPDoubleRegisters; i++) {
               dvalue = GetVFPDoubleRegisterValue(i);
-              uint64_t as_words = BitCast<uint64_t>(dvalue);
-              PrintF("%3s: %f 0x%08x %08x\n",
-                     VFPRegisters::Name(i, true),
-                     dvalue,
-                     static_cast<uint32_t>(as_words >> 32),
-                     static_cast<uint32_t>(as_words & 0xffffffff));
+              PrintF("%3s: %f\n",
+                     VFPRegisters::Name(i, true), dvalue);
             }
           } else {
             if (GetValue(arg1, &value)) {
               PrintF("%s: 0x%08x %d \n", arg1, value, value);
             } else if (GetVFPSingleValue(arg1, &svalue)) {
-              uint32_t as_word = BitCast<uint32_t>(svalue);
-              PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
+              PrintF("%s: %f \n", arg1, svalue);
             } else if (GetVFPDoubleValue(arg1, &dvalue)) {
-              uint64_t as_words = BitCast<uint64_t>(dvalue);
-              PrintF("%s: %f 0x%08x %08x\n",
-                     arg1,
-                     dvalue,
-                     static_cast<uint32_t>(as_words >> 32),
-                     static_cast<uint32_t>(as_words & 0xffffffff));
+              PrintF("%s: %f \n", arg1, dvalue);
             } else {
               PrintF("%s unrecognized\n", arg1);
             }
@@ -404,24 +380,11 @@ void ArmDebugger::Debug() {
           end = cur + words;

         while (cur < end) {
-          PrintF("  0x%08x:  0x%08x %10d",
+          PrintF("  0x%08x:  0x%08x %10d\n",
                  reinterpret_cast<intptr_t>(cur), *cur, *cur);
-          HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
-          int value = *cur;
-          Heap* current_heap = v8::internal::Isolate::Current()->heap();
-          if (current_heap->Contains(obj) || ((value & 1) == 0)) {
-            PrintF(" (");
-            if ((value & 1) == 0) {
-              PrintF("smi %d", value / 2);
-            } else {
-              obj->ShortPrint();
-            }
-            PrintF(")");
-          }
-          PrintF("\n");
           cur++;
         }
-      } else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
+      } else if (strcmp(cmd, "disasm") == 0) {
         disasm::NameConverter converter;
         disasm::Disassembler dasm(converter);
         // use a reasonably large buffer
@@ -435,24 +398,12 @@ void ArmDebugger::Debug() {
           cur = reinterpret_cast<byte*>(sim_->get_pc());
           end = cur + (10 * Instruction::kInstrSize);
         } else if (argc == 2) {
-          int regnum = Registers::Number(arg1);
-          if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
-            // The argument is an address or a register name.
-            int32_t value;
-            if (GetValue(arg1, &value)) {
-              cur = reinterpret_cast<byte*>(value);
-              // Disassemble 10 instructions at <arg1>.
-              end = cur + (10 * Instruction::kInstrSize);
-            }
-          } else {
-            // The argument is the number of instructions.
-            int32_t value;
-            if (GetValue(arg1, &value)) {
-              cur = reinterpret_cast<byte*>(sim_->get_pc());
-              // Disassemble <arg1> instructions.
-              end = cur + (value * Instruction::kInstrSize);
-            }
+          int32_t value;
+          if (GetValue(arg1, &value)) {
+            cur = reinterpret_cast<byte*>(sim_->get_pc());
+            // Disassemble <arg1> instructions.
+            end = cur + (value * Instruction::kInstrSize);
           }
         } else {
           int32_t value1;
           int32_t value2;
@@ -564,7 +515,6 @@ void ArmDebugger::Debug() {
         PrintF("print <register>\n");
         PrintF("  print register content (alias 'p')\n");
         PrintF("  use register name 'all' to print all registers\n");
-        PrintF("  add argument 'fp' to print register pair double values\n");
         PrintF("printobject <register>\n");
         PrintF("  print an object from a register (alias 'po')\n");
         PrintF("flags\n");
@@ -574,10 +524,8 @@ void ArmDebugger::Debug() {
         PrintF("mem <address> [<words>]\n");
         PrintF("  dump memory content, default dump 10 words)\n");
         PrintF("disasm [<instructions>]\n");
-        PrintF("disasm [<address/register>]\n");
-        PrintF("disasm [[<address/register>] <instructions>]\n");
-        PrintF("  disassemble code, default is 10 instructions\n");
-        PrintF("  from pc (alias 'di')\n");
+        PrintF("disasm [[<address>] <instructions>]\n");
+        PrintF("  disassemble code, default is 10 instructions from pc\n");
         PrintF("gdb\n");
         PrintF("  enter gdb\n");
         PrintF("break <address>\n");
@@ -591,7 +539,7 @@ void ArmDebugger::Debug() {
         PrintF("  Stops are debug instructions inserted by\n");
         PrintF("  the Assembler::stop() function.\n");
         PrintF("  When hitting a stop, the Simulator will\n");
-        PrintF("  stop and and give control to the ArmDebugger.\n");
+        PrintF("  stop and and give control to the Debugger.\n");
         PrintF("  The first %d stop codes are watched:\n",
                Simulator::kNumOfWatchedStops);
         PrintF("  - They can be enabled / disabled: the Simulator\n");
@@ -645,9 +593,7 @@ static bool AllOnOnePage(uintptr_t start, int size) {
 }

-void Simulator::FlushICache(v8::internal::HashMap* i_cache,
-                            void* start_addr,
-                            size_t size) {
+void Simulator::FlushICache(void* start_addr, size_t size) {
   intptr_t start = reinterpret_cast<intptr_t>(start_addr);
   int intra_line = (start & CachePage::kLineMask);
   start -= intra_line;
@@ -656,20 +602,20 @@ void Simulator::FlushICache(void* start_addr, size_t size) {
   int offset = (start & CachePage::kPageMask);
   while (!AllOnOnePage(start, size - 1)) {
     int bytes_to_flush = CachePage::kPageSize - offset;
-    FlushOnePage(i_cache, start, bytes_to_flush);
+    FlushOnePage(start, bytes_to_flush);
     start += bytes_to_flush;
     size -= bytes_to_flush;
     ASSERT_EQ(0, start & CachePage::kPageMask);
     offset = 0;
   }
   if (size != 0) {
-    FlushOnePage(i_cache, start, size);
+    FlushOnePage(start, size);
   }
 }

-CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
-  v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
+CachePage* Simulator::GetCachePage(void* page) {
+  v8::internal::HashMap::Entry* entry = i_cache_->Lookup(page,
                                                         ICacheHash(page),
                                                         true);
   if (entry->value == NULL) {
@@ -681,28 +627,25 @@ CachePage* Simulator::GetCachePage(void* page) {

 // Flush from start up to and not including start + size.
-void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
-                             intptr_t start,
-                             int size) {
+void Simulator::FlushOnePage(intptr_t start, int size) {
   ASSERT(size <= CachePage::kPageSize);
   ASSERT(AllOnOnePage(start, size - 1));
   ASSERT((start & CachePage::kLineMask) == 0);
   ASSERT((size & CachePage::kLineMask) == 0);
   void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
   int offset = (start & CachePage::kPageMask);
-  CachePage* cache_page = GetCachePage(i_cache, page);
+  CachePage* cache_page = GetCachePage(page);
   char* valid_bytemap = cache_page->ValidityByte(offset);
   memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
 }

-void Simulator::CheckICache(v8::internal::HashMap* i_cache,
-                            Instruction* instr) {
+void Simulator::CheckICache(Instruction* instr) {
   intptr_t address = reinterpret_cast<intptr_t>(instr);
   void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
   void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
   int offset = (address & CachePage::kPageMask);
-  CachePage* cache_page = GetCachePage(i_cache, page);
+  CachePage* cache_page = GetCachePage(page);
   char* cache_valid_byte = cache_page->ValidityByte(offset);
   bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
   char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
@@ -719,21 +662,29 @@ void Simulator::CheckICache(Instruction* instr) {
 }

-void Simulator::Initialize(Isolate* isolate) {
-  if (isolate->simulator_initialized()) return;
-  isolate->set_simulator_initialized(true);
-  ::v8::internal::ExternalReference::set_redirector(isolate,
-                                                    &RedirectExternalReference);
+// Create one simulator per thread and keep it in thread local storage.
+static v8::internal::Thread::LocalStorageKey simulator_key;
+
+
+bool Simulator::initialized_ = false;
+
+
+void Simulator::Initialize() {
+  if (initialized_) return;
+  simulator_key = v8::internal::Thread::CreateThreadLocalKey();
+  initialized_ = true;
+  ::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
 }

-Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
-  i_cache_ = isolate_->simulator_i_cache();
+v8::internal::HashMap* Simulator::i_cache_ = NULL;
+
+
+Simulator::Simulator() {
   if (i_cache_ == NULL) {
     i_cache_ = new v8::internal::HashMap(&ICacheMatch);
-    isolate_->set_simulator_i_cache(i_cache_);
   }
-  Initialize(isolate);
+  Initialize();
   // Setup simulator support first. Some of this information is needed to
   // setup the architecture state.
   size_t stack_size = 1 * 1024*1024;  // allocate 1MB for stack
@@ -797,14 +748,11 @@ class Redirection {
       : external_function_(external_function),
         swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
         type_(type),
-        next_(NULL) {
-    Isolate* isolate = Isolate::Current();
-    next_ = isolate->simulator_redirection();
-    Simulator::current(isolate)->
-        FlushICache(isolate->simulator_i_cache(),
-                    reinterpret_cast<void*>(&swi_instruction_),
+        next_(list_) {
+    Simulator::current()->
+        FlushICache(reinterpret_cast<void*>(&swi_instruction_),
                     Instruction::kInstrSize);
-    isolate->set_simulator_redirection(this);
+    list_ = this;
   }

   void* address_of_swi_instruction() {
@@ -816,9 +764,8 @@ class Redirection {
   static Redirection* Get(void* external_function,
                           ExternalReference::Type type) {
-    Isolate* isolate = Isolate::Current();
-    Redirection* current = isolate->simulator_redirection();
-    for (; current != NULL; current = current->next_) {
+    Redirection* current;
+    for (current = list_; current != NULL; current = current->next_) {
       if (current->external_function_ == external_function) return current;
     }
     return new Redirection(external_function, type);
@@ -836,9 +783,13 @@ class Redirection {
   uint32_t swi_instruction_;
   ExternalReference::Type type_;
   Redirection* next_;
+  static Redirection* list_;
 };

+Redirection* Redirection::list_ = NULL;
+
+
 void* Simulator::RedirectExternalReference(void* external_function,
                                            ExternalReference::Type type) {
   Redirection* redirection = Redirection::Get(external_function, type);
@@ -847,16 +798,14 @@ void* Simulator::RedirectExternalReference(void* external_function,

 // Get the active Simulator for the current thread.
-Simulator* Simulator::current(Isolate* isolate) {
-  v8::internal::Isolate::PerIsolateThreadData* isolate_data =
-      isolate->FindOrAllocatePerThreadDataForThisThread();
-  ASSERT(isolate_data != NULL);
-
-  Simulator* sim = isolate_data->simulator();
+Simulator* Simulator::current() {
+  Initialize();
+  Simulator* sim = reinterpret_cast<Simulator*>(
+      v8::internal::Thread::GetThreadLocal(simulator_key));
   if (sim == NULL) {
-    // TODO(146): delete the simulator object when a thread/isolate goes away.
-    sim = new Simulator(isolate);
-    isolate_data->set_simulator(sim);
+    // TODO(146): delete the simulator object when a thread goes away.
+    sim = new Simulator();
+    v8::internal::Thread::SetThreadLocal(simulator_key, sim);
   }
   return sim;
 }
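The downgrade trades per-isolate simulator storage for one process-wide thread-local slot. The pattern in miniature, using standard C++ thread_local in place of V8's Thread::LocalStorageKey machinery (illustrative only; this 2011-era tree predates C++11):

  // One lazily created simulator per thread; no isolate involved.
  Simulator* CurrentSimulator() {
    static thread_local Simulator* sim = NULL;
    if (sim == NULL) sim = new Simulator();  // leaked, per TODO(146) above
    return sim;
  }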
@@ -885,19 +834,6 @@ int32_t Simulator::get_register(int reg) const {
 }

-double Simulator::get_double_from_register_pair(int reg) {
-  ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
-
-  double dm_val = 0.0;
-  // Read the bits from the unsigned integer register_[] array
-  // into the double precision floating point value and return it.
-  char buffer[2 * sizeof(vfp_register[0])];
-  memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
-  memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
-  return(dm_val);
-}
-
-
 void Simulator::set_dw_register(int dreg, const int* dbl) {
   ASSERT((dreg >= 0) && (dreg < num_d_registers));
   registers_[dreg] = dbl[0];
@@ -963,7 +899,12 @@ void Simulator::set_d_register_from_double(int dreg, const double& dbl) {
   // 2*sreg and 2*sreg+1.
   char buffer[2 * sizeof(vfp_register[0])];
   memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
+#ifndef BIG_ENDIAN_FLOATING_POINT
   memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
+#else
+  memcpy(&vfp_register[dreg * 2], &buffer[4], sizeof(vfp_register[0]));
+  memcpy(&vfp_register[dreg * 2 + 1], &buffer[0], sizeof(vfp_register[0]));
+#endif
 }

@@ -1000,81 +941,38 @@ double Simulator::get_double_from_d_register(int dreg) {
   // Read the bits from the unsigned integer vfp_register[] array
   // into the double precision floating point value and return it.
   char buffer[2 * sizeof(vfp_register[0])];
+#ifdef BIG_ENDIAN_FLOATING_POINT
+  memcpy(&buffer[0], &vfp_register[2 * dreg + 1], sizeof(vfp_register[0]));
+  memcpy(&buffer[4], &vfp_register[2 * dreg], sizeof(vfp_register[0]));
+#else
   memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
+#endif
   memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
   return(dm_val);
 }
// For use in calls that take two double values, constructed either // For use in calls that take two double values, constructed from r0, r1, r2
// from r0-r3 or d0 and d1. // and r3.
void Simulator::GetFpArgs(double* x, double* y) { void Simulator::GetFpArgs(double* x, double* y) {
if (use_eabi_hardfloat()) {
*x = vfp_register[0];
*y = vfp_register[1];
} else {
// We use a char buffer to get around the strict-aliasing rules which // We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy. // otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)]; char buffer[2 * sizeof(registers_[0])];
// Registers 0 and 1 -> x. // Registers 0 and 1 -> x.
memcpy(buffer, registers_, sizeof(*x)); memcpy(buffer, registers_, sizeof(buffer));
memcpy(x, buffer, sizeof(*x)); memcpy(x, buffer, sizeof(buffer));
// Registers 2 and 3 -> y. // Registers 2 and 3 -> y.
memcpy(buffer, registers_ + 2, sizeof(*y)); memcpy(buffer, registers_ + 2, sizeof(buffer));
memcpy(y, buffer, sizeof(*y)); memcpy(y, buffer, sizeof(buffer));
}
}
// For use in calls that take one double value, constructed either
// from r0 and r1 or d0.
void Simulator::GetFpArgs(double* x) {
if (use_eabi_hardfloat()) {
*x = vfp_register[0];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
memcpy(buffer, registers_, sizeof(*x));
memcpy(x, buffer, sizeof(*x));
}
} }
// For use in calls that take one double value constructed either
// from r0 and r1 or d0 and one integer value.
void Simulator::GetFpArgs(double* x, int32_t* y) {
if (use_eabi_hardfloat()) {
*x = vfp_register[0];
*y = registers_[1];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
memcpy(buffer, registers_, sizeof(*x));
memcpy(x, buffer, sizeof(*x));
// Register 2 -> y.
memcpy(buffer, registers_ + 2, sizeof(*y));
memcpy(y, buffer, sizeof(*y));
}
}
// The return value is either in r0/r1 or d0.
void Simulator::SetFpResult(const double& result) { void Simulator::SetFpResult(const double& result) {
if (use_eabi_hardfloat()) {
char buffer[2 * sizeof(vfp_register[0])];
memcpy(buffer, &result, sizeof(buffer));
// Copy result to d0.
memcpy(vfp_register, buffer, sizeof(buffer));
} else {
char buffer[2 * sizeof(registers_[0])]; char buffer[2 * sizeof(registers_[0])];
memcpy(buffer, &result, sizeof(buffer)); memcpy(buffer, &result, sizeof(buffer));
// Copy result to r0 and r1. // result -> registers 0 and 1.
memcpy(registers_, buffer, sizeof(buffer)); memcpy(registers_, buffer, sizeof(buffer));
} }
}
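The char-buffer dance in GetFpArgs and SetFpResult exists purely to sidestep strict aliasing: casting int32_t* to double* would be undefined behavior that optimizers may exploit, as the source comment notes. The idiom in a standalone form (illustrative sketch; the intermediate buffer mirrors the V8 code, though a single memcpy would also do):

#include <cstdint>
#include <cstring>

// Reassemble a double from two 32-bit register values via memcpy,
// never aliasing the integer storage through a double*.
double DoubleFromRegisters(const int32_t regs[2]) {
  double result;
  char buffer[sizeof(result)];
  std::memcpy(buffer, regs, sizeof(buffer));
  std::memcpy(&result, buffer, sizeof(result));
  return result;
}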
 void Simulator::TrashCallerSaveRegisters() {
@@ -1327,13 +1225,12 @@ void Simulator::SetVFlag(bool val) {
 // Calculate C flag value for additions.
-bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
+bool Simulator::CarryFrom(int32_t left, int32_t right) {
   uint32_t uleft = static_cast<uint32_t>(left);
   uint32_t uright = static_cast<uint32_t>(right);
   uint32_t urest = 0xffffffffU - uleft;

-  return (uright > urest) ||
-         (carry && (((uright + 1) > urest) || (uright > (urest - 1))));
+  return (uright > urest);
 }
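The surviving carry test is the standard unsigned-overflow check: adding right to left wraps past 2^32 - 1 exactly when right exceeds 0xffffffff - left. A tiny runnable check of that claim (editorial sketch, not part of the commit):

#include <cstdint>
#include <cstdio>

bool CarryFrom(int32_t left, int32_t right) {
  uint32_t uleft = static_cast<uint32_t>(left);
  uint32_t uright = static_cast<uint32_t>(right);
  return uright > (0xffffffffU - uleft);
}

int main() {
  std::printf("%d\n", CarryFrom(-1, 1));          // 1: 0xffffffff + 1 wraps
  std::printf("%d\n", CarryFrom(0x7fffffff, 1));  // 0: stays below 2^32
}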
@@ -1568,34 +1465,36 @@ static int count_bits(int bit_vector) {
 }

-void Simulator::ProcessPUW(Instruction* instr,
-                           int num_regs,
-                           int reg_size,
-                           intptr_t* start_address,
-                           intptr_t* end_address) {
+// Addressing Mode 4 - Load and Store Multiple
+void Simulator::HandleRList(Instruction* instr, bool load) {
   int rn = instr->RnValue();
   int32_t rn_val = get_register(rn);
+  int rlist = instr->RlistValue();
+  int num_regs = count_bits(rlist);
+
+  intptr_t start_address = 0;
+  intptr_t end_address = 0;
   switch (instr->PUField()) {
     case da_x: {
       UNIMPLEMENTED();
       break;
     }
     case ia_x: {
-      *start_address = rn_val;
-      *end_address = rn_val + (num_regs * reg_size) - reg_size;
-      rn_val = rn_val + (num_regs * reg_size);
+      start_address = rn_val;
+      end_address = rn_val + (num_regs * 4) - 4;
+      rn_val = rn_val + (num_regs * 4);
       break;
     }
     case db_x: {
-      *start_address = rn_val - (num_regs * reg_size);
-      *end_address = rn_val - reg_size;
-      rn_val = *start_address;
+      start_address = rn_val - (num_regs * 4);
+      end_address = rn_val - 4;
+      rn_val = start_address;
       break;
     }
     case ib_x: {
-      *start_address = rn_val + reg_size;
-      *end_address = rn_val + (num_regs * reg_size);
-      rn_val = *end_address;
+      start_address = rn_val + 4;
+      end_address = rn_val + (num_regs * 4);
+      rn_val = end_address;
       break;
     }
     default: {
@@ -1606,17 +1505,6 @@ void Simulator::ProcessPUW(Instruction* instr,
   if (instr->HasW()) {
     set_register(rn, rn_val);
   }
-}
-
-// Addressing Mode 4 - Load and Store Multiple
-void Simulator::HandleRList(Instruction* instr, bool load) {
-  int rlist = instr->RlistValue();
-  int num_regs = count_bits(rlist);
-
-  intptr_t start_address = 0;
-  intptr_t end_address = 0;
-  ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
-
   intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
   int reg = 0;
   while (rlist != 0) {
@@ -1635,57 +1523,6 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
 }
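For readers decoding the ia_x/db_x/ib_x arms above: with word-sized registers they delimit the memory window a block transfer touches, plus the writeback value for the base register. A worked example with hypothetical inputs (num_regs = 3, base register holding 0x1000):

#include <cstdio>

int main() {
  int num_regs = 3, rn_val = 0x1000;
  // increment-after:  window [0x1000, 0x1008], writeback 0x100c
  std::printf("ia: %#x..%#x\n", rn_val, rn_val + num_regs * 4 - 4);
  // decrement-before: window [0xff4, 0xffc], writeback 0xff4
  std::printf("db: %#x..%#x\n", rn_val - num_regs * 4, rn_val - 4);
  // increment-before: window [0x1004, 0x100c], writeback 0x100c
  std::printf("ib: %#x..%#x\n", rn_val + 4, rn_val + num_regs * 4);
}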
-// Addressing Mode 6 - Load and Store Multiple Coprocessor registers.
-void Simulator::HandleVList(Instruction* instr) {
-  VFPRegPrecision precision =
-      (instr->SzValue() == 0) ? kSinglePrecision : kDoublePrecision;
-  int operand_size = (precision == kSinglePrecision) ? 4 : 8;
-
-  bool load = (instr->VLValue() == 0x1);
-
-  int vd;
-  int num_regs;
-  vd = instr->VFPDRegValue(precision);
-  if (precision == kSinglePrecision) {
-    num_regs = instr->Immed8Value();
-  } else {
-    num_regs = instr->Immed8Value() / 2;
-  }
-
-  intptr_t start_address = 0;
-  intptr_t end_address = 0;
-  ProcessPUW(instr, num_regs, operand_size, &start_address, &end_address);
-
-  intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
-  for (int reg = vd; reg < vd + num_regs; reg++) {
-    if (precision == kSinglePrecision) {
-      if (load) {
-        set_s_register_from_sinteger(
-            reg, ReadW(reinterpret_cast<int32_t>(address), instr));
-      } else {
-        WriteW(reinterpret_cast<int32_t>(address),
-               get_sinteger_from_s_register(reg), instr);
-      }
-      address += 1;
-    } else {
-      if (load) {
-        set_s_register_from_sinteger(
-            2 * reg, ReadW(reinterpret_cast<int32_t>(address), instr));
-        set_s_register_from_sinteger(
-            2 * reg + 1, ReadW(reinterpret_cast<int32_t>(address + 1), instr));
-      } else {
-        WriteW(reinterpret_cast<int32_t>(address),
-               get_sinteger_from_s_register(2 * reg), instr);
-        WriteW(reinterpret_cast<int32_t>(address + 1),
-               get_sinteger_from_s_register(2 * reg + 1), instr);
-      }
-      address += 2;
-    }
-  }
-  ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
-}
-
 // Calls into the V8 runtime are based on this very simple interface.
 // Note: To be able to return two values from some calls the code in runtime.cc
 // uses the ObjectPair which is essentially two 32-bit values stuffed into a
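As the comment says, ObjectPair is two 32-bit values packed into one int64_t so a runtime call can return both, which the simulator then splits across r0 and r1. A self-contained sketch of the packing and the unpacking seen later in SoftwareInterrupt (illustrative; MakePair here is a stand-in, not runtime.cc's definition):

#include <cstdint>

// Pack: low word in bits 0..31 (returned in r0), high word in 32..63 (r1).
int64_t MakePair(int32_t lo, int32_t hi) {
  return static_cast<int64_t>(static_cast<uint32_t>(lo)) |
         (static_cast<int64_t>(hi) << 32);
}

int main() {
  int64_t result = MakePair(0x1234, -1);
  int32_t lo_res = static_cast<int32_t>(result);        // -> r0
  int32_t hi_res = static_cast<int32_t>(result >> 32);  // -> r1
  return (lo_res == 0x1234 && hi_res == -1) ? 0 : 1;
}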
@@ -1696,8 +1533,7 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
                                         int32_t arg1,
                                         int32_t arg2,
                                         int32_t arg3,
-                                        int32_t arg4,
-                                        int32_t arg5);
+                                        int32_t arg4);
 typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
                                          int32_t arg1,
                                          int32_t arg2,
@@ -1728,94 +1564,28 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
       int32_t arg2 = get_register(r2);
       int32_t arg3 = get_register(r3);
       int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
-      int32_t arg4 = stack_pointer[0];
-      int32_t arg5 = stack_pointer[1];
-      bool fp_call =
-         (redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
-         (redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
-         (redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
-         (redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
-      if (use_eabi_hardfloat()) {
-        // With the hard floating point calling convention, double
-        // arguments are passed in VFP registers. Fetch the arguments
-        // from there and call the builtin using soft floating point
-        // convention.
-        switch (redirection->type()) {
-        case ExternalReference::BUILTIN_FP_FP_CALL:
-        case ExternalReference::BUILTIN_COMPARE_CALL:
-          arg0 = vfp_register[0];
-          arg1 = vfp_register[1];
-          arg2 = vfp_register[2];
-          arg3 = vfp_register[3];
-          break;
-        case ExternalReference::BUILTIN_FP_CALL:
-          arg0 = vfp_register[0];
-          arg1 = vfp_register[1];
-          break;
-        case ExternalReference::BUILTIN_FP_INT_CALL:
-          arg0 = vfp_register[0];
-          arg1 = vfp_register[1];
-          arg2 = get_register(0);
-          break;
-        default:
-          break;
-        }
-      }
+      int32_t arg4 = *stack_pointer;
       // This is dodgy but it works because the C entry stubs are never moved.
       // See comment in codegen-arm.cc and bug 1242173.
       int32_t saved_lr = get_register(lr);
       intptr_t external =
           reinterpret_cast<intptr_t>(redirection->external_function());
-      if (fp_call) {
+      if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
+        SimulatorRuntimeFPCall target =
+            reinterpret_cast<SimulatorRuntimeFPCall>(external);
         if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
-          SimulatorRuntimeFPCall target =
-              reinterpret_cast<SimulatorRuntimeFPCall>(external);
-          double dval0, dval1;
-          int32_t ival;
-          switch (redirection->type()) {
-          case ExternalReference::BUILTIN_FP_FP_CALL:
-          case ExternalReference::BUILTIN_COMPARE_CALL:
-            GetFpArgs(&dval0, &dval1);
-            PrintF("Call to host function at %p with args %f, %f",
-                   FUNCTION_ADDR(target), dval0, dval1);
-            break;
-          case ExternalReference::BUILTIN_FP_CALL:
-            GetFpArgs(&dval0);
-            PrintF("Call to host function at %p with arg %f",
-                   FUNCTION_ADDR(target), dval0);
-            break;
-          case ExternalReference::BUILTIN_FP_INT_CALL:
-            GetFpArgs(&dval0, &ival);
-            PrintF("Call to host function at %p with args %f, %d",
-                   FUNCTION_ADDR(target), dval0, ival);
-            break;
-          default:
-            UNREACHABLE();
-            break;
-          }
+          double x, y;
+          GetFpArgs(&x, &y);
+          PrintF("Call to host function at %p with args %f, %f",
+                 FUNCTION_ADDR(target), x, y);
           if (!stack_aligned) {
             PrintF(" with unaligned stack %08x\n", get_register(sp));
           }
           PrintF("\n");
         }
         CHECK(stack_aligned);
-        if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
-          SimulatorRuntimeFPCall target =
-              reinterpret_cast<SimulatorRuntimeFPCall>(external);
-          double result = target(arg0, arg1, arg2, arg3);
-          SetFpResult(result);
-        } else {
-          SimulatorRuntimeCall target =
-              reinterpret_cast<SimulatorRuntimeCall>(external);
-          int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
-          int32_t lo_res = static_cast<int32_t>(result);
-          int32_t hi_res = static_cast<int32_t>(result >> 32);
-          if (::v8::internal::FLAG_trace_sim) {
-            PrintF("Returned %08x\n", lo_res);
-          }
-          set_register(r0, lo_res);
-          set_register(r1, hi_res);
-        }
+        double result = target(arg0, arg1, arg2, arg3);
+        SetFpResult(result);
       } else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
         SimulatorRuntimeDirectApiCall target =
             reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
@@ -1857,22 +1627,20 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
             reinterpret_cast<SimulatorRuntimeCall>(external);
         if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
           PrintF(
-              "Call to host function at %p"
-              "args %08x, %08x, %08x, %08x, %08x, %08x",
+              "Call to host function at %p args %08x, %08x, %08x, %08x, %0xc",
               FUNCTION_ADDR(target),
               arg0,
               arg1,
               arg2,
               arg3,
-              arg4,
-              arg5);
+              arg4);
           if (!stack_aligned) {
             PrintF(" with unaligned stack %08x\n", get_register(sp));
           }
           PrintF("\n");
         }
         CHECK(stack_aligned);
-        int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
+        int64_t result = target(arg0, arg1, arg2, arg3, arg4);
         int32_t lo_res = static_cast<int32_t>(result);
         int32_t hi_res = static_cast<int32_t>(result >> 32);
         if (::v8::internal::FLAG_trace_sim) {
@@ -1886,7 +1654,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
       break;
     }
     case kBreakpoint: {
-      ArmDebugger dbg(this);
+      Debugger dbg(this);
       dbg.Debug();
       break;
     }
@@ -1900,7 +1668,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
       // Stop if it is enabled, otherwise go on jumping over the stop
       // and the message address.
       if (isEnabledStop(code)) {
-        ArmDebugger dbg(this);
+        Debugger dbg(this);
         dbg.Stop(instr);
       } else {
         set_pc(get_pc() + 2 * Instruction::kInstrSize);
@@ -2208,7 +1976,7 @@ void Simulator::DecodeType01(Instruction* instr) {
           break;
         }
         case BKPT: {
-          ArmDebugger dbg(this);
+          Debugger dbg(this);
           PrintF("Simulator hit BKPT.\n");
           dbg.Debug();
           break;
@@ -2320,15 +2088,8 @@ void Simulator::DecodeType01(Instruction* instr) {
       }

       case ADC: {
-        // Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
-        // Format(instr, "adc'cond's 'rd, 'rn, 'imm");
-
-        alu_out = rn_val + shifter_operand + GetCarry();
-        set_register(rd, alu_out);
-        if (instr->HasS()) {
-          SetNZFlags(alu_out);
-          SetCFlag(CarryFrom(rn_val, shifter_operand, GetCarry()));
-          SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
-        }
+        Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
+        Format(instr, "adc'cond's 'rd, 'rn, 'imm");
         break;
       }
@@ -2706,8 +2467,6 @@ void Simulator::DecodeType7(Instruction* instr) {
 // vmov :Rt = Sn
 // vcvt: Dd = Sm
 // vcvt: Sd = Dm
-// Dd = vabs(Dm)
-// Dd = vneg(Dm)
 // Dd = vadd(Dn, Dm)
 // Dd = vsub(Dn, Dm)
 // Dd = vmul(Dn, Dm)
@@ -2743,11 +2502,6 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
       double dm_value = get_double_from_d_register(vm);
       double dd_value = fabs(dm_value);
       set_d_register_from_double(vd, dd_value);
-    } else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
-      // vneg
-      double dm_value = get_double_from_d_register(vm);
-      double dd_value = -dm_value;
-      set_d_register_from_double(vd, dd_value);
     } else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
       DecodeVCVTBetweenDoubleAndSingle(instr);
     } else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
@@ -3141,17 +2895,9 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
         }
         break;
       }
-      case 0x4:
-      case 0x5:
-      case 0x6:
-      case 0x7:
-      case 0x9:
-      case 0xB:
-        // Load/store multiple single from memory: vldm/vstm.
-        HandleVList(instr);
-        break;
       default:
         UNIMPLEMENTED();  // Not used by V8.
+        break;
     }
   } else if (instr->CoprocessorValue() == 0xB) {
     switch (instr->OpcodeValue()) {
@@ -3198,14 +2944,9 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
         }
         break;
       }
-      case 0x4:
-      case 0x5:
-      case 0x9:
-        // Load/store multiple double from memory: vldm/vstm.
-        HandleVList(instr);
-        break;
       default:
         UNIMPLEMENTED();  // Not used by V8.
+        break;
     }
   } else {
     UNIMPLEMENTED();  // Not used by V8.
@@ -3216,7 +2957,7 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
 // Executes the current instruction.
 void Simulator::InstructionDecode(Instruction* instr) {
   if (v8::internal::FLAG_check_icache) {
-    CheckICache(isolate_->simulator_i_cache(), instr);
+    CheckICache(instr);
   }
   pc_modified_ = false;
   if (::v8::internal::FLAG_trace_sim) {
@@ -3299,7 +3040,7 @@ void Simulator::Execute() {
     Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
     icount_++;
     if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
-      ArmDebugger dbg(this);
+      Debugger dbg(this);
       dbg.Debug();
     } else {
       InstructionDecode(instr);

85
deps/v8/src/arm/simulator-arm.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -49,28 +49,25 @@ namespace internal {
   (entry(p0, p1, p2, p3, p4))

 typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
-                                  void*, int*, Address, int, Isolate*);
+                                  void*, int*, Address, int);

 // Call the generated regexp code directly. The code at the entry address
 // should act as a function matching the type arm_regexp_matcher.
 // The fifth argument is a dummy that reserves the space used for
 // the return address added by the ExitFrame in native calls.
-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
-  (FUNCTION_CAST<arm_regexp_matcher>(entry)( \
-      p0, p1, p2, p3, NULL, p4, p5, p6, p7))
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  (FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6))

 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
-  reinterpret_cast<TryCatch*>(try_catch_address)
+  (reinterpret_cast<TryCatch*>(try_catch_address))

 // The stack limit beyond which we will throw stack overflow errors in
 // generated code. Because generated code on arm uses the C stack, we
 // just use the C stack limit.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
-                                            uintptr_t c_limit) {
-    USE(isolate);
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
     return c_limit;
   }
@@ -126,7 +123,7 @@ class CachePage {
 class Simulator {
  public:
-  friend class ArmDebugger;
+  friend class Debugger;

   enum Register {
     no_reg = -1,
     r0 = 0, r1, r2, r3, r4, r5, r6, r7,
@@ -145,19 +142,18 @@ class Simulator {
     num_d_registers = 16
   };

-  explicit Simulator(Isolate* isolate);
+  Simulator();
   ~Simulator();

   // The currently executing Simulator instance. Potentially there can be one
   // for each native thread.
-  static Simulator* current(v8::internal::Isolate* isolate);
+  static Simulator* current();

   // Accessors for register state. Reading the pc value adheres to the ARM
   // architecture specification and is off by 8 from the currently executing
   // instruction.
   void set_register(int reg, int32_t value);
   int32_t get_register(int reg) const;
-  double get_double_from_register_pair(int reg);
   void set_dw_register(int dreg, const int* dbl);

   // Support for VFP.
@@ -181,7 +177,7 @@ class Simulator {
   void Execute();

   // Call on program start.
-  static void Initialize(Isolate* isolate);
+  static void Initialize();

   // V8 generally calls into generated JS code with 5 parameters and into
   // generated RegExp code with 7 parameters. This is a convenience function,
@@ -195,22 +191,12 @@ class Simulator {
   uintptr_t PopAddress();

   // ICache checking.
-  static void FlushICache(v8::internal::HashMap* i_cache, void* start,
-                          size_t size);
+  static void FlushICache(void* start, size_t size);

   // Returns true if pc register contains one of the 'special_values' defined
   // below (bad_lr, end_sim_pc).
   bool has_bad_pc() const;

-  // EABI variant for double arguments in use.
-  bool use_eabi_hardfloat() {
-#if USE_EABI_HARDFLOAT
-    return true;
-#else
-    return false;
-#endif
-  }
-
  private:
   enum special_values {
     // Known bad pc value to ensure that the simulator does not execute
@@ -234,17 +220,13 @@ class Simulator {
   void SetNZFlags(int32_t val);
   void SetCFlag(bool val);
   void SetVFlag(bool val);
-  bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
+  bool CarryFrom(int32_t left, int32_t right);
   bool BorrowFrom(int32_t left, int32_t right);
   bool OverflowFrom(int32_t alu_out,
                     int32_t left,
                     int32_t right,
                     bool addition);

-  inline int GetCarry() {
-    return c_flag_ ? 1 : 0;
-  };
-
   // Support for VFP.
   void Compute_FPSCR_Flags(double val1, double val2);
   void Copy_FPSCR_to_APSR();
@@ -252,13 +234,7 @@ class Simulator {
   // Helper functions to decode common "addressing" modes
   int32_t GetShiftRm(Instruction* instr, bool* carry_out);
   int32_t GetImm(Instruction* instr, bool* carry_out);
-  void ProcessPUW(Instruction* instr,
-                  int num_regs,
-                  int operand_size,
-                  intptr_t* start_address,
-                  intptr_t* end_address);
   void HandleRList(Instruction* instr, bool load);
-  void HandleVList(Instruction* inst);
   void SoftwareInterrupt(Instruction* instr);

   // Stop helper functions.
@@ -311,20 +287,18 @@ class Simulator {
   void InstructionDecode(Instruction* instr);

   // ICache.
-  static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
-  static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
-                           int size);
-  static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
+  static void CheckICache(Instruction* instr);
+  static void FlushOnePage(intptr_t start, int size);
+  static CachePage* GetCachePage(void* page);

   // Runtime call support.
   static void* RedirectExternalReference(
       void* external_function,
       v8::internal::ExternalReference::Type type);

-  // For use in calls that take double value arguments.
+  // For use in calls that take two double values, constructed from r0, r1, r2
+  // and r3.
   void GetFpArgs(double* x, double* y);
-  void GetFpArgs(double* x);
-  void GetFpArgs(double* x, int32_t* y);
   void SetFpResult(const double& result);
   void TrashCallerSaveRegisters();
@@ -359,16 +333,15 @@ class Simulator {
   char* stack_;
   bool pc_modified_;
   int icount_;
-  static bool initialized_;

   // Icache simulation
-  v8::internal::HashMap* i_cache_;
+  static v8::internal::HashMap* i_cache_;

   // Registered breakpoints.
   Instruction* break_pc_;
   Instr break_instr_;

-  v8::internal::Isolate* isolate_;
-
   // A stop is watched if its code is less than kNumOfWatchedStops.
   // Only watched stops support enabling/disabling and the counter feature.
   static const uint32_t kNumOfWatchedStops = 256;
@@ -391,16 +364,15 @@ class Simulator {
 // When running with the simulator transition into simulated execution at this
 // point.
 #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
-  reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
+  reinterpret_cast<Object*>(Simulator::current()->Call( \
       FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))

-#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
-  Simulator::current(Isolate::Current())->Call( \
-      entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
+#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
+  Simulator::current()->Call(entry, 8, p0, p1, p2, p3, NULL, p4, p5, p6)

 #define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
-  try_catch_address == NULL ? \
-      NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
+  try_catch_address == \
+      NULL ? NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))

 // The simulator has its own stack. Thus it has a different stack limit from
@@ -410,18 +382,17 @@ class Simulator {
 // trouble down the line.
 class SimulatorStack : public v8::internal::AllStatic {
  public:
-  static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
-                                            uintptr_t c_limit) {
-    return Simulator::current(isolate)->StackLimit();
+  static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
+    return Simulator::current()->StackLimit();
   }

   static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
-    Simulator* sim = Simulator::current(Isolate::Current());
+    Simulator* sim = Simulator::current();
     return sim->PushAddress(try_catch_address);
   }

   static inline void UnregisterCTryCatch() {
-    Simulator::current(Isolate::Current())->PopAddress();
+    Simulator::current()->PopAddress();
   }
 };
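These macros are the seam where V8 either jumps straight into generated code or detours through the simulator; dropping the Isolate argument is what shrinks every call site in this hunk. A toy rendition of that seam (names hypothetical, for illustration only):

#include <cstdio>

// Toy trampoline standing in for Simulator::current()->Call(...).
int SimulatedCall(int (*entry)(int, int), int p0, int p1) {
  std::printf("entering simulated execution\n");
  return entry(p0, p1);
}

#define CALL_GENERATED_CODE(entry, p0, p1) SimulatedCall(entry, p0, p1)

int Add(int a, int b) { return a + b; }

int main() { return CALL_GENERATED_CODE(Add, 2, 3) == 5 ? 0 : 1; }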

1240
deps/v8/src/arm/stub-cache-arm.cc

File diff suppressed because it is too large

45
deps/v8/src/platform-tls-mac.h → deps/v8/src/arm/virtual-frame-arm-inl.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -25,38 +25,35 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-#ifndef V8_PLATFORM_TLS_MAC_H_
-#define V8_PLATFORM_TLS_MAC_H_
+#ifndef V8_VIRTUAL_FRAME_ARM_INL_H_
+#define V8_VIRTUAL_FRAME_ARM_INL_H_

-#include "globals.h"
+#include "assembler-arm.h"
+#include "virtual-frame-arm.h"

 namespace v8 {
 namespace internal {

-#if defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64)
-
-#define V8_FAST_TLS_SUPPORTED 1
-
-extern intptr_t kMacTlsBaseOffset;
-
-INLINE(intptr_t InternalGetExistingThreadLocal(intptr_t index));
-
-inline intptr_t InternalGetExistingThreadLocal(intptr_t index) {
-  intptr_t result;
-#if defined(V8_HOST_ARCH_IA32)
-  asm("movl %%gs:(%1,%2,4), %0;"
-      :"=r"(result)  // Output must be a writable register.
-      :"r"(kMacTlsBaseOffset), "r"(index));
-#else
-  asm("movq %%gs:(%1,%2,8), %0;"
-      :"=r"(result)
-      :"r"(kMacTlsBaseOffset), "r"(index));
-#endif
-  return result;
-}
-
-#endif
+// These VirtualFrame methods should actually be in a virtual-frame-arm-inl.h
+// file if such a thing existed.
+MemOperand VirtualFrame::ParameterAt(int index) {
+  // Index -1 corresponds to the receiver.
+  ASSERT(-1 <= index);  // -1 is the receiver.
+  ASSERT(index <= parameter_count());
+  return MemOperand(fp, (1 + parameter_count() - index) * kPointerSize);
+}
+
+// The receiver frame slot.
+MemOperand VirtualFrame::Receiver() {
+  return ParameterAt(-1);
+}
+
+void VirtualFrame::Forget(int count) {
+  SpillAll();
+  LowerHeight(count);
+}

 } }  // namespace v8::internal

-#endif  // V8_PLATFORM_TLS_MAC_H_
+#endif  // V8_VIRTUAL_FRAME_ARM_INL_H_
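ParameterAt's offset formula, (1 + parameter_count() - index) * kPointerSize, walks upward from fp through the caller's pushed arguments, with the receiver sitting above the last parameter. A worked instance of just the arithmetic (assumed values: kPointerSize == 4, two parameters):

#include <cstdio>

int main() {
  const int kPointerSize = 4;
  const int parameter_count = 2;
  for (int index = -1; index < parameter_count; ++index) {
    std::printf("ParameterAt(%2d) -> MemOperand(fp, +%d)\n", index,
                (1 + parameter_count - index) * kPointerSize);
  }
  // ParameterAt(-1) -> fp+16 (receiver), ParameterAt(0) -> fp+12,
  // ParameterAt(1) -> fp+8: higher indices sit closer to fp.
}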

843
deps/v8/src/arm/virtual-frame-arm.cc

@@ -0,0 +1,843 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"
#include "virtual-frame-inl.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm())
void VirtualFrame::PopToR1R0() {
// Shuffle things around so the top of stack is in r0 and r1.
MergeTOSTo(R0_R1_TOS);
// Pop the two registers off the stack so they are detached from the frame.
LowerHeight(2);
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::PopToR1() {
// Shuffle things around so the top of stack is only in r1.
MergeTOSTo(R1_TOS);
// Pop the register off the stack so it is detached from the frame.
LowerHeight(1);
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::PopToR0() {
// Shuffle things around so the top of stack only in r0.
MergeTOSTo(R0_TOS);
// Pop the register off the stack so it is detached from the frame.
LowerHeight(1);
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
if (Equals(expected)) return;
ASSERT((expected->tos_known_smi_map_ & tos_known_smi_map_) ==
expected->tos_known_smi_map_);
ASSERT(expected->IsCompatibleWith(this));
MergeTOSTo(expected->top_of_stack_state_, cond);
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}
void VirtualFrame::MergeTo(VirtualFrame* expected, Condition cond) {
if (Equals(expected)) return;
tos_known_smi_map_ &= expected->tos_known_smi_map_;
MergeTOSTo(expected->top_of_stack_state_, cond);
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}
void VirtualFrame::MergeTOSTo(
VirtualFrame::TopOfStack expected_top_of_stack_state, Condition cond) {
#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
__ pop(r0, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
__ pop(r1, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
__ pop(r0, cond);
__ pop(r1, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
__ pop(r1, cond);
__ pop(r0, cond);
break;
case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
__ push(r0, cond);
break;
case CASE_NUMBER(R0_TOS, R0_TOS):
break;
case CASE_NUMBER(R0_TOS, R1_TOS):
__ mov(r1, r0, LeaveCC, cond);
break;
case CASE_NUMBER(R0_TOS, R0_R1_TOS):
__ pop(r1, cond);
break;
case CASE_NUMBER(R0_TOS, R1_R0_TOS):
__ mov(r1, r0, LeaveCC, cond);
__ pop(r0, cond);
break;
case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
__ push(r1, cond);
break;
case CASE_NUMBER(R1_TOS, R0_TOS):
__ mov(r0, r1, LeaveCC, cond);
break;
case CASE_NUMBER(R1_TOS, R1_TOS):
break;
case CASE_NUMBER(R1_TOS, R0_R1_TOS):
__ mov(r0, r1, LeaveCC, cond);
__ pop(r1, cond);
break;
case CASE_NUMBER(R1_TOS, R1_R0_TOS):
__ pop(r0, cond);
break;
case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
__ Push(r1, r0, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R0_TOS):
__ push(r1, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R1_TOS):
__ push(r1, cond);
__ mov(r1, r0, LeaveCC, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
break;
case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
__ Swap(r0, r1, ip, cond);
break;
case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
__ Push(r0, r1, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R0_TOS):
__ push(r0, cond);
__ mov(r0, r1, LeaveCC, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R1_TOS):
__ push(r0, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
__ Swap(r0, r1, ip, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
break;
default:
UNREACHABLE();
#undef CASE_NUMBER
}
// A conditional merge will be followed by a conditional branch and the
// fall-through code will have an unchanged virtual frame state. If the
// merge is unconditional ('al'ways) then it might be followed by a fall
// through. We need to update the virtual frame state to match the code we
// are falling into. The final case is an unconditional merge followed by an
// unconditional branch, in which case it doesn't matter what we do to the
// virtual frame state, because the virtual frame will be invalidated.
if (cond == al) {
top_of_stack_state_ = expected_top_of_stack_state;
}
}
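The CASE_NUMBER macro in MergeTOSTo flattens the (current state, target state) pair into a single integer so one switch covers all 5x5 transitions. The same dispatch pattern, reduced to three states (an illustrative sketch, not V8 code):

#include <cstdio>

enum Tos { NONE, R0, R1, TOS_STATES };
#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))

// One flat switch over a two-dimensional (from, to) state pair.
const char* Transition(Tos from, Tos to) {
  switch (CASE_NUMBER(from, to)) {
    case CASE_NUMBER(NONE, R0): return "pop r0";
    case CASE_NUMBER(R0, NONE): return "push r0";
    case CASE_NUMBER(R0, R1):   return "mov r1, r0";
    default:                    return "no-op or unhandled";
  }
}
#undef CASE_NUMBER

int main() { std::printf("%s\n", Transition(R0, R1)); }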
void VirtualFrame::Enter() {
Comment cmnt(masm(), "[ Enter JS frame");
#ifdef DEBUG
// Verify that r1 contains a JS function. The following code relies
// on r2 being available for use.
if (FLAG_debug_code) {
Label map_check, done;
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &map_check);
__ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
__ bind(&map_check);
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(eq, &done);
__ stop("VirtualFrame::Enter - r1 is not a function (map check).");
__ bind(&done);
}
#endif // DEBUG
// We are about to push four values to the frame.
Adjust(4);
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
}
void VirtualFrame::Exit() {
Comment cmnt(masm(), "[ Exit JS frame");
// Record the location of the JS exit code for patching when setting
// break point.
__ RecordJSReturn();
// Drop the execution stack down to the frame pointer and restore the caller
// frame pointer and return address.
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
}
void VirtualFrame::AllocateStackSlots() {
int count = local_count();
if (count > 0) {
Comment cmnt(masm(), "[ Allocate space for locals");
Adjust(count);
// Initialize stack slots with 'undefined' value.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ LoadRoot(r2, Heap::kStackLimitRootIndex);
if (count < kLocalVarBound) {
// For fewer locals the unrolled loop is more compact.
for (int i = 0; i < count; i++) {
__ push(ip);
}
} else {
// For more locals a loop in generated code is more compact.
Label alloc_locals_loop;
__ mov(r1, Operand(count));
__ bind(&alloc_locals_loop);
__ push(ip);
__ sub(r1, r1, Operand(1), SetCC);
__ b(ne, &alloc_locals_loop);
}
} else {
__ LoadRoot(r2, Heap::kStackLimitRootIndex);
}
// Check the stack for overflow or a break request.
masm()->cmp(sp, Operand(r2));
StackCheckStub stub;
// Call the stub if lower.
masm()->mov(ip,
Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
RelocInfo::CODE_TARGET),
LeaveCC,
lo);
masm()->Call(ip, lo);
}
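The kLocalVarBound split in AllocateStackSlots is a code-size trade: unrolling emits one push per local, while the generated loop is a fixed handful of instructions regardless of count. A rough model of the break-even point (editorial sketch; the real bound, 5, is defined in virtual-frame-arm.h below):

#include <cstdio>

int main() {
  const int kLoopSize = 4;  // mov + push + sub + branch, emitted once
  for (int locals = 1; locals <= 8; ++locals) {
    int unrolled = locals;  // one push instruction per local
    std::printf("locals=%d: unrolled=%d vs loop=%d -> %s\n", locals, unrolled,
                kLoopSize, unrolled <= kLoopSize ? "unroll" : "loop");
  }
}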
void VirtualFrame::PushReceiverSlotAddress() {
UNIMPLEMENTED();
}
void VirtualFrame::PushTryHandler(HandlerType type) {
// Grow the expression stack by handler size less one (the return
// address in lr is already counted by a call instruction).
Adjust(kHandlerSize - 1);
__ PushTryHandler(IN_JAVASCRIPT, type);
}
void VirtualFrame::CallJSFunction(int arg_count) {
// InvokeFunction requires function in r1.
PopToR1();
SpillAll();
// +1 for receiver.
Forget(arg_count + 1);
ASSERT(cgen()->HasValidEntryRegisters());
ParameterCount count(arg_count);
__ InvokeFunction(r1, count, CALL_FUNCTION);
// Restore the context.
__ ldr(cp, Context());
}
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
SpillAll();
Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
}
void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
SpillAll();
Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
void VirtualFrame::DebugBreak() {
ASSERT(cgen()->HasValidEntryRegisters());
__ DebugBreak();
}
#endif
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
int arg_count) {
Forget(arg_count);
__ InvokeBuiltin(id, flags);
}
void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
PopToR0();
SpillAll();
__ mov(r2, Operand(name));
CallCodeObject(ic, mode, 0);
}
void VirtualFrame::CallStoreIC(Handle<String> name,
bool is_contextual,
StrictModeFlag strict_mode) {
Handle<Code> ic(Builtins::builtin(
(strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
PopToR0();
RelocInfo::Mode mode;
if (is_contextual) {
SpillAll();
__ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
mode = RelocInfo::CODE_TARGET_CONTEXT;
} else {
EmitPop(r1);
SpillAll();
mode = RelocInfo::CODE_TARGET;
}
__ mov(r2, Operand(name));
CallCodeObject(ic, mode, 0);
}
void VirtualFrame::CallKeyedLoadIC() {
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
PopToR1R0();
SpillAll();
CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}
void VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
Handle<Code> ic(Builtins::builtin(
(strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
PopToR1R0();
SpillAll();
EmitPop(r2);
CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
int dropped_args) {
switch (code->kind()) {
case Code::CALL_IC:
case Code::KEYED_CALL_IC:
case Code::FUNCTION:
break;
case Code::KEYED_LOAD_IC:
case Code::LOAD_IC:
case Code::KEYED_STORE_IC:
case Code::STORE_IC:
ASSERT(dropped_args == 0);
break;
case Code::BUILTIN:
ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
break;
default:
UNREACHABLE();
break;
}
Forget(dropped_args);
ASSERT(cgen()->HasValidEntryRegisters());
__ Call(code, rmode);
}
// NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS.
const bool VirtualFrame::kR0InUse[TOS_STATES] =
{ false, true, false, true, true };
const bool VirtualFrame::kR1InUse[TOS_STATES] =
{ false, false, true, true, true };
const int VirtualFrame::kVirtualElements[TOS_STATES] =
{ 0, 1, 1, 2, 2 };
const Register VirtualFrame::kTopRegister[TOS_STATES] =
{ r0, r0, r1, r1, r0 };
const Register VirtualFrame::kBottomRegister[TOS_STATES] =
{ r0, r0, r1, r0, r1 };
const Register VirtualFrame::kAllocatedRegisters[
VirtualFrame::kNumberOfAllocatedRegisters] = { r2, r3, r4, r5, r6 };
// Popping is done by the transition implied by kStateAfterPop. Of course if
// there were no stack slots allocated to registers then the physical SP must
// be adjusted.
const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
{ NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS };
// Pushing is done by the transition implied by kStateAfterPush. Of course if
// the maximum number of registers was already allocated to the top of stack
// slots then one register must be physically pushed onto the stack.
const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
{ R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS };
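These parallel tables make Push and Pop mostly bookkeeping: a table-driven state transition plus, at worst, one emitted instruction. A cut-down, runnable illustration of the same idea with three states (sketch only; the real frame has five states and two cached registers):

#include <cassert>

enum Tos { NONE, R0, R1, TOS_STATES };
const int kVirtualElements[TOS_STATES] = {0, 1, 1};
const Tos kStateAfterPush[TOS_STATES]  = {R0, R1, R0};
const Tos kStateAfterPop[TOS_STATES]   = {NONE, NONE, NONE};

int main() {
  Tos state = NONE;
  state = kStateAfterPush[state];        // "push": TOS now cached in r0
  assert(kVirtualElements[state] == 1);  // one element lives in a register
  state = kStateAfterPop[state];         // "pop": nothing cached again
  assert(state == NONE);
  return 0;
}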
bool VirtualFrame::SpilledScope::is_spilled_ = false;
void VirtualFrame::Drop(int count) {
ASSERT(count >= 0);
ASSERT(height() >= count);
// Discard elements from the virtual frame and free any registers.
int num_virtual_elements = kVirtualElements[top_of_stack_state_];
while (num_virtual_elements > 0) {
Pop();
num_virtual_elements--;
count--;
if (count == 0) return;
}
if (count == 0) return;
__ add(sp, sp, Operand(count * kPointerSize));
LowerHeight(count);
}
void VirtualFrame::Pop() {
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
__ add(sp, sp, Operand(kPointerSize));
} else {
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
}
LowerHeight(1);
}
void VirtualFrame::EmitPop(Register reg) {
ASSERT(!is_used(RegisterAllocator::ToNumber(reg)));
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
__ pop(reg);
} else {
__ mov(reg, kTopRegister[top_of_stack_state_]);
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
}
LowerHeight(1);
}
void VirtualFrame::SpillAllButCopyTOSToR0() {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r0, MemOperand(sp, 0));
break;
case R0_TOS:
__ push(r0);
break;
case R1_TOS:
__ push(r1);
__ mov(r0, r1);
break;
case R0_R1_TOS:
__ Push(r1, r0);
break;
case R1_R0_TOS:
__ Push(r0, r1);
__ mov(r0, r1);
break;
default:
UNREACHABLE();
}
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::SpillAllButCopyTOSToR1() {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r1, MemOperand(sp, 0));
break;
case R0_TOS:
__ push(r0);
__ mov(r1, r0);
break;
case R1_TOS:
__ push(r1);
break;
case R0_R1_TOS:
__ Push(r1, r0);
__ mov(r1, r0);
break;
case R1_R0_TOS:
__ Push(r0, r1);
break;
default:
UNREACHABLE();
}
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::SpillAllButCopyTOSToR1R0() {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r1, MemOperand(sp, 0));
__ ldr(r0, MemOperand(sp, kPointerSize));
break;
case R0_TOS:
__ push(r0);
__ mov(r1, r0);
__ ldr(r0, MemOperand(sp, kPointerSize));
break;
case R1_TOS:
__ push(r1);
__ ldr(r0, MemOperand(sp, kPointerSize));
break;
case R0_R1_TOS:
__ Push(r1, r0);
__ Swap(r0, r1, ip);
break;
case R1_R0_TOS:
__ Push(r0, r1);
break;
default:
UNREACHABLE();
}
top_of_stack_state_ = NO_TOS_REGISTERS;
}
Register VirtualFrame::Peek() {
AssertIsNotSpilled();
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
Register answer = kTopRegister[top_of_stack_state_];
__ pop(answer);
return answer;
} else {
return kTopRegister[top_of_stack_state_];
}
}
Register VirtualFrame::Peek2() {
AssertIsNotSpilled();
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
case R0_TOS:
case R0_R1_TOS:
MergeTOSTo(R0_R1_TOS);
return r1;
case R1_TOS:
case R1_R0_TOS:
MergeTOSTo(R1_R0_TOS);
return r0;
default:
UNREACHABLE();
return no_reg;
}
}
void VirtualFrame::Dup() {
if (SpilledScope::is_spilled()) {
__ ldr(ip, MemOperand(sp, 0));
__ push(ip);
} else {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r0, MemOperand(sp, 0));
top_of_stack_state_ = R0_TOS;
break;
case R0_TOS:
__ mov(r1, r0);
// r0 and r1 contain the same value. Prefer state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_TOS:
__ mov(r0, r1);
// r0 and r1 contain the same value. Prefer state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
case R0_R1_TOS:
__ push(r1);
__ mov(r1, r0);
// r0 and r1 contain the same value. Prefer state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_R0_TOS:
__ push(r0);
__ mov(r0, r1);
// r0 and r1 contain the same value. Prefer state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
default:
UNREACHABLE();
}
}
RaiseHeight(1, tos_known_smi_map_ & 1);
}
void VirtualFrame::Dup2() {
if (SpilledScope::is_spilled()) {
__ ldr(ip, MemOperand(sp, kPointerSize));
__ push(ip);
__ ldr(ip, MemOperand(sp, kPointerSize));
__ push(ip);
} else {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r0, MemOperand(sp, 0));
__ ldr(r1, MemOperand(sp, kPointerSize));
top_of_stack_state_ = R0_R1_TOS;
break;
case R0_TOS:
__ push(r0);
__ ldr(r1, MemOperand(sp, kPointerSize));
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_TOS:
__ push(r1);
__ ldr(r0, MemOperand(sp, kPointerSize));
top_of_stack_state_ = R1_R0_TOS;
break;
case R0_R1_TOS:
__ Push(r1, r0);
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_R0_TOS:
__ Push(r0, r1);
top_of_stack_state_ = R1_R0_TOS;
break;
default:
UNREACHABLE();
}
}
RaiseHeight(2, tos_known_smi_map_ & 3);
}
Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
ASSERT(but_not_to_this_one.is(r0) ||
but_not_to_this_one.is(r1) ||
but_not_to_this_one.is(no_reg));
LowerHeight(1);
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
if (but_not_to_this_one.is(r0)) {
__ pop(r1);
return r1;
} else {
__ pop(r0);
return r0;
}
} else {
Register answer = kTopRegister[top_of_stack_state_];
ASSERT(!answer.is(but_not_to_this_one));
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
return answer;
}
}
void VirtualFrame::EnsureOneFreeTOSRegister() {
if (kVirtualElements[top_of_stack_state_] == kMaxTOSRegisters) {
__ push(kBottomRegister[top_of_stack_state_]);
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
}
ASSERT(kVirtualElements[top_of_stack_state_] != kMaxTOSRegisters);
}
void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
RaiseHeight(1, info.IsSmi() ? 1 : 0);
if (reg.is(cp)) {
// If we are pushing cp then we are about to make a call and things have to
// be pushed to the physical stack. There's nothing to be gained by moving
// to a TOS register and then pushing that; we might as well push to the
// physical stack immediately.
MergeTOSTo(NO_TOS_REGISTERS);
__ push(reg);
return;
}
if (SpilledScope::is_spilled()) {
ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
__ push(reg);
return;
}
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
if (reg.is(r0)) {
top_of_stack_state_ = R0_TOS;
return;
}
if (reg.is(r1)) {
top_of_stack_state_ = R1_TOS;
return;
}
}
EnsureOneFreeTOSRegister();
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
Register dest = kTopRegister[top_of_stack_state_];
__ Move(dest, reg);
}
void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
if (this_far_down < kTOSKnownSmiMapSize) {
tos_known_smi_map_ &= ~(1 << this_far_down);
}
if (this_far_down == 0) {
Pop();
Register dest = GetTOSRegister();
if (dest.is(reg)) {
// We already popped one item off the top of the stack. If the only
// free register is the one we were asked to push then we have been
// asked to push a register that was already in use, which cannot
// happen. It therefore follows that there are two free TOS registers:
ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
dest = dest.is(r0) ? r1 : r0;
}
__ mov(dest, reg);
EmitPush(dest);
} else if (this_far_down == 1) {
int virtual_elements = kVirtualElements[top_of_stack_state_];
if (virtual_elements < 2) {
__ str(reg, ElementAt(this_far_down));
} else {
ASSERT(virtual_elements == 2);
ASSERT(!reg.is(r0));
ASSERT(!reg.is(r1));
Register dest = kBottomRegister[top_of_stack_state_];
__ mov(dest, reg);
}
} else {
ASSERT(this_far_down >= 2);
ASSERT(kVirtualElements[top_of_stack_state_] <= 2);
__ str(reg, ElementAt(this_far_down));
}
}
Register VirtualFrame::GetTOSRegister() {
if (SpilledScope::is_spilled()) return r0;
EnsureOneFreeTOSRegister();
return kTopRegister[kStateAfterPush[top_of_stack_state_]];
}
void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
RaiseHeight(1, info.IsSmi() ? 1 : 0);
if (SpilledScope::is_spilled()) {
__ mov(r0, operand);
__ push(r0);
return;
}
EnsureOneFreeTOSRegister();
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
__ mov(kTopRegister[top_of_stack_state_], operand);
}
void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
RaiseHeight(1, info.IsSmi() ? 1 : 0);
if (SpilledScope::is_spilled()) {
__ ldr(r0, operand);
__ push(r0);
return;
}
EnsureOneFreeTOSRegister();
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
__ ldr(kTopRegister[top_of_stack_state_], operand);
}
void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
RaiseHeight(1, 0);
if (SpilledScope::is_spilled()) {
__ LoadRoot(r0, index);
__ push(r0);
return;
}
EnsureOneFreeTOSRegister();
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
__ LoadRoot(kTopRegister[top_of_stack_state_], index);
}
void VirtualFrame::EmitPushMultiple(int count, int src_regs) {
ASSERT(SpilledScope::is_spilled());
Adjust(count);
__ stm(db_w, sp, src_regs);
}
void VirtualFrame::SpillAll() {
switch (top_of_stack_state_) {
case R1_R0_TOS:
masm()->push(r0);
// Fall through.
case R1_TOS:
masm()->push(r1);
top_of_stack_state_ = NO_TOS_REGISTERS;
break;
case R0_R1_TOS:
masm()->push(r1);
// Fall through.
case R0_TOS:
masm()->push(r0);
top_of_stack_state_ = NO_TOS_REGISTERS;
// Fall through.
case NO_TOS_REGISTERS:
break;
default:
UNREACHABLE();
break;
}
ASSERT(register_allocation_map_ == 0); // Not yet implemented.
}
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

520
deps/v8/src/arm/virtual-frame-arm.h

@@ -0,0 +1,520 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM_VIRTUAL_FRAME_ARM_H_
#define V8_ARM_VIRTUAL_FRAME_ARM_H_
#include "register-allocator.h"
namespace v8 {
namespace internal {
// This dummy class is only used to create invalid virtual frames.
extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
// -------------------------------------------------------------------------
// Virtual frames
//
// The virtual frame is an abstraction of the physical stack frame. It
// encapsulates the parameters, frame-allocated locals, and the expression
// stack. It supports push/pop operations on the expression stack, as well
// as random access to the expression stack elements, locals, and
// parameters.
class VirtualFrame : public ZoneObject {
public:
class RegisterAllocationScope;
// A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code
// generator's current frame, and keeps it spilled.
class SpilledScope BASE_EMBEDDED {
public:
explicit SpilledScope(VirtualFrame* frame)
: old_is_spilled_(is_spilled_) {
if (frame != NULL) {
if (!is_spilled_) {
frame->SpillAll();
} else {
frame->AssertIsSpilled();
}
}
is_spilled_ = true;
}
~SpilledScope() {
is_spilled_ = old_is_spilled_;
}
static bool is_spilled() { return is_spilled_; }
private:
static bool is_spilled_;
int old_is_spilled_;
SpilledScope() { }
friend class RegisterAllocationScope;
};
class RegisterAllocationScope BASE_EMBEDDED {
public:
// A utility class to introduce a scope where the virtual frame
// is not spilled, i.e. where register allocation occurs. Eventually
// when RegisterAllocationScope is ubiquitous it can be removed
// along with the (by then unused) SpilledScope class.
inline explicit RegisterAllocationScope(CodeGenerator* cgen);
inline ~RegisterAllocationScope();
private:
CodeGenerator* cgen_;
bool old_is_spilled_;
RegisterAllocationScope() { }
};
// An illegal index into the virtual frame.
static const int kIllegalIndex = -1;
// Construct an initial virtual frame on entry to a JS function.
inline VirtualFrame();
// Construct an invalid virtual frame, used by JumpTargets.
inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
inline CodeGenerator* cgen() const;
inline MacroAssembler* masm();
// The number of elements on the virtual frame.
int element_count() const { return element_count_; }
// The height of the virtual expression stack.
inline int height() const;
bool is_used(int num) {
switch (num) {
case 0: { // r0.
return kR0InUse[top_of_stack_state_];
}
case 1: { // r1.
return kR1InUse[top_of_stack_state_];
}
case 2:
case 3:
case 4:
case 5:
case 6: { // r2 to r6.
ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
ASSERT(num >= kFirstAllocatedRegister);
if ((register_allocation_map_ &
(1 << (num - kFirstAllocatedRegister))) == 0) {
return false;
} else {
return true;
}
}
default: {
ASSERT(num < kFirstAllocatedRegister ||
num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
return false;
}
}
}
// Add extra in-memory elements to the top of the frame to match an actual
// frame (e.g., the frame after an exception handler is pushed). No code is
// emitted.
void Adjust(int count);
// Forget elements from the top of the frame to match an actual frame (e.g.,
// the frame after a runtime call). No code is emitted except to bring the
// frame to a spilled state.
void Forget(int count);
// Spill all values from the frame to memory.
void SpillAll();
void AssertIsSpilled() const {
ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
ASSERT(register_allocation_map_ == 0);
}
void AssertIsNotSpilled() {
ASSERT(!SpilledScope::is_spilled());
}
// Spill all occurrences of a specific register from the frame.
void Spill(Register reg) {
UNIMPLEMENTED();
}
// Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
// (i.e., they all have frame-external references). Unimplemented.
Register SpillAnyRegister();
// Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match
// the expected one.
void MergeTo(VirtualFrame* expected, Condition cond = al);
void MergeTo(const VirtualFrame* expected, Condition cond = al);
// Checks whether this frame can be branched to by the other frame.
bool IsCompatibleWith(const VirtualFrame* other) const {
return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
}
inline void ForgetTypeInfo() {
tos_known_smi_map_ = 0;
}
// Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
void DetachFromCodeGenerator() {
}
// (Re)attach a frame to its code generator. This informs the register
// allocator that the frame-internal register references are active again.
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
void AttachToCodeGenerator() {
}
// Emit code for the physical JS entry and exit frame sequences. After
// calling Enter, the virtual frame is ready for use; and after calling
// Exit it should not be used. Note that Enter does not allocate space in
// the physical frame for storing frame-allocated locals.
void Enter();
void Exit();
// Prepare for returning from the frame by spilling elements in the virtual
// frame. This avoids generating unnecessary merge code when jumping to the
// shared return site. No spill code is emitted. The value to return should
// be in r0.
inline void PrepareForReturn();
// Number of local variables after which we use a loop for allocating.
static const int kLocalVarBound = 5;
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots();
// The current top of the expression stack as an assembly operand.
MemOperand Top() {
AssertIsSpilled();
return MemOperand(sp, 0);
}
// An element of the expression stack as an assembly operand.
MemOperand ElementAt(int index) {
int adjusted_index = index - kVirtualElements[top_of_stack_state_];
ASSERT(adjusted_index >= 0);
return MemOperand(sp, adjusted_index * kPointerSize);
}
bool KnownSmiAt(int index) {
if (index >= kTOSKnownSmiMapSize) return false;
return (tos_known_smi_map_ & (1 << index)) != 0;
}
// A frame-allocated local as an assembly operand.
inline MemOperand LocalAt(int index);
// Push the address of the receiver slot on the frame.
void PushReceiverSlotAddress();
// The function frame slot.
MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
// The context frame slot.
MemOperand Context() { return MemOperand(fp, kContextOffset); }
// A parameter as an assembly operand.
inline MemOperand ParameterAt(int index);
// The receiver frame slot.
inline MemOperand Receiver();
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
inline void CallStub(CodeStub* stub, int arg_count);
// Call JS function from top of the stack with arguments
// taken from the stack.
void CallJSFunction(int arg_count);
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
void CallRuntime(Runtime::Function* f, int arg_count);
void CallRuntime(Runtime::FunctionId id, int arg_count);
#ifdef ENABLE_DEBUGGER_SUPPORT
void DebugBreak();
#endif
// Invoke builtin given the number of arguments it expects on (and
// removes from) the stack.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flag,
int arg_count);
// Call load IC. Receiver is on the stack and is consumed. Result is returned
// in r0.
void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
// Call store IC. If the load is contextual, value is found on top of the
// frame. If not, value and receiver are on the frame. Both are consumed.
// Result is returned in r0.
void CallStoreIC(Handle<String> name, bool is_contextual,
StrictModeFlag strict_mode);
// Call keyed load IC. Key and receiver are on the stack. Both are consumed.
// Result is returned in r0.
void CallKeyedLoadIC();
// Call keyed store IC. Value, key and receiver are on the stack. All three
// are consumed. Result is returned in r0.
void CallKeyedStoreIC(StrictModeFlag strict_mode);
// Call into an IC stub given the number of arguments it removes
// from the stack. Register arguments to the IC stub are implicit,
// and depend on the type of IC stub.
void CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
int dropped_args);
// Drop a number of elements from the top of the expression stack. May
// emit code to affect the physical frame. Does not clobber any registers
// excepting possibly the stack pointer.
void Drop(int count);
// Drop one element.
void Drop() { Drop(1); }
// Pop an element from the top of the expression stack. Discards
// the result.
void Pop();
// Pop an element from the top of the expression stack. The register
// will be one normally used for the top of stack register allocation
// so you can't hold on to it if you push on the stack.
Register PopToRegister(Register but_not_to_this_one = no_reg);
// Look at the top of the stack. The register returned is aliased and
// must be copied to a scratch register before modification.
Register Peek();
// Look at the value beneath the top of the stack. The register returned is
// aliased and must be copied to a scratch register before modification.
Register Peek2();
// Duplicate the top of stack.
void Dup();
// Duplicate the two elements on top of stack.
void Dup2();
// Flushes all registers, but it puts a copy of the top-of-stack in r0.
void SpillAllButCopyTOSToR0();
// Flushes all registers, but it puts a copy of the top-of-stack in r1.
void SpillAllButCopyTOSToR1();
// Flushes all registers, but it puts a copy of the top-of-stack in r1
// and the next value on the stack in r0.
void SpillAllButCopyTOSToR1R0();
// Pop and save an element from the top of the expression stack and
// emit a corresponding pop instruction.
void EmitPop(Register reg);
// Takes the top two elements and puts them in r0 (top element) and r1
// (second element).
void PopToR1R0();
// Takes the top element and puts it in r1.
void PopToR1();
// Takes the top element and puts it in r0.
void PopToR0();
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
void EmitPushRoot(Heap::RootListIndex index);
// Overwrite the nth thing on the stack. If the nth position is in a
// register then this turns into a mov, otherwise an str. Afterwards
// you can still use the register even if it is a register that can be
// used for TOS (r0 or r1).
void SetElementAt(Register reg, int this_far_down);
// Get a register which is free and which must be immediately used to
// push on the top of the stack.
Register GetTOSRegister();
// Push multiple registers on the stack and the virtual frame. Registers
// are selected by setting bits in src_regs and are pushed in decreasing
// order: r15 .. r0.
void EmitPushMultiple(int count, int src_regs);
static Register scratch0() { return r7; }
static Register scratch1() { return r9; }
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
static const int kContextOffset = StandardFrameConstants::kContextOffset;
static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
// 5 states for the top of stack, which can be in memory or in r0 and r1.
enum TopOfStack {
NO_TOS_REGISTERS,
R0_TOS,
R1_TOS,
R1_R0_TOS,
R0_R1_TOS,
TOS_STATES
};
static const int kMaxTOSRegisters = 2;
static const bool kR0InUse[TOS_STATES];
static const bool kR1InUse[TOS_STATES];
static const int kVirtualElements[TOS_STATES];
static const TopOfStack kStateAfterPop[TOS_STATES];
static const TopOfStack kStateAfterPush[TOS_STATES];
static const Register kTopRegister[TOS_STATES];
static const Register kBottomRegister[TOS_STATES];
// We allocate up to 5 locals in registers.
static const int kNumberOfAllocatedRegisters = 5;
// r2 to r6 are allocated to locals.
static const int kFirstAllocatedRegister = 2;
static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
static Register AllocatedRegister(int r) {
ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
return kAllocatedRegisters[r];
}
// The number of elements on the stack frame.
int element_count_;
TopOfStack top_of_stack_state_:3;
int register_allocation_map_:kNumberOfAllocatedRegisters;
static const int kTOSKnownSmiMapSize = 4;
unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
// The index of the element that is at the processor's stack pointer
// (the sp register). For now since everything is in memory it is given
// by the number of elements on the not-very-virtual stack frame.
int stack_pointer() { return element_count_ - 1; }
// The number of frame-allocated locals and parameters respectively.
inline int parameter_count() const;
inline int local_count() const;
// The index of the element that is at the processor's frame pointer
// (the fp register). The parameters, receiver, function, and context
// are below the frame pointer.
inline int frame_pointer() const;
// The index of the first parameter. The receiver lies below the first
// parameter.
int param0_index() { return 1; }
// The index of the context slot in the frame. It is immediately
// below the frame pointer.
inline int context_index();
// The index of the function slot in the frame. It is below the frame
// pointer and context slot.
inline int function_index();
// The index of the first local. Between the frame pointer and the
// locals lies the return address.
inline int local0_index() const;
// The index of the base of the expression stack.
inline int expression_base_index() const;
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
inline int fp_relative(int index);
// Spill all elements in registers. Spill the top spilled_args elements
// on the frame. Sync all other frame elements.
// Then drop dropped_args elements from the virtual frame, to match
// the effect of an upcoming call that will drop them from the stack.
void PrepareForCall(int spilled_args, int dropped_args);
// If all top-of-stack registers are in use then the lowest one is pushed
// onto the physical stack and made free.
void EnsureOneFreeTOSRegister();
// Emit instructions to get the top of stack state from where we are to where
// we want to be.
void MergeTOSTo(TopOfStack expected_state, Condition cond = al);
inline bool Equals(const VirtualFrame* other);
inline void LowerHeight(int count) {
element_count_ -= count;
if (count >= kTOSKnownSmiMapSize) {
tos_known_smi_map_ = 0;
} else {
tos_known_smi_map_ >>= count;
}
}
inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
ASSERT(count >= 32 || known_smi_map < (1u << count));
element_count_ += count;
if (count >= kTOSKnownSmiMapSize) {
tos_known_smi_map_ = known_smi_map;
} else {
tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
}
}
friend class JumpTarget;
};
} } // namespace v8::internal
#endif // V8_ARM_VIRTUAL_FRAME_ARM_H_
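The trickiest bookkeeping in this header is the pairing of the frame height with the tos_known_smi_map_ bit set: pushes shift the map left, pops shift it right, and IsCompatibleWith only allows a branch when the target frame's smi assumptions are a subset of the source frame's knowledge. Below is a minimal, self-contained C++ model of just that bookkeeping, with simplified names and the r0/r1 top-of-stack register state omitted; it is an illustration of the scheme, not V8 code.

#include <cassert>

class FrameModel {
 public:
  static const int kTOSKnownSmiMapSize = 4;

  // Mirrors RaiseHeight: push `count` elements whose smi-ness is described
  // by the low `count` bits of known_smi_map (bit 0 = new top of stack).
  void RaiseHeight(int count, unsigned known_smi_map) {
    assert(count >= 32 || known_smi_map < (1u << count));
    element_count_ += count;
    if (count >= kTOSKnownSmiMapSize) {
      tos_known_smi_map_ = known_smi_map;
    } else {
      tos_known_smi_map_ = (tos_known_smi_map_ << count) | known_smi_map;
    }
  }

  // Mirrors LowerHeight: popping discards the low bits of the map.
  void LowerHeight(int count) {
    element_count_ -= count;
    if (count >= kTOSKnownSmiMapSize) {
      tos_known_smi_map_ = 0;
    } else {
      tos_known_smi_map_ >>= count;
    }
  }

  // Only the top kTOSKnownSmiMapSize elements carry type knowledge.
  bool KnownSmiAt(int index) const {
    if (index >= kTOSKnownSmiMapSize) return false;
    return (tos_known_smi_map_ & (1u << index)) != 0;
  }

  // A branch from `other` to this frame is safe only if every smi this
  // frame assumes is also guaranteed by `other`: (ours & ~theirs) == 0.
  bool IsCompatibleWith(const FrameModel& other) const {
    return (tos_known_smi_map_ & ~other.tos_known_smi_map_) == 0;
  }

 private:
  int element_count_ = 0;
  unsigned tos_known_smi_map_ = 0;
};

Pushing a known smi onto an empty model (RaiseHeight(1, 1)) and then one unknown value (RaiseHeight(1, 0)) leaves the map as 0b10, so KnownSmiAt(1) is true while KnownSmiAt(0) is false — exactly the window-shifting behavior the header implements.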

188
deps/v8/src/array.js

@@ -33,7 +33,7 @@
 // Global list of arrays visited during toString, toLocaleString and
 // join invocations.
-var visited_arrays = new InternalArray();
+var visited_arrays = new $Array();
 
 // Gets a sorted array of array keys.  Useful for operations on sparse
@@ -67,32 +67,13 @@ function GetSortedArrayKeys(array, intervals) {
 }
 
-function SparseJoinWithSeparator(array, len, convert, separator) {
-  var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
-  var totalLength = 0;
-  var elements = new InternalArray(keys.length * 2);
-  var previousKey = -1;
-  for (var i = 0; i < keys.length; i++) {
-    var key = keys[i];
-    if (key != previousKey) {  // keys may contain duplicates.
-      var e = array[key];
-      if (!IS_STRING(e)) e = convert(e);
-      elements[i * 2] = key;
-      elements[i * 2 + 1] = e;
-      previousKey = key;
-    }
-  }
-  return %SparseJoinWithSeparator(elements, len, separator);
-}
-
 // Optimized for sparse arrays if separator is ''.
 function SparseJoin(array, len, convert) {
   var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
   var last_key = -1;
   var keys_length = keys.length;
-  var elements = new InternalArray(keys_length);
+  var elements = new $Array(keys_length);
   var elements_length = 0;
 
   for (var i = 0; i < keys_length; i++) {
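SparseJoin is only reached when the separator is the empty string (the next hunk adds exactly that check to the UseSparseVariant branch): holes then contribute nothing to the result, so it suffices to visit the keys that actually exist, in ascending order, instead of every index up to len. A rough standalone C++ sketch of the idea, using std::map as a stand-in for V8's sparse element storage (hypothetical helper, not V8 code):

#include <cstdint>
#include <map>
#include <string>

// Empty-separator join of a sparse array (index -> already-converted
// element): holes add nothing, so only existing keys are visited, and
// std::map already iterates them in ascending key order.  Ten elements in
// an array of length 1e6 cost ten iterations rather than a million.
std::string SparseJoinSketch(const std::map<uint32_t, std::string>& elements) {
  std::string result;
  for (const auto& entry : elements) {
    result += entry.second;
  }
  return result;
}

With a non-empty separator every hole would still have to produce a separator, which is why the hunk below routes that case back through the generic Join loop.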
@@ -129,12 +110,8 @@ function Join(array, length, separator, convert) {
   // Attempt to convert the elements.
   try {
-    if (UseSparseVariant(array, length, is_array)) {
-      if (separator.length == 0) {
-        return SparseJoin(array, length, convert);
-      } else {
-        return SparseJoinWithSeparator(array, length, convert, separator);
-      }
+    if (UseSparseVariant(array, length, is_array) && (separator.length == 0)) {
+      return SparseJoin(array, length, convert);
     }
 
     // Fast case for one-element arrays.
@@ -145,16 +122,18 @@ function Join(array, length, separator, convert) {
     }
 
     // Construct an array for the elements.
-    var elements = new InternalArray(length);
+    var elements = new $Array(length);
 
     // We pull the empty separator check outside the loop for speed!
     if (separator.length == 0) {
       var elements_length = 0;
       for (var i = 0; i < length; i++) {
         var e = array[i];
-        if (!IS_STRING(e)) e = convert(e);
-        elements[elements_length++] = e;
+        if (!IS_UNDEFINED(e)) {
+          if (!IS_STRING(e)) e = convert(e);
+          elements[elements_length++] = e;
+        }
       }
       elements.length = elements_length;
       var result = %_FastAsciiArrayJoin(elements, '');
       if (!IS_UNDEFINED(result)) return result;
@@ -172,14 +151,13 @@ function Join(array, length, separator, convert) {
     } else {
       for (var i = 0; i < length; i++) {
         var e = array[i];
-        if (IS_NUMBER(e)) {
-          e = %_NumberToString(e);
-        } else if (!IS_STRING(e)) {
-          e = convert(e);
-        }
-        elements[i] = e;
+        if (IS_NUMBER(e)) elements[i] = %_NumberToString(e);
+        else {
+          if (!IS_STRING(e)) e = convert(e);
+          elements[i] = e;
+        }
       }
     }
     var result = %_FastAsciiArrayJoin(elements, separator);
     if (!IS_UNDEFINED(result)) return result;
@@ -263,7 +241,7 @@ function SmartSlice(array, start_i, del_count, len, deleted_elements) {
 // special array operations to handle sparse arrays in a sensible fashion.
 function SmartMove(array, start_i, del_count, len, num_additional_args) {
   // Move data to new array.
-  var new_array = new InternalArray(len - del_count + num_additional_args);
+  var new_array = new $Array(len - del_count + num_additional_args);
   var intervals = %GetArrayKeys(array, len);
   var length = intervals.length;
   for (var k = 0; k < length; k++) {
@@ -397,11 +375,6 @@ function ArrayToLocaleString() {
 
 function ArrayJoin(separator) {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.join"]);
-  }
-
   if (IS_UNDEFINED(separator)) {
     separator = ',';
   } else if (!IS_STRING(separator)) {
@@ -418,11 +391,6 @@ function ArrayJoin(separator) {
 // Removes the last element from the array and returns it. See
 // ECMA-262, section 15.4.4.6.
 function ArrayPop() {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.pop"]);
-  }
-
   var n = TO_UINT32(this.length);
   if (n == 0) {
     this.length = n;
@@ -439,11 +407,6 @@ function ArrayPop() {
 // Appends the arguments to the end of the array and returns the new
 // length of the array. See ECMA-262, section 15.4.4.7.
 function ArrayPush() {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.push"]);
-  }
-
   var n = TO_UINT32(this.length);
   var m = %_ArgumentsLength();
   for (var i = 0; i < m; i++) {
@@ -455,13 +418,8 @@ function ArrayPush() {
 
 function ArrayConcat(arg1) {  // length == 1
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.concat"]);
-  }
-
   var arg_count = %_ArgumentsLength();
-  var arrays = new InternalArray(1 + arg_count);
+  var arrays = new $Array(1 + arg_count);
   arrays[0] = this;
   for (var i = 0; i < arg_count; i++) {
     arrays[i + 1] = %_Arguments(i);
@@ -516,11 +474,6 @@ function SparseReverse(array, len) {
 
 function ArrayReverse() {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.reverse"]);
-  }
-
   var j = TO_UINT32(this.length) - 1;
 
   if (UseSparseVariant(this, j, IS_ARRAY(this))) {
@@ -552,11 +505,6 @@ function ArrayReverse() {
 
 function ArrayShift() {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.shift"]);
-  }
-
   var len = TO_UINT32(this.length);
 
   if (len === 0) {
@@ -578,11 +526,6 @@ function ArrayShift() {
 
 function ArrayUnshift(arg1) {  // length == 1
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.unshift"]);
-  }
-
   var len = TO_UINT32(this.length);
   var num_arguments = %_ArgumentsLength();
@@ -602,11 +545,6 @@ function ArrayUnshift(arg1) {  // length == 1
 
 function ArraySlice(start, end) {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.slice"]);
-  }
-
   var len = TO_UINT32(this.length);
   var start_i = TO_INTEGER(start);
   var end_i = len;
@@ -631,9 +569,7 @@ function ArraySlice(start, end) {
 
   if (end_i < start_i) return result;
 
-  if (IS_ARRAY(this) &&
-      (end_i > 1000) &&
-      (%EstimateNumberOfElements(this) < end_i)) {
+  if (IS_ARRAY(this)) {
     SmartSlice(this, start_i, end_i - start_i, len, result);
   } else {
     SimpleSlice(this, start_i, end_i - start_i, len, result);
@@ -646,11 +582,6 @@ function ArraySlice(start, end) {
 
 function ArraySplice(start, delete_count) {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.splice"]);
-  }
-
   var num_arguments = %_ArgumentsLength();
   var len = TO_UINT32(this.length);
@@ -722,11 +653,6 @@ function ArraySplice(start, delete_count) {
 
 function ArraySort(comparefn) {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.sort"]);
-  }
-
   // In-place QuickSort algorithm.
   // For short (length <= 22) arrays, insertion sort is used for efficiency.
@@ -742,15 +668,14 @@ function ArraySort(comparefn) {
       else return x < y ? -1 : 1;
     };
   }
-  var receiver =
-      %_IsNativeOrStrictMode(comparefn) ? void 0 : %GetGlobalReceiver();
+  var global_receiver = %GetGlobalReceiver();
 
   function InsertionSort(a, from, to) {
     for (var i = from + 1; i < to; i++) {
       var element = a[i];
       for (var j = i - 1; j >= from; j--) {
         var tmp = a[j];
-        var order = %_CallFunction(receiver, tmp, element, comparefn);
+        var order = %_CallFunction(global_receiver, tmp, element, comparefn);
         if (order > 0) {
           a[j + 1] = tmp;
         } else {
@@ -772,14 +697,14 @@ function ArraySort(comparefn) {
     var v1 = a[to - 1];
     var middle_index = from + ((to - from) >> 1);
     var v2 = a[middle_index];
-    var c01 = %_CallFunction(receiver, v0, v1, comparefn);
+    var c01 = %_CallFunction(global_receiver, v0, v1, comparefn);
     if (c01 > 0) {
       // v1 < v0, so swap them.
       var tmp = v0;
       v0 = v1;
       v1 = tmp;
     } // v0 <= v1.
-    var c02 = %_CallFunction(receiver, v0, v2, comparefn);
+    var c02 = %_CallFunction(global_receiver, v0, v2, comparefn);
     if (c02 >= 0) {
       // v2 <= v0 <= v1.
       var tmp = v0;
@@ -788,7 +713,7 @@ function ArraySort(comparefn) {
       v1 = tmp;
     } else {
       // v0 <= v1 && v0 < v2
-      var c12 = %_CallFunction(receiver, v1, v2, comparefn);
+      var c12 = %_CallFunction(global_receiver, v1, v2, comparefn);
       if (c12 > 0) {
         // v0 <= v2 < v1
         var tmp = v1;
@@ -809,7 +734,7 @@ function ArraySort(comparefn) {
     // From i to high_start are elements that haven't been compared yet.
     partition: for (var i = low_end + 1; i < high_start; i++) {
       var element = a[i];
-      var order = %_CallFunction(receiver, element, pivot, comparefn);
+      var order = %_CallFunction(global_receiver, element, pivot, comparefn);
       if (order < 0) {
         %_SwapElements(a, i, low_end);
         low_end++;
@@ -818,7 +743,7 @@ function ArraySort(comparefn) {
         high_start--;
         if (high_start == i) break partition;
         var top_elem = a[high_start];
-        order = %_CallFunction(receiver, top_elem, pivot, comparefn);
+        order = %_CallFunction(global_receiver, top_elem, pivot, comparefn);
       } while (order > 0);
       %_SwapElements(a, i, high_start);
       if (order < 0) {
@@ -989,11 +914,6 @@ function ArraySort(comparefn) {
 // preserving the semantics, since the calls to the receiver function can add
 // or delete elements from the array.
 function ArrayFilter(f, receiver) {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.filter"]);
-  }
-
   if (!IS_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
@@ -1005,9 +925,7 @@ function ArrayFilter(f, receiver) {
   for (var i = 0; i < length; i++) {
     var current = this[i];
     if (!IS_UNDEFINED(current) || i in this) {
-      if (f.call(receiver, current, i, this)) {
-        result[result_length++] = current;
-      }
+      if (f.call(receiver, current, i, this)) result[result_length++] = current;
     }
   }
   return result;
@@ -1015,11 +933,6 @@ function ArrayFilter(f, receiver) {
 
 function ArrayForEach(f, receiver) {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.forEach"]);
-  }
-
   if (!IS_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
@@ -1038,11 +951,6 @@ function ArrayForEach(f, receiver) {
 // Executes the function once for each element present in the
 // array until it finds one where callback returns true.
 function ArraySome(f, receiver) {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.some"]);
-  }
-
   if (!IS_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
@@ -1060,11 +968,6 @@ function ArraySome(f, receiver) {
 
 function ArrayEvery(f, receiver) {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.every"]);
-  }
-
   if (!IS_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
@@ -1081,36 +984,24 @@ function ArrayEvery(f, receiver) {
 }
 
 function ArrayMap(f, receiver) {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.map"]);
-  }
-
   if (!IS_FUNCTION(f)) {
     throw MakeTypeError('called_non_callable', [ f ]);
   }
   // Pull out the length so that modifications to the length in the
   // loop will not affect the looping.
   var length = TO_UINT32(this.length);
-  var result = new $Array();
-  var accumulator = new InternalArray(length);
+  var result = new $Array(length);
   for (var i = 0; i < length; i++) {
     var current = this[i];
     if (!IS_UNDEFINED(current) || i in this) {
-      accumulator[i] = f.call(receiver, current, i, this);
+      result[i] = f.call(receiver, current, i, this);
     }
   }
-  %MoveArrayContents(accumulator, result);
   return result;
 }
 
 function ArrayIndexOf(element, index) {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.indexOf"]);
-  }
-
   var length = TO_UINT32(this.length);
   if (length == 0) return -1;
   if (IS_UNDEFINED(index)) {
@@ -1168,11 +1059,6 @@ function ArrayIndexOf(element, index) {
 
 function ArrayLastIndexOf(element, index) {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.lastIndexOf"]);
-  }
-
   var length = TO_UINT32(this.length);
   if (length == 0) return -1;
   if (%_ArgumentsLength() < 2) {
@@ -1226,11 +1112,6 @@ function ArrayLastIndexOf(element, index) {
 
 function ArrayReduce(callback, current) {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.reduce"]);
-  }
-
   if (!IS_FUNCTION(callback)) {
     throw MakeTypeError('called_non_callable', [callback]);
   }
@@ -1253,18 +1134,13 @@ function ArrayReduce(callback, current) {
   for (; i < length; i++) {
     var element = this[i];
     if (!IS_UNDEFINED(element) || i in this) {
-      current = callback.call(void 0, current, element, i, this);
+      current = callback.call(null, current, element, i, this);
     }
   }
   return current;
 }
 
 function ArrayReduceRight(callback, current) {
-  if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
-    throw MakeTypeError("called_on_null_or_undefined",
-                        ["Array.prototype.reduceRight"]);
-  }
-
   if (!IS_FUNCTION(callback)) {
     throw MakeTypeError('called_non_callable', [callback]);
   }
@@ -1284,7 +1160,7 @@ function ArrayReduceRight(callback, current) {
   for (; i >= 0; i--) {
     var element = this[i];
     if (!IS_UNDEFINED(element) || i in this) {
-      current = callback.call(void 0, current, element, i, this);
+      current = callback.call(null, current, element, i, this);
    }
  }
  return current;
@@ -1349,20 +1225,6 @@ function SetupArray() {
   ));
 
   %FinishArrayPrototypeSetup($Array.prototype);
-
-  // The internal Array prototype doesn't need to be fancy, since it's never
-  // exposed to user code, so no hidden prototypes or DONT_ENUM attributes
-  // are necessary.
-  // The null __proto__ ensures that we never inherit any user created
-  // getters or setters from, e.g., Object.prototype.
-  InternalArray.prototype.__proto__ = null;
-  // Adding only the functions that are actually used, and a toString.
-  InternalArray.prototype.join = getFunction("join", ArrayJoin);
-  InternalArray.prototype.pop = getFunction("pop", ArrayPop);
-  InternalArray.prototype.push = getFunction("push", ArrayPush);
-  InternalArray.prototype.toString = function() {
-    return "Internal Array, length " + this.length;
-  };
 }

583
deps/v8/src/assembler.cc

@@ -30,7 +30,7 @@
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.
 
 #include "v8.h"
@@ -55,8 +55,6 @@
 #include "x64/regexp-macro-assembler-x64.h"
 #elif V8_TARGET_ARCH_ARM
 #include "arm/regexp-macro-assembler-arm.h"
-#elif V8_TARGET_ARCH_MIPS
-#include "mips/regexp-macro-assembler-mips.h"
 #else  // Unknown architecture.
 #error "Unknown architecture."
 #endif  // Target architecture.
@@ -69,24 +67,9 @@ namespace internal {
 const double DoubleConstant::min_int = kMinInt;
 const double DoubleConstant::one_half = 0.5;
 const double DoubleConstant::minus_zero = -0.0;
-const double DoubleConstant::uint8_max_value = 255;
-const double DoubleConstant::zero = 0.0;
-const double DoubleConstant::nan = OS::nan_value();
 const double DoubleConstant::negative_infinity = -V8_INFINITY;
 
 const char* RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
 
-// -----------------------------------------------------------------------------
-// Implementation of AssemblerBase
-
-AssemblerBase::AssemblerBase(Isolate* isolate)
-    : isolate_(isolate),
-      jit_cookie_(0) {
-  if (FLAG_mask_constants_with_cookie && isolate != NULL) {
-    jit_cookie_ = V8::RandomPrivate(isolate);
-  }
-}
-
 // -----------------------------------------------------------------------------
 // Implementation of Label
@@ -101,85 +84,58 @@ int Label::pos() const {
 // -----------------------------------------------------------------------------
 // Implementation of RelocInfoWriter and RelocIterator
 //
-// Relocation information is written backwards in memory, from high addresses
-// towards low addresses, byte by byte.  Therefore, in the encodings listed
-// below, the first byte listed it at the highest address, and successive
-// bytes in the record are at progressively lower addresses.
-//
 // Encoding
 //
 //   The most common modes are given single-byte encodings.  Also, it is
 //   easy to identify the type of reloc info and skip unwanted modes in
 //   an iteration.
 //
-//   The encoding relies on the fact that there are fewer than 14
-//   different non-compactly encoded relocation modes.
-//
-//   The first byte of a relocation record has a tag in its low 2 bits:
-//   Here are the record schemes, depending on the low tag and optional higher
-//   tags.
-//
-//   Low tag:
-//     00: embedded_object:      [6-bit pc delta] 00
-//
-//     01: code_target:          [6-bit pc delta] 01
-//
-//     10: short_data_record:    [6-bit pc delta] 10 followed by
-//                               [6-bit data delta] [2-bit data type tag]
-//
-//     11: long_record           [2-bit high tag][4 bit middle_tag] 11
-//                               followed by variable data depending on type.
-//
-//   2-bit data type tags, used in short_data_record and data_jump long_record:
-//     code_target_with_id: 00
-//     position:            01
-//     statement_position:  10
-//     comment:             11 (not used in short_data_record)
-//
-//   Long record format:
-//     4-bit middle_tag:
-//       0000 - 1100 : Short record for RelocInfo::Mode middle_tag + 2
-//          (The middle_tag encodes rmode - RelocInfo::LAST_COMPACT_ENUM,
-//           and is between 0000 and 1100)
-//         The format is:
-//                               00 [4 bit middle_tag] 11 followed by
-//                               00 [6 bit pc delta]
-//
-//       1101: not used (would allow one more relocation mode to be added)
-//       1110: long_data_record
-//         The format is:        [2-bit data_type_tag] 1110 11
-//                               signed intptr_t, lowest byte written first
-//                               (except data_type code_target_with_id, which
-//                               is followed by a signed int, not intptr_t.)
-//
-//       1111: long_pc_jump
-//         The format is:
-//           pc-jump:            00 1111 11,
-//                               00 [6 bits pc delta]
-//           or
-//           pc-jump (variable length):
-//                               01 1111 11,
-//                               [7 bits data] 0
-//                               ...
-//                               [7 bits data] 1
-//               (Bits 6..31 of pc delta, with leading zeroes
-//                dropped, and last non-zero chunk tagged with 1.)
+//   The encoding relies on the fact that there are less than 14
+//   different relocation modes.
+//
+//   embedded_object:    [6 bits pc delta] 00
+//
+//   code_taget:         [6 bits pc delta] 01
+//
+//   position:           [6 bits pc delta] 10,
+//                       [7 bits signed data delta] 0
+//
+//   statement_position: [6 bits pc delta] 10,
+//                       [7 bits signed data delta] 1
+//
+//   any nondata mode:   00 [4 bits rmode] 11,   // rmode: 0..13 only
+//                       00 [6 bits pc delta]
+//
+//   pc-jump:            00 1111 11,
+//                       00 [6 bits pc delta]
+//
+//   pc-jump:            01 1111 11,
+//                       (variable length) 7 - 26 bit pc delta, written in
+//                       chunks of 7 bits, the lowest 7 bits written first.
+//
+//   data-jump + pos:    00 1110 11,
+//                       signed intptr_t, lowest byte written first
+//
+//   data-jump + st.pos: 01 1110 11,
+//                       signed intptr_t, lowest byte written first
+//
+//   data-jump + comm.:  10 1110 11,
+//                       signed intptr_t, lowest byte written first
 const int kMaxRelocModes = 14;
 
 const int kTagBits = 2;
 const int kTagMask = (1 << kTagBits) - 1;
 const int kExtraTagBits = 4;
-const int kLocatableTypeTagBits = 2;
-const int kSmallDataBits = kBitsPerByte - kLocatableTypeTagBits;
+const int kPositionTypeTagBits = 1;
+const int kSmallDataBits = kBitsPerByte - kPositionTypeTagBits;
 
 const int kEmbeddedObjectTag = 0;
 const int kCodeTargetTag = 1;
-const int kLocatableTag = 2;
+const int kPositionTag = 2;
 const int kDefaultTag = 3;
 
-const int kPCJumpExtraTag = (1 << kExtraTagBits) - 1;
+const int kPCJumpTag = (1 << kExtraTagBits) - 1;
 
 const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
 const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
@@ -193,12 +149,11 @@ const int kLastChunkTagMask = 1;
 const int kLastChunkTag = 1;
 
-const int kDataJumpExtraTag = kPCJumpExtraTag - 1;
+const int kDataJumpTag = kPCJumpTag - 1;
 
-const int kCodeWithIdTag = 0;
-const int kNonstatementPositionTag = 1;
-const int kStatementPositionTag = 2;
-const int kCommentTag = 3;
+const int kNonstatementPositionTag = 0;
+const int kStatementPositionTag = 1;
+const int kCommentTag = 2;
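To make the tagged-byte scheme documented above concrete, here is a small self-contained C++ sketch of packing a pc delta and a 2-bit mode tag, spilling into variable-length 7-bit chunks (lowest bits first, final chunk marked in bit 0) when the delta does not fit in 6 bits. It is deliberately simplified: the real RelocInfoWriter writes records backwards in memory and precedes the chunks with an extra-tag byte, both omitted here, so this illustrates the encoding rather than reproducing the V8 implementation.

#include <cassert>
#include <cstdint>
#include <vector>

namespace reloc_sketch {

const int kTagBits = 2;
const int kSmallPCDeltaBits = 8 - kTagBits;  // 6 bits of pc delta
const uint32_t kSmallPCDeltaMask = (1u << kSmallPCDeltaBits) - 1;
const int kChunkBits = 7;
const uint32_t kChunkMask = (1u << kChunkBits) - 1;
const uint8_t kLastChunkTag = 1;

// Pack a pc delta and a 2-bit tag into bytes: deltas up to 63 cost one
// byte; larger deltas first emit their high bits as 7-bit chunks, lowest
// bits first, with the final chunk tagged in bit 0.
void WriteTaggedPC(std::vector<uint8_t>* out, uint32_t pc_delta, int tag) {
  assert(tag < (1 << kTagBits));
  if (pc_delta > kSmallPCDeltaMask) {
    uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
    while (pc_jump > 0) {
      uint8_t chunk = static_cast<uint8_t>((pc_jump & kChunkMask) << 1);
      pc_jump >>= kChunkBits;
      if (pc_jump == 0) chunk |= kLastChunkTag;  // tag the final chunk
      out->push_back(chunk);
    }
    pc_delta &= kSmallPCDeltaMask;  // low 6 bits ride in the tagged byte
  }
  out->push_back(static_cast<uint8_t>((pc_delta << kTagBits) | tag));
}

}  // namespace reloc_sketch

For example, a delta of 5 with the embedded-object tag (00) encodes as the single byte 0b00010100, while a delta of 300 first emits one chunk byte carrying bits 6 and up, then a tagged byte holding the low 6 bits.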
@@ -206,7 +161,7 @@ uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
   // Otherwise write a variable length PC jump for the bits that do
   // not fit in the kSmallPCDeltaBits bits.
   if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
-  WriteExtraTag(kPCJumpExtraTag, kVariableLengthPCJumpTopTag);
+  WriteExtraTag(kPCJumpTag, kVariableLengthPCJumpTopTag);
   uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
   ASSERT(pc_jump > 0);
   // Write kChunkBits size chunks of the pc_jump.
@@ -229,7 +184,7 @@ void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) {
 
 void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
-  *--pos_ = static_cast<byte>(data_delta << kLocatableTypeTagBits | tag);
+  *--pos_ = static_cast<byte>(data_delta << kPositionTypeTagBits | tag);
 }
@@ -248,17 +203,8 @@ void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) {
 }
 
-void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
-  WriteExtraTag(kDataJumpExtraTag, top_tag);
-  for (int i = 0; i < kIntSize; i++) {
-    *--pos_ = static_cast<byte>(data_delta);
-    // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
-    data_delta = data_delta >> kBitsPerByte;
-  }
-}
-
 void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
-  WriteExtraTag(kDataJumpExtraTag, top_tag);
+  WriteExtraTag(kDataJumpTag, top_tag);
   for (int i = 0; i < kIntptrSize; i++) {
     *--pos_ = static_cast<byte>(data_delta);
     // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
@@ -271,9 +217,9 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
 #ifdef DEBUG
   byte* begin_pos = pos_;
 #endif
+  Counters::reloc_info_count.Increment();
   ASSERT(rinfo->pc() - last_pc_ >= 0);
-  ASSERT(RelocInfo::NUMBER_OF_MODES - RelocInfo::LAST_COMPACT_ENUM <=
-         kMaxRelocModes);
+  ASSERT(RelocInfo::NUMBER_OF_MODES <= kMaxRelocModes);
   // Use unsigned delta-encoding for pc.
   uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
   RelocInfo::Mode rmode = rinfo->rmode();
@@ -284,48 +230,35 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
   } else if (rmode == RelocInfo::CODE_TARGET) {
     WriteTaggedPC(pc_delta, kCodeTargetTag);
     ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
-  } else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
-    // Use signed delta-encoding for id.
-    ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
-    int id_delta = static_cast<int>(rinfo->data()) - last_id_;
-    // Check if delta is small enough to fit in a tagged byte.
-    if (is_intn(id_delta, kSmallDataBits)) {
-      WriteTaggedPC(pc_delta, kLocatableTag);
-      WriteTaggedData(id_delta, kCodeWithIdTag);
-    } else {
-      // Otherwise, use costly encoding.
-      WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
-      WriteExtraTaggedIntData(id_delta, kCodeWithIdTag);
-    }
-    last_id_ = static_cast<int>(rinfo->data());
   } else if (RelocInfo::IsPosition(rmode)) {
-    // Use signed delta-encoding for position.
-    ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
-    int pos_delta = static_cast<int>(rinfo->data()) - last_position_;
-    int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
-                                                      : kStatementPositionTag;
-    // Check if delta is small enough to fit in a tagged byte.
-    if (is_intn(pos_delta, kSmallDataBits)) {
-      WriteTaggedPC(pc_delta, kLocatableTag);
-      WriteTaggedData(pos_delta, pos_type_tag);
+    // Use signed delta-encoding for data.
+    intptr_t data_delta = rinfo->data() - last_data_;
+    int pos_type_tag = rmode == RelocInfo::POSITION ? kNonstatementPositionTag
+                                                    : kStatementPositionTag;
+    // Check if data is small enough to fit in a tagged byte.
+    // We cannot use is_intn because data_delta is not an int32_t.
+    if (data_delta >= -(1 << (kSmallDataBits-1)) &&
+        data_delta < 1 << (kSmallDataBits-1)) {
+      WriteTaggedPC(pc_delta, kPositionTag);
+      WriteTaggedData(data_delta, pos_type_tag);
+      last_data_ = rinfo->data();
     } else {
       // Otherwise, use costly encoding.
-      WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
-      WriteExtraTaggedIntData(pos_delta, pos_type_tag);
+      WriteExtraTaggedPC(pc_delta, kPCJumpTag);
+      WriteExtraTaggedData(data_delta, pos_type_tag);
+      last_data_ = rinfo->data();
     }
-    last_position_ = static_cast<int>(rinfo->data());
   } else if (RelocInfo::IsComment(rmode)) {
     // Comments are normally not generated, so we use the costly encoding.
-    WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
-    WriteExtraTaggedData(rinfo->data(), kCommentTag);
+    WriteExtraTaggedPC(pc_delta, kPCJumpTag);
+    WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag);
+    last_data_ = rinfo->data();
     ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
   } else {
-    ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
-    int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
     // For all other modes we simply use the mode as the extra tag.
     // None of these modes need a data component.
-    ASSERT(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag);
-    WriteExtraTaggedPC(pc_delta, saved_mode);
+    ASSERT(rmode < kPCJumpTag && rmode < kDataJumpTag);
+    WriteExtraTaggedPC(pc_delta, rmode);
   }
   last_pc_ = rinfo->pc();
 #ifdef DEBUG
@@ -359,32 +292,12 @@ inline void RelocIterator::AdvanceReadPC() {
 }
 
-void RelocIterator::AdvanceReadId() {
-  int x = 0;
-  for (int i = 0; i < kIntSize; i++) {
-    x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
-  }
-  last_id_ += x;
-  rinfo_.data_ = last_id_;
-}
-
-void RelocIterator::AdvanceReadPosition() {
-  int x = 0;
-  for (int i = 0; i < kIntSize; i++) {
-    x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
-  }
-  last_position_ += x;
-  rinfo_.data_ = last_position_;
-}
-
 void RelocIterator::AdvanceReadData() {
   intptr_t x = 0;
   for (int i = 0; i < kIntptrSize; i++) {
     x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
   }
-  rinfo_.data_ = x;
+  rinfo_.data_ += x;
 }
@@ -404,33 +317,27 @@ void RelocIterator::AdvanceReadVariableLengthPCJump() {
 }
 
-inline int RelocIterator::GetLocatableTypeTag() {
-  return *pos_ & ((1 << kLocatableTypeTagBits) - 1);
+inline int RelocIterator::GetPositionTypeTag() {
+  return *pos_ & ((1 << kPositionTypeTagBits) - 1);
 }
 
-inline void RelocIterator::ReadTaggedId() {
+inline void RelocIterator::ReadTaggedData() {
   int8_t signed_b = *pos_;
   // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
-  last_id_ += signed_b >> kLocatableTypeTagBits;
-  rinfo_.data_ = last_id_;
+  rinfo_.data_ += signed_b >> kPositionTypeTagBits;
 }
 
-inline void RelocIterator::ReadTaggedPosition() {
-  int8_t signed_b = *pos_;
-  // Signed right shift is arithmetic shift.  Tested in test-utils.cc.
-  last_position_ += signed_b >> kLocatableTypeTagBits;
-  rinfo_.data_ = last_position_;
-}
-
-static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {
-  ASSERT(tag == kNonstatementPositionTag ||
-         tag == kStatementPositionTag);
-  return (tag == kNonstatementPositionTag) ?
-      RelocInfo::POSITION :
-      RelocInfo::STATEMENT_POSITION;
+inline RelocInfo::Mode RelocIterator::DebugInfoModeFromTag(int tag) {
+  if (tag == kStatementPositionTag) {
+    return RelocInfo::STATEMENT_POSITION;
+  } else if (tag == kNonstatementPositionTag) {
+    return RelocInfo::POSITION;
+  } else {
+    ASSERT(tag == kCommentTag);
+    return RelocInfo::COMMENT;
+  }
 }
@@ -449,64 +356,37 @@ void RelocIterator::next() {
     } else if (tag == kCodeTargetTag) {
       ReadTaggedPC();
       if (SetMode(RelocInfo::CODE_TARGET)) return;
-    } else if (tag == kLocatableTag) {
+    } else if (tag == kPositionTag) {
       ReadTaggedPC();
       Advance();
-      int locatable_tag = GetLocatableTypeTag();
-      if (locatable_tag == kCodeWithIdTag) {
-        if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
-          ReadTaggedId();
-          return;
-        }
-      } else {
-        // Compact encoding is never used for comments,
-        // so it must be a position.
-        ASSERT(locatable_tag == kNonstatementPositionTag ||
-               locatable_tag == kStatementPositionTag);
-        if (mode_mask_ & RelocInfo::kPositionMask) {
-          ReadTaggedPosition();
-          if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
-        }
-      }
+      // Check if we want source positions.
+      if (mode_mask_ & RelocInfo::kPositionMask) {
+        ReadTaggedData();
+        if (SetMode(DebugInfoModeFromTag(GetPositionTypeTag()))) return;
+      }
     } else {
       ASSERT(tag == kDefaultTag);
       int extra_tag = GetExtraTag();
-      if (extra_tag == kPCJumpExtraTag) {
+      if (extra_tag == kPCJumpTag) {
         int top_tag = GetTopTag();
         if (top_tag == kVariableLengthPCJumpTopTag) {
           AdvanceReadVariableLengthPCJump();
         } else {
           AdvanceReadPC();
         }
-      } else if (extra_tag == kDataJumpExtraTag) {
-        int locatable_tag = GetTopTag();
-        if (locatable_tag == kCodeWithIdTag) {
-          if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
-            AdvanceReadId();
-            return;
-          }
-          Advance(kIntSize);
-        } else if (locatable_tag != kCommentTag) {
-          ASSERT(locatable_tag == kNonstatementPositionTag ||
-                 locatable_tag == kStatementPositionTag);
-          if (mode_mask_ & RelocInfo::kPositionMask) {
-            AdvanceReadPosition();
-            if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
-          } else {
-            Advance(kIntSize);
-          }
-        } else {
-          ASSERT(locatable_tag == kCommentTag);
-          if (SetMode(RelocInfo::COMMENT)) {
-            AdvanceReadData();
-            return;
-          }
-          Advance(kIntptrSize);
-        }
+      } else if (extra_tag == kDataJumpTag) {
+        // Check if we want debug modes (the only ones with data).
+        if (mode_mask_ & RelocInfo::kDebugMask) {
+          int top_tag = GetTopTag();
+          AdvanceReadData();
+          if (SetMode(DebugInfoModeFromTag(top_tag))) return;
+        } else {
+          // Otherwise, just skip over the data.
+          Advance(kIntptrSize);
+        }
       } else {
         AdvanceReadPC();
-        int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM;
-        if (SetMode(static_cast<RelocInfo::Mode>(rmode))) return;
+        if (SetMode(static_cast<RelocInfo::Mode>(extra_tag))) return;
       }
     }
   }
@@ -522,8 +402,6 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
   end_ = code->relocation_start();
   done_ = false;
   mode_mask_ = mode_mask;
-  last_id_ = 0;
-  last_position_ = 0;
   if (mode_mask_ == 0) pos_ = end_;
   next();
 }
@@ -537,8 +415,6 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
   end_ = pos_ - desc.reloc_size;
   done_ = false;
   mode_mask_ = mode_mask;
-  last_id_ = 0;
-  last_position_ = 0;
   if (mode_mask_ == 0) pos_ = end_;
   next();
 }
@@ -566,8 +442,6 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
       return "debug break";
     case RelocInfo::CODE_TARGET:
       return "code target";
-    case RelocInfo::CODE_TARGET_WITH_ID:
-      return "code target with id";
     case RelocInfo::GLOBAL_PROPERTY_CELL:
       return "global property cell";
     case RelocInfo::RUNTIME_ENTRY:
@@ -614,13 +488,9 @@ void RelocInfo::Print(FILE* out) {
     Code* code = Code::GetCodeFromTargetAddress(target_address());
     PrintF(out, " (%s)  (%p)", Code::Kind2String(code->kind()),
            target_address());
-    if (rmode_ == CODE_TARGET_WITH_ID) {
-      PrintF(" (id=%d)", static_cast<int>(data_));
-    }
   } else if (IsPosition(rmode_)) {
     PrintF(out, " (%" V8_PTR_PREFIX "d)", data());
-  } else if (rmode_ == RelocInfo::RUNTIME_ENTRY &&
-             Isolate::Current()->deoptimizer_data() != NULL) {
+  } else if (rmode_ == RelocInfo::RUNTIME_ENTRY) {
     // Depotimization bailouts are stored as runtime entries.
     int id = Deoptimizer::GetDeoptimizationId(
         target_address(), Deoptimizer::EAGER);
@@ -650,14 +520,13 @@ void RelocInfo::Verify() {
 #endif
     case CONSTRUCT_CALL:
     case CODE_TARGET_CONTEXT:
-    case CODE_TARGET_WITH_ID:
     case CODE_TARGET: {
       // convert inline target address to code object
       Address addr = target_address();
       ASSERT(addr != NULL);
       // Check that we can find the right code object.
       Code* code = Code::GetCodeFromTargetAddress(addr);
-      Object* found = HEAP->FindCodeObject(addr);
+      Object* found = Heap::FindCodeObject(addr);
       ASSERT(found->IsCode());
       ASSERT(code->address() == HeapObject::cast(found)->address());
       break;
@@ -683,184 +552,153 @@ void RelocInfo::Verify() {
 // -----------------------------------------------------------------------------
 // Implementation of ExternalReference
 
-ExternalReference::ExternalReference(Builtins::CFunctionId id, Isolate* isolate)
-    : address_(Redirect(isolate, Builtins::c_function_address(id))) {}
+ExternalReference::ExternalReference(Builtins::CFunctionId id)
+    : address_(Redirect(Builtins::c_function_address(id))) {}
 
 ExternalReference::ExternalReference(
-    ApiFunction* fun,
-    Type type = ExternalReference::BUILTIN_CALL,
-    Isolate* isolate = NULL)
-    : address_(Redirect(isolate, fun->address(), type)) {}
+    ApiFunction* fun, Type type = ExternalReference::BUILTIN_CALL)
+    : address_(Redirect(fun->address(), type)) {}
 
-ExternalReference::ExternalReference(Builtins::Name name, Isolate* isolate)
-    : address_(isolate->builtins()->builtin_address(name)) {}
+ExternalReference::ExternalReference(Builtins::Name name)
+    : address_(Builtins::builtin_address(name)) {}
 
-ExternalReference::ExternalReference(Runtime::FunctionId id,
-                                     Isolate* isolate)
-    : address_(Redirect(isolate, Runtime::FunctionForId(id)->entry)) {}
+ExternalReference::ExternalReference(Runtime::FunctionId id)
+    : address_(Redirect(Runtime::FunctionForId(id)->entry)) {}
 
-ExternalReference::ExternalReference(const Runtime::Function* f,
-                                     Isolate* isolate)
-    : address_(Redirect(isolate, f->entry)) {}
+ExternalReference::ExternalReference(Runtime::Function* f)
+    : address_(Redirect(f->entry)) {}
 
-ExternalReference ExternalReference::isolate_address() {
-  return ExternalReference(Isolate::Current());
-}
-
-ExternalReference::ExternalReference(const IC_Utility& ic_utility,
-                                     Isolate* isolate)
-    : address_(Redirect(isolate, ic_utility.address())) {}
+ExternalReference::ExternalReference(const IC_Utility& ic_utility)
+    : address_(Redirect(ic_utility.address())) {}
 
 #ifdef ENABLE_DEBUGGER_SUPPORT
-ExternalReference::ExternalReference(const Debug_Address& debug_address,
-                                     Isolate* isolate)
-    : address_(debug_address.address(isolate)) {}
+ExternalReference::ExternalReference(const Debug_Address& debug_address)
+    : address_(debug_address.address()) {}
 #endif
 
 ExternalReference::ExternalReference(StatsCounter* counter)
   : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
 
-ExternalReference::ExternalReference(Isolate::AddressId id, Isolate* isolate)
-    : address_(isolate->get_address_from_id(id)) {}
+ExternalReference::ExternalReference(Top::AddressId id)
+    : address_(Top::get_address_from_id(id)) {}
 
 ExternalReference::ExternalReference(const SCTableReference& table_ref)
   : address_(table_ref.address()) {}
 
-ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
-  return ExternalReference(Redirect(isolate,
-                                    FUNCTION_ADDR(Runtime::PerformGC)));
+ExternalReference ExternalReference::perform_gc_function() {
+  return ExternalReference(Redirect(FUNCTION_ADDR(Runtime::PerformGC)));
 }
 
-ExternalReference ExternalReference::fill_heap_number_with_random_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(
-      isolate,
-      FUNCTION_ADDR(V8::FillHeapNumberWithRandom)));
+ExternalReference ExternalReference::fill_heap_number_with_random_function() {
  return
+      ExternalReference(Redirect(FUNCTION_ADDR(V8::FillHeapNumberWithRandom)));
 }
 
-ExternalReference ExternalReference::delete_handle_scope_extensions(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(
-      isolate,
-      FUNCTION_ADDR(HandleScope::DeleteExtensions)));
+ExternalReference ExternalReference::delete_handle_scope_extensions() {
+  return ExternalReference(Redirect(FUNCTION_ADDR(
+      HandleScope::DeleteExtensions)));
 }
 
-ExternalReference ExternalReference::random_uint32_function(
-    Isolate* isolate) {
-  return ExternalReference(Redirect(isolate, FUNCTION_ADDR(V8::Random)));
+ExternalReference ExternalReference::random_uint32_function() {
+  return ExternalReference(Redirect(FUNCTION_ADDR(V8::Random)));
 }
 
-ExternalReference ExternalReference::transcendental_cache_array_address(
-    Isolate* isolate) {
-  return ExternalReference(
-      isolate->transcendental_cache()->cache_array_address());
+ExternalReference ExternalReference::transcendental_cache_array_address() {
+  return ExternalReference(TranscendentalCache::cache_array_address());
 }
 
-ExternalReference ExternalReference::new_deoptimizer_function(
-    Isolate* isolate) {
+ExternalReference ExternalReference::new_deoptimizer_function() {
   return ExternalReference(
-      Redirect(isolate, FUNCTION_ADDR(Deoptimizer::New)));
+      Redirect(FUNCTION_ADDR(Deoptimizer::New)));
 }
 
-ExternalReference ExternalReference::compute_output_frames_function(
-    Isolate* isolate) {
+ExternalReference ExternalReference::compute_output_frames_function() {
   return ExternalReference(
-      Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
+      Redirect(FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
 }
 
-ExternalReference ExternalReference::global_contexts_list(Isolate* isolate) {
-  return ExternalReference(isolate->heap()->global_contexts_list_address());
+ExternalReference ExternalReference::global_contexts_list() {
+  return ExternalReference(Heap::global_contexts_list_address());
 }
 
-ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
-  return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
+ExternalReference ExternalReference::keyed_lookup_cache_keys() {
+  return ExternalReference(KeyedLookupCache::keys_address());
 }
 
-ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
-    Isolate* isolate) {
-  return ExternalReference(
-      isolate->keyed_lookup_cache()->field_offsets_address());
+ExternalReference ExternalReference::keyed_lookup_cache_field_offsets() {
+  return ExternalReference(KeyedLookupCache::field_offsets_address());
} }
ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) { ExternalReference ExternalReference::the_hole_value_location() {
return ExternalReference(isolate->factory()->the_hole_value().location()); return ExternalReference(Factory::the_hole_value().location());
} }
ExternalReference ExternalReference::arguments_marker_location( ExternalReference ExternalReference::arguments_marker_location() {
Isolate* isolate) { return ExternalReference(Factory::arguments_marker().location());
return ExternalReference(isolate->factory()->arguments_marker().location());
} }
ExternalReference ExternalReference::roots_address(Isolate* isolate) { ExternalReference ExternalReference::roots_address() {
return ExternalReference(isolate->heap()->roots_address()); return ExternalReference(Heap::roots_address());
} }
ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) { ExternalReference ExternalReference::address_of_stack_limit() {
return ExternalReference(isolate->stack_guard()->address_of_jslimit()); return ExternalReference(StackGuard::address_of_jslimit());
} }
ExternalReference ExternalReference::address_of_real_stack_limit( ExternalReference ExternalReference::address_of_real_stack_limit() {
Isolate* isolate) { return ExternalReference(StackGuard::address_of_real_jslimit());
return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
} }
ExternalReference ExternalReference::address_of_regexp_stack_limit( ExternalReference ExternalReference::address_of_regexp_stack_limit() {
Isolate* isolate) { return ExternalReference(RegExpStack::limit_address());
return ExternalReference(isolate->regexp_stack()->limit_address());
} }
ExternalReference ExternalReference::new_space_start(Isolate* isolate) { ExternalReference ExternalReference::new_space_start() {
return ExternalReference(isolate->heap()->NewSpaceStart()); return ExternalReference(Heap::NewSpaceStart());
} }
ExternalReference ExternalReference::new_space_mask(Isolate* isolate) { ExternalReference ExternalReference::new_space_mask() {
Address mask = reinterpret_cast<Address>(isolate->heap()->NewSpaceMask()); return ExternalReference(reinterpret_cast<Address>(Heap::NewSpaceMask()));
return ExternalReference(mask);
} }
ExternalReference ExternalReference::new_space_allocation_top_address( ExternalReference ExternalReference::new_space_allocation_top_address() {
Isolate* isolate) { return ExternalReference(Heap::NewSpaceAllocationTopAddress());
return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
} }
ExternalReference ExternalReference::heap_always_allocate_scope_depth( ExternalReference ExternalReference::heap_always_allocate_scope_depth() {
Isolate* isolate) { return ExternalReference(Heap::always_allocate_scope_depth_address());
Heap* heap = isolate->heap();
return ExternalReference(heap->always_allocate_scope_depth_address());
} }
ExternalReference ExternalReference::new_space_allocation_limit_address( ExternalReference ExternalReference::new_space_allocation_limit_address() {
Isolate* isolate) { return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
return ExternalReference(isolate->heap()->NewSpaceAllocationLimitAddress());
} }
@ -879,9 +717,8 @@ ExternalReference ExternalReference::handle_scope_limit_address() {
} }
ExternalReference ExternalReference::scheduled_exception_address( ExternalReference ExternalReference::scheduled_exception_address() {
Isolate* isolate) { return ExternalReference(Top::scheduled_exception_address());
return ExternalReference(isolate->scheduled_exception_address());
} }
@ -903,34 +740,15 @@ ExternalReference ExternalReference::address_of_minus_zero() {
} }
ExternalReference ExternalReference::address_of_zero() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::zero)));
}
ExternalReference ExternalReference::address_of_uint8_max_value() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::uint8_max_value)));
}
ExternalReference ExternalReference::address_of_negative_infinity() { ExternalReference ExternalReference::address_of_negative_infinity() {
return ExternalReference(reinterpret_cast<void*>( return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::negative_infinity))); const_cast<double*>(&DoubleConstant::negative_infinity)));
} }
ExternalReference ExternalReference::address_of_nan() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::nan)));
}
#ifndef V8_INTERPRETED_REGEXP #ifndef V8_INTERPRETED_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state( ExternalReference ExternalReference::re_check_stack_guard_state() {
Isolate* isolate) {
Address function; Address function;
#ifdef V8_TARGET_ARCH_X64 #ifdef V8_TARGET_ARCH_X64
function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState); function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
@ -938,23 +756,19 @@ ExternalReference ExternalReference::re_check_stack_guard_state(
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState); function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM #elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState); function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#else #else
UNREACHABLE(); UNREACHABLE();
#endif #endif
return ExternalReference(Redirect(isolate, function)); return ExternalReference(Redirect(function));
} }
ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) { ExternalReference ExternalReference::re_grow_stack() {
return ExternalReference( return ExternalReference(
Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack))); Redirect(FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
} }
ExternalReference ExternalReference::re_case_insensitive_compare_uc16( ExternalReference ExternalReference::re_case_insensitive_compare_uc16() {
Isolate* isolate) {
return ExternalReference(Redirect( return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16))); FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
} }
@ -963,21 +777,16 @@ ExternalReference ExternalReference::re_word_character_map() {
NativeRegExpMacroAssembler::word_character_map_address()); NativeRegExpMacroAssembler::word_character_map_address());
} }
ExternalReference ExternalReference::address_of_static_offsets_vector( ExternalReference ExternalReference::address_of_static_offsets_vector() {
Isolate* isolate) { return ExternalReference(OffsetsVector::static_offsets_vector_address());
return ExternalReference(
OffsetsVector::static_offsets_vector_address(isolate));
} }
ExternalReference ExternalReference::address_of_regexp_stack_memory_address( ExternalReference ExternalReference::address_of_regexp_stack_memory_address() {
Isolate* isolate) { return ExternalReference(RegExpStack::memory_address());
return ExternalReference(
isolate->regexp_stack()->memory_address());
} }
ExternalReference ExternalReference::address_of_regexp_stack_memory_size( ExternalReference ExternalReference::address_of_regexp_stack_memory_size() {
Isolate* isolate) { return ExternalReference(RegExpStack::memory_size_address());
return ExternalReference(isolate->regexp_stack()->memory_size_address());
} }
#endif // V8_INTERPRETED_REGEXP #endif // V8_INTERPRETED_REGEXP
@ -1008,45 +817,6 @@ static double mod_two_doubles(double x, double y) {
} }
static double math_sin_double(double x) {
return sin(x);
}
static double math_cos_double(double x) {
return cos(x);
}
static double math_log_double(double x) {
return log(x);
}
ExternalReference ExternalReference::math_sin_double_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(math_sin_double),
BUILTIN_FP_CALL));
}
ExternalReference ExternalReference::math_cos_double_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(math_cos_double),
BUILTIN_FP_CALL));
}
ExternalReference ExternalReference::math_log_double_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(math_log_double),
BUILTIN_FP_CALL));
}
// Helper function to compute x^y, where y is known to be an // Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of // integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry // multiplications; see the discussion in "Hacker's Delight" by Henry
@ -1082,19 +852,15 @@ double power_double_double(double x, double y) {
} }
ExternalReference ExternalReference::power_double_double_function( ExternalReference ExternalReference::power_double_double_function() {
Isolate* isolate) { return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double),
return ExternalReference(Redirect(isolate, FP_RETURN_CALL));
FUNCTION_ADDR(power_double_double),
BUILTIN_FP_FP_CALL));
} }
ExternalReference ExternalReference::power_double_int_function( ExternalReference ExternalReference::power_double_int_function() {
Isolate* isolate) { return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int),
return ExternalReference(Redirect(isolate, FP_RETURN_CALL));
FUNCTION_ADDR(power_double_int),
BUILTIN_FP_INT_CALL));
} }
@ -1105,7 +871,7 @@ static int native_compare_doubles(double y, double x) {
ExternalReference ExternalReference::double_fp_operation( ExternalReference ExternalReference::double_fp_operation(
Token::Value operation, Isolate* isolate) { Token::Value operation) {
typedef double BinaryFPOperation(double x, double y); typedef double BinaryFPOperation(double x, double y);
BinaryFPOperation* function = NULL; BinaryFPOperation* function = NULL;
switch (operation) { switch (operation) {
@ -1127,28 +893,29 @@ ExternalReference ExternalReference::double_fp_operation(
default: default:
UNREACHABLE(); UNREACHABLE();
} }
return ExternalReference(Redirect(isolate, // Passing true as 2nd parameter indicates that they return an fp value.
FUNCTION_ADDR(function), return ExternalReference(Redirect(FUNCTION_ADDR(function), FP_RETURN_CALL));
BUILTIN_FP_FP_CALL));
} }
ExternalReference ExternalReference::compare_doubles(Isolate* isolate) { ExternalReference ExternalReference::compare_doubles() {
return ExternalReference(Redirect(isolate, return ExternalReference(Redirect(FUNCTION_ADDR(native_compare_doubles),
FUNCTION_ADDR(native_compare_doubles), BUILTIN_CALL));
BUILTIN_COMPARE_CALL));
} }
ExternalReference::ExternalReferenceRedirector*
ExternalReference::redirector_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT #ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference ExternalReference::debug_break(Isolate* isolate) { ExternalReference ExternalReference::debug_break() {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break))); return ExternalReference(Redirect(FUNCTION_ADDR(Debug::Break)));
} }
ExternalReference ExternalReference::debug_step_in_fp_address( ExternalReference ExternalReference::debug_step_in_fp_address() {
Isolate* isolate) { return ExternalReference(Debug::step_in_fp_addr());
return ExternalReference(isolate->debug()->step_in_fp_addr());
} }
#endif #endif
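Nearly every change above is the same mechanical edit: the Isolate* parameter threaded through ExternalReference in the newer V8 disappears, because 3.1.8 still keeps VM state (Top, Heap, Factory, StackGuard) in process-wide statics, and the simulator redirector likewise reverts from per-isolate state to a single static hook. A minimal standalone sketch of that restored pattern follows; the names are illustrative, not V8 source.

// Sketch of the process-global redirector pattern this file reverts to:
// one static hook may rewrite any external address before it is embedded
// in generated code.
#include <cassert>
#include <cstdio>

typedef void* (*Redirector)(void* original, int type);

class ExternalRefSketch {
 public:
  static void set_redirector(Redirector r) {
    assert(redirector_ == NULL);  // We can't stack them.
    redirector_ = r;
  }
  explicit ExternalRefSketch(void* address)
      : address_(redirector_ == NULL ? address : redirector_(address, 0)) {}
  void* address() const { return address_; }

 private:
  static Redirector redirector_;
  void* address_;
};

Redirector ExternalRefSketch::redirector_ = NULL;

// A simulator-style hook that just logs and passes the address through.
static void* LogAndPass(void* original, int type) {
  std::printf("redirect %p (type %d)\n", original, type);
  return original;
}

int main() {
  ExternalRefSketch::set_redirector(LogAndPass);
  static int target = 0;
  ExternalRefSketch ref(&target);
  assert(ref.address() == &target);
  return 0;
}

The trade-off is exactly what the abandoned Isolate work was meant to fix: with a static redirector and static heap roots, a process can host only one VM instance.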
324  deps/v8/src/assembler.h
@@ -30,34 +30,19 @@
 // The original source code covered by the above license above has been
 // modified significantly by Google Inc.
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2009 the V8 project authors. All rights reserved.

 #ifndef V8_ASSEMBLER_H_
 #define V8_ASSEMBLER_H_

-#include "allocation.h"
 #include "gdb-jit.h"
 #include "runtime.h"
+#include "top.h"
 #include "token.h"

 namespace v8 {
 namespace internal {

-const unsigned kNoASTId = -1;
-
-// -----------------------------------------------------------------------------
-// Platform independent assembler base class.
-
-class AssemblerBase: public Malloced {
- public:
-  explicit AssemblerBase(Isolate* isolate);
-
-  Isolate* isolate() const { return isolate_; }
-  int jit_cookie() { return jit_cookie_; }
-
- private:
-  Isolate* isolate_;
-  int jit_cookie_;
-};
-
 // -----------------------------------------------------------------------------
 // Common double constants.
@@ -67,10 +52,7 @@ class DoubleConstant: public AllStatic {
   static const double min_int;
   static const double one_half;
   static const double minus_zero;
-  static const double zero;
-  static const double uint8_max_value;
   static const double negative_infinity;
-  static const double nan;
 };
@@ -82,32 +64,18 @@ class DoubleConstant: public AllStatic {
 class Label BASE_EMBEDDED {
  public:
-  enum Distance {
-    kNear, kFar
-  };
-
-  INLINE(Label()) {
-    Unuse();
-    UnuseNear();
-  }
-
-  INLINE(~Label()) {
-    ASSERT(!is_linked());
-    ASSERT(!is_near_linked());
-  }
+  INLINE(Label()) { Unuse(); }
+  INLINE(~Label()) { ASSERT(!is_linked()); }

   INLINE(void Unuse()) { pos_ = 0; }
-  INLINE(void UnuseNear()) { near_link_pos_ = 0; }

   INLINE(bool is_bound() const) { return pos_ < 0; }
-  INLINE(bool is_unused() const) { return pos_ == 0 && near_link_pos_ == 0; }
+  INLINE(bool is_unused() const) { return pos_ == 0; }
   INLINE(bool is_linked() const) { return pos_ > 0; }
-  INLINE(bool is_near_linked() const) { return near_link_pos_ > 0; }

   // Returns the position of bound or linked labels. Cannot be used
   // for unused labels.
   int pos() const;
-  int near_link_pos() const { return near_link_pos_ - 1; }

  private:
   // pos_ encodes both the binding state (via its sign)
@@ -118,30 +86,74 @@ class Label BASE_EMBEDDED {
   // pos_ >  0  linked label, pos() returns the last reference position
   int pos_;

-  // Behaves like |pos_| in the "> 0" case, but for near jumps to this label.
-  int near_link_pos_;
-
   void bind_to(int pos) {
     pos_ = -pos - 1;
     ASSERT(is_bound());
   }
-  void link_to(int pos, Distance distance = kFar) {
-    if (distance == kNear) {
-      near_link_pos_ = pos + 1;
-      ASSERT(is_near_linked());
-    } else {
-      pos_ = pos + 1;
-      ASSERT(is_linked());
-    }
-  }
+  void link_to(int pos) {
+    pos_ = pos + 1;
+    ASSERT(is_linked());
+  }

   friend class Assembler;
   friend class RegexpAssembler;
   friend class Displacement;
+  friend class ShadowTarget;
   friend class RegExpMacroAssemblerIrregexp;
 };

+// -----------------------------------------------------------------------------
+// NearLabels are labels used for short jumps (in Intel jargon).
+// NearLabels should be used if it can be guaranteed that the jump range is
+// within -128 to +127. We already use short jumps when jumping backwards,
+// so using a NearLabel will only have performance impact if used for forward
+// jumps.
+class NearLabel BASE_EMBEDDED {
+ public:
+  NearLabel() { Unuse(); }
+  ~NearLabel() { ASSERT(!is_linked()); }
+
+  void Unuse() {
+    pos_ = -1;
+    unresolved_branches_ = 0;
+#ifdef DEBUG
+    for (int i = 0; i < kMaxUnresolvedBranches; i++) {
+      unresolved_positions_[i] = -1;
+    }
+#endif
+  }
+
+  int pos() {
+    ASSERT(is_bound());
+    return pos_;
+  }
+
+  bool is_bound() { return pos_ >= 0; }
+  bool is_linked() { return !is_bound() && unresolved_branches_ > 0; }
+  bool is_unused() { return !is_bound() && unresolved_branches_ == 0; }
+
+  void bind_to(int position) {
+    ASSERT(!is_bound());
+    pos_ = position;
+  }
+
+  void link_to(int position) {
+    ASSERT(!is_bound());
+    ASSERT(unresolved_branches_ < kMaxUnresolvedBranches);
+    unresolved_positions_[unresolved_branches_++] = position;
+  }
+
+ private:
+  static const int kMaxUnresolvedBranches = 8;
+  int pos_;
+  int unresolved_branches_;
+  int unresolved_positions_[kMaxUnresolvedBranches];
+
+  friend class Assembler;
+};
+
 // -----------------------------------------------------------------------------
 // Relocation information
@@ -185,11 +197,10 @@ class RelocInfo BASE_EMBEDDED {
   enum Mode {
     // Please note the order is important (see IsCodeTarget, IsGCRelocMode).
-    CODE_TARGET,  // Code target which is not any of the above.
-    CODE_TARGET_WITH_ID,
     CONSTRUCT_CALL,  // code target that is a call to a JavaScript constructor.
     CODE_TARGET_CONTEXT,  // Code target used for contextual loads and stores.
     DEBUG_BREAK,  // Code target for the debugger statement.
+    CODE_TARGET,  // Code target which is not any of the above.
     EMBEDDED_OBJECT,
     GLOBAL_PROPERTY_CELL,
@@ -205,12 +216,10 @@ class RelocInfo BASE_EMBEDDED {
     // add more as needed
     // Pseudo-types
-    NUMBER_OF_MODES,  // There are at most 14 modes with noncompact encoding.
+    NUMBER_OF_MODES,  // must be no greater than 14 - see RelocInfoWriter
     NONE,  // never recorded
-    LAST_CODE_ENUM = DEBUG_BREAK,
-    LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
-    // Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
-    LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID
+    LAST_CODE_ENUM = CODE_TARGET,
+    LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL
   };
@@ -311,7 +320,7 @@ class RelocInfo BASE_EMBEDDED {
   INLINE(void set_call_object(Object* target));
   INLINE(Object** call_object_address());

-  template<typename StaticVisitor> inline void Visit(Heap* heap);
+  template<typename StaticVisitor> inline void Visit();
   inline void Visit(ObjectVisitor* v);

   // Patch the code with some other code.
@@ -340,8 +349,7 @@ class RelocInfo BASE_EMBEDDED {
   static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
   static const int kPositionMask = 1 << POSITION | 1 << STATEMENT_POSITION;
-  static const int kDataMask =
-      (1 << CODE_TARGET_WITH_ID) | kPositionMask | (1 << COMMENT);
+  static const int kDebugMask = kPositionMask | 1 << COMMENT;
   static const int kApplyMask;  // Modes affected by apply. Depends on arch.

  private:
@@ -352,19 +360,6 @@ class RelocInfo BASE_EMBEDDED {
   byte* pc_;
   Mode rmode_;
   intptr_t data_;
-#ifdef V8_TARGET_ARCH_MIPS
-  // Code and Embedded Object pointers in mips are stored split
-  // across two consecutive 32-bit instructions. Heap management
-  // routines expect to access these pointers indirectly. The following
-  // location provides a place for these pointers to exist natually
-  // when accessed via the Iterator.
-  Object *reconstructed_obj_ptr_;
-  // External-reference pointers are also split across instruction-pairs
-  // in mips, but are accessed via indirect pointers. This location
-  // provides a place for that pointer to exist naturally. Its address
-  // is returned by RelocInfo::target_reference_address().
-  Address reconstructed_adr_ptr_;
-#endif  // V8_TARGET_ARCH_MIPS
   friend class RelocIterator;
 };
@@ -373,14 +368,9 @@ class RelocInfo BASE_EMBEDDED {
 // lower addresses.
 class RelocInfoWriter BASE_EMBEDDED {
  public:
-  RelocInfoWriter() : pos_(NULL),
-                      last_pc_(NULL),
-                      last_id_(0),
-                      last_position_(0) {}
-  RelocInfoWriter(byte* pos, byte* pc) : pos_(pos),
-                                         last_pc_(pc),
-                                         last_id_(0),
-                                         last_position_(0) {}
+  RelocInfoWriter() : pos_(NULL), last_pc_(NULL), last_data_(0) {}
+  RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc),
+                                         last_data_(0) {}

   byte* pos() const { return pos_; }
   byte* last_pc() const { return last_pc_; }
@@ -405,15 +395,13 @@ class RelocInfoWriter BASE_EMBEDDED {
   inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
   inline void WriteTaggedPC(uint32_t pc_delta, int tag);
   inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
-  inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
   inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
   inline void WriteTaggedData(intptr_t data_delta, int tag);
   inline void WriteExtraTag(int extra_tag, int top_tag);

   byte* pos_;
   byte* last_pc_;
-  int last_id_;
-  int last_position_;
+  intptr_t last_data_;
   DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
 };
@@ -455,13 +443,12 @@ class RelocIterator: public Malloced {
   int GetTopTag();
   void ReadTaggedPC();
   void AdvanceReadPC();
-  void AdvanceReadId();
-  void AdvanceReadPosition();
   void AdvanceReadData();
   void AdvanceReadVariableLengthPCJump();
-  int GetLocatableTypeTag();
-  void ReadTaggedId();
-  void ReadTaggedPosition();
+  int GetPositionTypeTag();
+  void ReadTaggedData();
+
+  static RelocInfo::Mode DebugInfoModeFromTag(int tag);

   // If the given mode is wanted, set it in rinfo_ and return true.
   // Else return false. Used for efficiently skipping unwanted modes.
@@ -474,8 +461,6 @@ class RelocIterator: public Malloced {
   RelocInfo rinfo_;
   bool done_;
   int mode_mask_;
-  int last_id_;
-  int last_position_;
   DISALLOW_COPY_AND_ASSIGN(RelocIterator);
 };
@@ -504,21 +489,9 @@ class ExternalReference BASE_EMBEDDED {
     // MaybeObject* f(v8::internal::Arguments).
     BUILTIN_CALL,  // default

-    // Builtin that takes float arguments and returns an int.
-    // int f(double, double).
-    BUILTIN_COMPARE_CALL,
-
     // Builtin call that returns floating point.
     // double f(double, double).
-    BUILTIN_FP_FP_CALL,
-
-    // Builtin call that returns floating point.
-    // double f(double).
-    BUILTIN_FP_CALL,
-
-    // Builtin call that returns floating point.
-    // double f(double, int).
-    BUILTIN_FP_INT_CALL,
+    FP_RETURN_CALL,

     // Direct call to API function callback.
     // Handle<Value> f(v8::Arguments&)
@@ -531,131 +504,117 @@ class ExternalReference BASE_EMBEDDED {

   typedef void* ExternalReferenceRedirector(void* original, Type type);

-  ExternalReference(Builtins::CFunctionId id, Isolate* isolate);
-  ExternalReference(ApiFunction* ptr, Type type, Isolate* isolate);
-  ExternalReference(Builtins::Name name, Isolate* isolate);
-  ExternalReference(Runtime::FunctionId id, Isolate* isolate);
-  ExternalReference(const Runtime::Function* f, Isolate* isolate);
-  ExternalReference(const IC_Utility& ic_utility, Isolate* isolate);
+  explicit ExternalReference(Builtins::CFunctionId id);
+  explicit ExternalReference(ApiFunction* ptr, Type type);
+  explicit ExternalReference(Builtins::Name name);
+  explicit ExternalReference(Runtime::FunctionId id);
+  explicit ExternalReference(Runtime::Function* f);
+  explicit ExternalReference(const IC_Utility& ic_utility);
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  ExternalReference(const Debug_Address& debug_address, Isolate* isolate);
+  explicit ExternalReference(const Debug_Address& debug_address);
 #endif
   explicit ExternalReference(StatsCounter* counter);
-  ExternalReference(Isolate::AddressId id, Isolate* isolate);
+  explicit ExternalReference(Top::AddressId id);
   explicit ExternalReference(const SCTableReference& table_ref);

-  // Isolate::Current() as an external reference.
-  static ExternalReference isolate_address();
-
   // One-of-a-kind references. These references are not part of a general
   // pattern. This means that they have to be added to the
   // ExternalReferenceTable in serialize.cc manually.

-  static ExternalReference perform_gc_function(Isolate* isolate);
-  static ExternalReference fill_heap_number_with_random_function(
-      Isolate* isolate);
-  static ExternalReference random_uint32_function(Isolate* isolate);
-  static ExternalReference transcendental_cache_array_address(Isolate* isolate);
-  static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
+  static ExternalReference perform_gc_function();
+  static ExternalReference fill_heap_number_with_random_function();
+  static ExternalReference random_uint32_function();
+  static ExternalReference transcendental_cache_array_address();
+  static ExternalReference delete_handle_scope_extensions();

   // Deoptimization support.
-  static ExternalReference new_deoptimizer_function(Isolate* isolate);
-  static ExternalReference compute_output_frames_function(Isolate* isolate);
-  static ExternalReference global_contexts_list(Isolate* isolate);
+  static ExternalReference new_deoptimizer_function();
+  static ExternalReference compute_output_frames_function();
+  static ExternalReference global_contexts_list();

   // Static data in the keyed lookup cache.
-  static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
-  static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
+  static ExternalReference keyed_lookup_cache_keys();
+  static ExternalReference keyed_lookup_cache_field_offsets();

   // Static variable Factory::the_hole_value.location()
-  static ExternalReference the_hole_value_location(Isolate* isolate);
+  static ExternalReference the_hole_value_location();

   // Static variable Factory::arguments_marker.location()
-  static ExternalReference arguments_marker_location(Isolate* isolate);
+  static ExternalReference arguments_marker_location();

   // Static variable Heap::roots_address()
-  static ExternalReference roots_address(Isolate* isolate);
+  static ExternalReference roots_address();

   // Static variable StackGuard::address_of_jslimit()
-  static ExternalReference address_of_stack_limit(Isolate* isolate);
+  static ExternalReference address_of_stack_limit();

   // Static variable StackGuard::address_of_real_jslimit()
-  static ExternalReference address_of_real_stack_limit(Isolate* isolate);
+  static ExternalReference address_of_real_stack_limit();

   // Static variable RegExpStack::limit_address()
-  static ExternalReference address_of_regexp_stack_limit(Isolate* isolate);
+  static ExternalReference address_of_regexp_stack_limit();

   // Static variables for RegExp.
-  static ExternalReference address_of_static_offsets_vector(Isolate* isolate);
-  static ExternalReference address_of_regexp_stack_memory_address(
-      Isolate* isolate);
-  static ExternalReference address_of_regexp_stack_memory_size(
-      Isolate* isolate);
+  static ExternalReference address_of_static_offsets_vector();
+  static ExternalReference address_of_regexp_stack_memory_address();
+  static ExternalReference address_of_regexp_stack_memory_size();

   // Static variable Heap::NewSpaceStart()
-  static ExternalReference new_space_start(Isolate* isolate);
-  static ExternalReference new_space_mask(Isolate* isolate);
-  static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate);
+  static ExternalReference new_space_start();
+  static ExternalReference new_space_mask();
+  static ExternalReference heap_always_allocate_scope_depth();

   // Used for fast allocation in generated code.
-  static ExternalReference new_space_allocation_top_address(Isolate* isolate);
-  static ExternalReference new_space_allocation_limit_address(Isolate* isolate);
+  static ExternalReference new_space_allocation_top_address();
+  static ExternalReference new_space_allocation_limit_address();

-  static ExternalReference double_fp_operation(Token::Value operation,
-                                               Isolate* isolate);
-  static ExternalReference compare_doubles(Isolate* isolate);
-  static ExternalReference power_double_double_function(Isolate* isolate);
-  static ExternalReference power_double_int_function(Isolate* isolate);
+  static ExternalReference double_fp_operation(Token::Value operation);
+  static ExternalReference compare_doubles();
+  static ExternalReference power_double_double_function();
+  static ExternalReference power_double_int_function();

   static ExternalReference handle_scope_next_address();
   static ExternalReference handle_scope_limit_address();
   static ExternalReference handle_scope_level_address();

-  static ExternalReference scheduled_exception_address(Isolate* isolate);
+  static ExternalReference scheduled_exception_address();

   // Static variables containing common double constants.
   static ExternalReference address_of_min_int();
   static ExternalReference address_of_one_half();
   static ExternalReference address_of_minus_zero();
-  static ExternalReference address_of_zero();
-  static ExternalReference address_of_uint8_max_value();
   static ExternalReference address_of_negative_infinity();
-  static ExternalReference address_of_nan();
-
-  static ExternalReference math_sin_double_function(Isolate* isolate);
-  static ExternalReference math_cos_double_function(Isolate* isolate);
-  static ExternalReference math_log_double_function(Isolate* isolate);

   Address address() const {return reinterpret_cast<Address>(address_);}

 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Function Debug::Break()
-  static ExternalReference debug_break(Isolate* isolate);
+  static ExternalReference debug_break();

   // Used to check if single stepping is enabled in generated code.
-  static ExternalReference debug_step_in_fp_address(Isolate* isolate);
+  static ExternalReference debug_step_in_fp_address();
 #endif

 #ifndef V8_INTERPRETED_REGEXP
   // C functions called from RegExp generated code.

   // Function NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()
-  static ExternalReference re_case_insensitive_compare_uc16(Isolate* isolate);
+  static ExternalReference re_case_insensitive_compare_uc16();

   // Function RegExpMacroAssembler*::CheckStackGuardState()
-  static ExternalReference re_check_stack_guard_state(Isolate* isolate);
+  static ExternalReference re_check_stack_guard_state();

   // Function NativeRegExpMacroAssembler::GrowStack()
-  static ExternalReference re_grow_stack(Isolate* isolate);
+  static ExternalReference re_grow_stack();

   // byte NativeRegExpMacroAssembler::word_character_bitmap
   static ExternalReference re_word_character_map();
@@ -664,39 +623,30 @@ class ExternalReference BASE_EMBEDDED {

   // This lets you register a function that rewrites all external references.
   // Used by the ARM simulator to catch calls to external references.
-  static void set_redirector(Isolate* isolate,
-                             ExternalReferenceRedirector* redirector) {
-    // We can't stack them.
-    ASSERT(isolate->external_reference_redirector() == NULL);
-    isolate->set_external_reference_redirector(
-        reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
+  static void set_redirector(ExternalReferenceRedirector* redirector) {
+    ASSERT(redirector_ == NULL);  // We can't stack them.
+    redirector_ = redirector;
   }

  private:
   explicit ExternalReference(void* address)
       : address_(address) {}

-  static void* Redirect(Isolate* isolate,
-                        void* address,
-                        Type type = ExternalReference::BUILTIN_CALL) {
-    ExternalReferenceRedirector* redirector =
-        reinterpret_cast<ExternalReferenceRedirector*>(
-            isolate->external_reference_redirector());
-    if (redirector == NULL) return address;
-    void* answer = (*redirector)(address, type);
-    return answer;
-  }
+  static ExternalReferenceRedirector* redirector_;
+
+  static void* Redirect(void* address,
+                        Type type = ExternalReference::BUILTIN_CALL) {
+    if (redirector_ == NULL) return address;
+    void* answer = (*redirector_)(address, type);
+    return answer;
+  }

-  static void* Redirect(Isolate* isolate,
-                        Address address_arg,
+  static void* Redirect(Address address_arg,
                         Type type = ExternalReference::BUILTIN_CALL) {
-    ExternalReferenceRedirector* redirector =
-        reinterpret_cast<ExternalReferenceRedirector*>(
-            isolate->external_reference_redirector());
     void* address = reinterpret_cast<void*>(address_arg);
-    void* answer = (redirector == NULL) ?
+    void* answer = (redirector_ == NULL) ?
                    address :
-                   (*redirector)(address, type);
+                   (*redirector_)(address, type);
     return answer;
   }
@@ -835,28 +785,6 @@ static inline int NumberOfBitsSet(uint32_t x) {
 double power_double_int(double x, int y);
 double power_double_double(double x, double y);

-// Helper class for generating code or data associated with the code
-// right after a call instruction. As an example this can be used to
-// generate safepoint data after calls for crankshaft.
-class CallWrapper {
- public:
-  CallWrapper() { }
-  virtual ~CallWrapper() { }
-  // Called just before emitting a call. Argument is the size of the generated
-  // call code.
-  virtual void BeforeCall(int call_size) const = 0;
-  // Called just after emitting a call, i.e., at the return site for the call.
-  virtual void AfterCall() const = 0;
-};
-
-class NullCallWrapper : public CallWrapper {
- public:
-  NullCallWrapper() { }
-  virtual ~NullCallWrapper() { }
-  virtual void BeforeCall(int call_size) const { }
-  virtual void AfterCall() const { }
-};
-
 } }  // namespace v8::internal

 #endif  // V8_ASSEMBLER_H_
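One notable revert in the header above: the newer V8 folds short-jump support into Label via the Distance enum, while 3.1.8 keeps a separate NearLabel whose unresolved forward branches are collected and patched once the label is bound. A standalone sketch of that bookkeeping, assuming x86-style one-byte displacements; this is illustrative, not the V8 Assembler API.

// Forward short-jump sites are remembered until the label is bound, then
// each recorded site is patched with a signed 8-bit displacement.
#include <cassert>
#include <cstdint>
#include <vector>

class NearLabelSketch {
 public:
  bool is_bound() const { return pos_ >= 0; }
  // Record a forward branch whose displacement byte lives at offset `site`.
  void link_to(int site) { assert(!is_bound()); sites_.push_back(site); }
  void bind_to(int pos) { assert(!is_bound()); pos_ = pos; }
  // Patch every recorded displacement once the target position is known.
  void patch(std::vector<uint8_t>* code) const {
    assert(is_bound());
    for (size_t i = 0; i < sites_.size(); ++i) {
      int site = sites_[i];
      int delta = pos_ - (site + 1);          // relative to next instruction
      assert(delta >= -128 && delta <= 127);  // must fit a short jump
      (*code)[site] = static_cast<uint8_t>(delta);
    }
  }

 private:
  int pos_ = -1;  // -1 while unbound
  std::vector<int> sites_;
};

int main() {
  std::vector<uint8_t> code;
  NearLabelSketch label;
  code.push_back(0xEB);  // x86 short JMP opcode
  code.push_back(0x00);  // displacement placeholder
  label.link_to(1);      // remember the placeholder's offset
  code.push_back(0x90);  // an instruction to jump over (NOP)
  label.bind_to(static_cast<int>(code.size()));
  label.patch(&code);
  assert(code[1] == 0x01);  // jumps over the single NOP
  return 0;
}

As the restored header comment notes, backward jumps already use the short form automatically, so NearLabel only pays off for forward jumps known to stay within -128 to +127 bytes.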
11  deps/v8/src/ast-inl.h
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -31,7 +31,7 @@
 #include "v8.h"

 #include "ast.h"
-#include "scopes.h"
+#include "jump-target-inl.h"

 namespace v8 {
 namespace internal {
@@ -62,7 +62,7 @@ BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
 IterationStatement::IterationStatement(ZoneStringList* labels)
     : BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
       body_(NULL),
-      continue_target_(),
+      continue_target_(JumpTarget::BIDIRECTIONAL),
       osr_entry_id_(GetNextId()) {
 }
@@ -102,11 +102,6 @@ ForInStatement::ForInStatement(ZoneStringList* labels)
 }

-bool FunctionLiteral::strict_mode() const {
-  return scope()->is_strict_mode();
-}
-
 } }  // namespace v8::internal

 #endif  // V8_AST_INL_H_
367  deps/v8/src/ast.cc
@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,21 +28,21 @@
 #include "v8.h"

 #include "ast.h"
+#include "jump-target-inl.h"
 #include "parser.h"
 #include "scopes.h"
 #include "string-stream.h"
-#include "type-info.h"

 namespace v8 {
 namespace internal {

-AstSentinels::AstSentinels()
-    : this_proxy_(true),
-      identifier_proxy_(false),
-      valid_left_hand_side_sentinel_(),
-      this_property_(&this_proxy_, NULL, 0),
-      call_sentinel_(NULL, NULL, 0) {
-}
+unsigned AstNode::current_id_ = 0;
+unsigned AstNode::count_ = 0;
+VariableProxySentinel VariableProxySentinel::this_proxy_(true);
+VariableProxySentinel VariableProxySentinel::identifier_proxy_(false);
+ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_;
+Property Property::this_property_(VariableProxySentinel::this_proxy(), NULL, 0);
+Call Call::sentinel_(NULL, NULL, 0);

 // ----------------------------------------------------------------------------
@@ -77,23 +77,20 @@ VariableProxy::VariableProxy(Variable* var)
       var_(NULL),  // Will be set by the call to BindTo.
       is_this_(var->is_this()),
       inside_with_(false),
-      is_trivial_(false),
-      position_(RelocInfo::kNoPosition) {
+      is_trivial_(false) {
   BindTo(var);
 }

 VariableProxy::VariableProxy(Handle<String> name,
                              bool is_this,
-                             bool inside_with,
-                             int position)
+                             bool inside_with)
     : name_(name),
       var_(NULL),
       is_this_(is_this),
       inside_with_(inside_with),
-      is_trivial_(false),
-      position_(position) {
-  // Names must be canonicalized for fast equality checks.
+      is_trivial_(false) {
+  // names must be canonicalized for fast equality checks
   ASSERT(name->IsSymbol());
 }
@@ -173,7 +170,7 @@ ObjectLiteral::Property::Property(Literal* key, Expression* value) {
   key_ = key;
   value_ = value;
   Object* k = *key->handle();
-  if (k->IsSymbol() && HEAP->Proto_symbol()->Equals(String::cast(k))) {
+  if (k->IsSymbol() && Heap::Proto_symbol()->Equals(String::cast(k))) {
     kind_ = PROTOTYPE;
   } else if (value_->AsMaterializedLiteral() != NULL) {
     kind_ = MATERIALIZED_LITERAL;
@@ -252,11 +249,10 @@ void ObjectLiteral::CalculateEmitStore() {
   uint32_t hash;
   HashMap* table;
   void* key;
-  Factory* factory = Isolate::Current()->factory();
   if (handle->IsSymbol()) {
     Handle<String> name(String::cast(*handle));
     if (name->AsArrayIndex(&hash)) {
-      Handle<Object> key_handle = factory->NewNumberFromUint(hash);
+      Handle<Object> key_handle = Factory::NewNumberFromUint(hash);
       key = key_handle.location();
       table = &elements;
     } else {
@@ -273,7 +269,7 @@ void ObjectLiteral::CalculateEmitStore() {
       char arr[100];
       Vector<char> buffer(arr, ARRAY_SIZE(arr));
       const char* str = DoubleToCString(num, buffer);
-      Handle<String> name = factory->NewStringFromAscii(CStrVector(str));
+      Handle<String> name = Factory::NewStringFromAscii(CStrVector(str));
       key = name.location();
       hash = name->Hash();
       table = &properties;
@@ -291,237 +287,137 @@ void ObjectLiteral::CalculateEmitStore() {
 }

-void TargetCollector::AddTarget(Label* target) {
+void TargetCollector::AddTarget(BreakTarget* target) {
   // Add the label to the collector, but discard duplicates.
-  int length = targets_.length();
+  int length = targets_->length();
   for (int i = 0; i < length; i++) {
-    if (targets_[i] == target) return;
+    if (targets_->at(i) == target) return;
   }
-  targets_.Add(target);
+  targets_->Add(target);
 }

-bool UnaryOperation::ResultOverwriteAllowed() {
-  switch (op_) {
-    case Token::BIT_NOT:
-    case Token::SUB:
-      return true;
-    default:
-      return false;
-  }
-}
-
-bool BinaryOperation::ResultOverwriteAllowed() {
-  switch (op_) {
-    case Token::COMMA:
-    case Token::OR:
-    case Token::AND:
-      return false;
-    case Token::BIT_OR:
-    case Token::BIT_XOR:
-    case Token::BIT_AND:
-    case Token::SHL:
-    case Token::SAR:
-    case Token::SHR:
-    case Token::ADD:
-    case Token::SUB:
-    case Token::MUL:
-    case Token::DIV:
-    case Token::MOD:
-      return true;
-    default:
-      UNREACHABLE();
-  }
-  return false;
-}
-
-bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
-                                              Handle<String>* check) {
-  if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false;
-
-  UnaryOperation* left_unary = left_->AsUnaryOperation();
-  UnaryOperation* right_unary = right_->AsUnaryOperation();
-  Literal* left_literal = left_->AsLiteral();
-  Literal* right_literal = right_->AsLiteral();
-
-  // Check for the pattern: typeof <expression> == <string literal>.
-  if (left_unary != NULL && left_unary->op() == Token::TYPEOF &&
-      right_literal != NULL && right_literal->handle()->IsString()) {
-    *expr = left_unary->expression();
-    *check = Handle<String>::cast(right_literal->handle());
-    return true;
-  }
-
-  // Check for the pattern: <string literal> == typeof <expression>.
-  if (right_unary != NULL && right_unary->op() == Token::TYPEOF &&
-      left_literal != NULL && left_literal->handle()->IsString()) {
-    *expr = right_unary->expression();
-    *check = Handle<String>::cast(left_literal->handle());
-    return true;
-  }
-
-  return false;
-}
-
-bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
-  if (op_ != Token::EQ_STRICT) return false;
-
-  UnaryOperation* left_unary = left_->AsUnaryOperation();
-  UnaryOperation* right_unary = right_->AsUnaryOperation();
-
-  // Check for the pattern: <expression> === void <literal>.
-  if (right_unary != NULL && right_unary->op() == Token::VOID &&
-      right_unary->expression()->AsLiteral() != NULL) {
-    *expr = left_;
-    return true;
-  }
-
-  // Check for the pattern: void <literal> === <expression>.
-  if (left_unary != NULL && left_unary->op() == Token::VOID &&
-      left_unary->expression()->AsLiteral() != NULL) {
-    *expr = right_;
-    return true;
-  }
-
-  return false;
-}
-
-// ----------------------------------------------------------------------------
-// Inlining support
-
-bool Declaration::IsInlineable() const {
-  return proxy()->var()->IsStackAllocated() && fun() == NULL;
-}
-
-bool TargetCollector::IsInlineable() const {
-  UNREACHABLE();
-  return false;
-}
-
-bool Slot::IsInlineable() const {
-  UNREACHABLE();
-  return false;
-}
-
-bool ForInStatement::IsInlineable() const {
-  return false;
-}
-
-bool EnterWithContextStatement::IsInlineable() const {
-  return false;
-}
-
-bool ExitContextStatement::IsInlineable() const {
-  return false;
-}
-
-bool SwitchStatement::IsInlineable() const {
-  return false;
-}
-
-bool TryStatement::IsInlineable() const {
-  return false;
-}
-
-bool TryCatchStatement::IsInlineable() const {
-  return false;
-}
-
-bool TryFinallyStatement::IsInlineable() const {
-  return false;
-}
-
-bool DebuggerStatement::IsInlineable() const {
-  return false;
-}
-
-bool Throw::IsInlineable() const {
-  return exception()->IsInlineable();
-}
-
-bool MaterializedLiteral::IsInlineable() const {
-  // TODO(1322): Allow materialized literals.
-  return false;
-}
-
-bool FunctionLiteral::IsInlineable() const {
-  // TODO(1322): Allow materialized literals.
-  return false;
-}
-
-bool ThisFunction::IsInlineable() const {
-  return false;
-}
-
-bool SharedFunctionInfoLiteral::IsInlineable() const {
-  return false;
-}
-
-bool ValidLeftHandSideSentinel::IsInlineable() const {
-  UNREACHABLE();
-  return false;
-}
-
-bool ForStatement::IsInlineable() const {
-  return (init() == NULL || init()->IsInlineable())
-      && (cond() == NULL || cond()->IsInlineable())
-      && (next() == NULL || next()->IsInlineable())
-      && body()->IsInlineable();
-}
-
-bool WhileStatement::IsInlineable() const {
-  return cond()->IsInlineable()
-      && body()->IsInlineable();
-}
-
-bool DoWhileStatement::IsInlineable() const {
-  return cond()->IsInlineable()
-      && body()->IsInlineable();
-}
-
-bool ContinueStatement::IsInlineable() const {
-  return true;
-}
-
-bool BreakStatement::IsInlineable() const {
-  return true;
-}
-
-bool EmptyStatement::IsInlineable() const {
-  return true;
-}
-
-bool Literal::IsInlineable() const {
-  return true;
-}
+bool Expression::GuaranteedSmiResult() {
+  BinaryOperation* node = AsBinaryOperation();
+  if (node == NULL) return false;
+  Token::Value op = node->op();
+  switch (op) {
+    case Token::COMMA:
+    case Token::OR:
+    case Token::AND:
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+    case Token::BIT_XOR:
+    case Token::SHL:
+      return false;
+      break;
+    case Token::BIT_OR:
+    case Token::BIT_AND: {
+      Literal* left = node->left()->AsLiteral();
+      Literal* right = node->right()->AsLiteral();
+      if (left != NULL && left->handle()->IsSmi()) {
+        int value = Smi::cast(*left->handle())->value();
+        if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
+          // Result of bitwise or is always a negative Smi.
+          return true;
+        }
+        if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
+          // Result of bitwise and is always a positive Smi.
+          return true;
+        }
+      }
+      if (right != NULL && right->handle()->IsSmi()) {
+        int value = Smi::cast(*right->handle())->value();
+        if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
+          // Result of bitwise or is always a negative Smi.
+          return true;
+        }
+        if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
+          // Result of bitwise and is always a positive Smi.
+          return true;
+        }
+      }
+      return false;
+      break;
+    }
+    case Token::SAR:
+    case Token::SHR: {
+      Literal* right = node->right()->AsLiteral();
+      if (right != NULL && right->handle()->IsSmi()) {
+        int value = Smi::cast(*right->handle())->value();
+        if ((value & 0x1F) > 1 ||
+            (op == Token::SAR && (value & 0x1F) == 1)) {
+          return true;
+        }
+      }
+      return false;
+      break;
+    }
+    default:
+      UNREACHABLE();
+      break;
+  }
+  return false;
+}
+
+void Expression::CopyAnalysisResultsFrom(Expression* other) {
+  bitfields_ = other->bitfields_;
+  type_ = other->type_;
+}
+
+bool UnaryOperation::ResultOverwriteAllowed() {
+  switch (op_) {
+    case Token::BIT_NOT:
+    case Token::SUB:
+      return true;
+    default:
+      return false;
+  }
+}
+
+bool BinaryOperation::ResultOverwriteAllowed() {
+  switch (op_) {
+    case Token::COMMA:
+    case Token::OR:
+    case Token::AND:
+      return false;
+    case Token::BIT_OR:
+    case Token::BIT_XOR:
+    case Token::BIT_AND:
+    case Token::SHL:
+    case Token::SAR:
+    case Token::SHR:
+    case Token::ADD:
+    case Token::SUB:
+    case Token::MUL:
+    case Token::DIV:
+    case Token::MOD:
+      return true;
+    default:
+      UNREACHABLE();
+  }
+  return false;
+}
+
+BinaryOperation::BinaryOperation(Assignment* assignment) {
+  ASSERT(assignment->is_compound());
+  op_ = assignment->binary_op();
+  left_ = assignment->target();
+  right_ = assignment->value();
+  pos_ = assignment->position();
+  CopyAnalysisResultsFrom(assignment);
+}
+
+// ----------------------------------------------------------------------------
+// Inlining support

 bool Block::IsInlineable() const {
   const int count = statements_.length();
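The hunk above re-adds Expression::GuaranteedSmiResult, whose bit tests read tersely. The idea, under the usual 32-bit Smi encoding (31 payload bits, so a value fits only when bits 30 and 31 agree), is that OR-ing in a literal with both top bits set, or AND-ing with one that has both clear, pins the result inside the Smi range regardless of the other operand. A standalone illustration in plain C++, not V8 source:

#include <cassert>
#include <cstdint>

// A 32-bit value fits in a Smi when bits 30 and 31 are equal.
static bool FitsInSmi(int32_t v) {
  uint32_t top = static_cast<uint32_t>(v) & 0xc0000000;
  return top == 0xc0000000 || top == 0;
}

int main() {
  // OR with a constant whose top two bits are set forces them set in the
  // result: always a negative value inside the Smi range.
  int32_t mask_or = static_cast<int32_t>(0xc0000001);
  for (int32_t x = -5; x <= 5; ++x) assert(FitsInSmi(x | mask_or));

  // AND with a constant whose top two bits are clear forces them clear:
  // always a non-negative value inside the Smi range.
  int32_t mask_and = 0x3fffffff;
  for (int32_t x = -5; x <= 5; ++x) assert(FitsInSmi(x & mask_and));
  return 0;
}

The SAR/SHR case follows the same logic from the other direction: shifting right by two or more (or by one for the arithmetic shift, which preserves the sign bit) guarantees the top two bits of the result agree.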
@@ -538,9 +434,8 @@ bool ExpressionStatement::IsInlineable() const {

 bool IfStatement::IsInlineable() const {
-  return condition()->IsInlineable()
-      && then_statement()->IsInlineable()
-      && else_statement()->IsInlineable();
+  return condition()->IsInlineable() && then_statement()->IsInlineable() &&
+      else_statement()->IsInlineable();
 }
@@ -591,17 +486,6 @@ bool CallNew::IsInlineable() const {

 bool CallRuntime::IsInlineable() const {
-  // Don't try to inline JS runtime calls because we don't (currently) even
-  // optimize them.
-  if (is_jsruntime()) return false;
-  // Don't inline the %_ArgumentsLength or %_Arguments because their
-  // implementation will not work.  There is no stack frame to get them
-  // from.
-  if (function()->intrinsic_type == Runtime::INLINE &&
-      (name()->IsEqualTo(CStrVector("_ArgumentsLength")) ||
-       name()->IsEqualTo(CStrVector("_Arguments")))) {
-    return false;
-  }
   const int count = arguments()->length();
   for (int i = 0; i < count; ++i) {
     if (!arguments()->at(i)->IsInlineable()) return false;
@@ -640,14 +524,14 @@ bool CountOperation::IsInlineable() const {

 void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
   // Record type feedback from the oracle in the AST.
-  is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
+  is_monomorphic_ = oracle->LoadIsMonomorphic(this);
   if (key()->IsPropertyName()) {
-    if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_ArrayLength)) {
+    if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_ArrayLength)) {
       is_array_length_ = true;
-    } else if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_StringLength)) {
+    } else if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_StringLength)) {
       is_string_length_ = true;
     } else if (oracle->LoadIsBuiltin(this,
-                                     Builtins::kLoadIC_FunctionPrototype)) {
+                                     Builtins::LoadIC_FunctionPrototype)) {
       is_function_prototype_ = true;
     } else {
       Literal* lit_key = key()->AsLiteral();
@@ -656,13 +540,8 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
       ZoneMapList* types = oracle->LoadReceiverTypes(this, name);
       receiver_types_ = types;
     }
-  } else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
-    is_string_access_ = true;
   } else if (is_monomorphic_) {
     monomorphic_receiver_type_ = oracle->LoadMonomorphicReceiverType(this);
-  } else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) {
-    receiver_types_ = new ZoneMapList(kMaxKeyedPolymorphism);
-    oracle->CollectKeyedReceiverTypes(this->id(), receiver_types_);
   }
 }
@@ -670,7 +549,7 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
 void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
   Property* prop = target()->AsProperty();
   ASSERT(prop != NULL);
-  is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
+  is_monomorphic_ = oracle->StoreIsMonomorphic(this);
   if (prop->key()->IsPropertyName()) {
     Literal* lit_key = prop->key()->AsLiteral();
     ASSERT(lit_key != NULL && lit_key->handle()->IsString());
@@ -678,23 +557,8 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
     ZoneMapList* types = oracle->StoreReceiverTypes(this, name);
     receiver_types_ = types;
   } else if (is_monomorphic_) {
-    // Record receiver type for monomorphic keyed stores.
-    monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
-  } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
-    receiver_types_ = new ZoneMapList(kMaxKeyedPolymorphism);
-    oracle->CollectKeyedReceiverTypes(this->id(), receiver_types_);
-  }
-}
-
-void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
-  is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
-  if (is_monomorphic_) {
-    // Record receiver type for monomorphic keyed stores.
+    // Record receiver type for monomorphic keyed loads.
     monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
-  } else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
-    receiver_types_ = new ZoneMapList(kMaxKeyedPolymorphism);
-    oracle->CollectKeyedReceiverTypes(this->id(), receiver_types_);
   }
 }
@@ -749,36 +613,38 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {

 bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
-                               LookupResult* lookup) {
+                               Handle<String> name) {
   target_ = Handle<JSFunction>::null();
   cell_ = Handle<JSGlobalPropertyCell>::null();
-  ASSERT(lookup->IsProperty() &&
-         lookup->type() == NORMAL &&
-         lookup->holder() == *global);
-  cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(lookup));
-  if (cell_->value()->IsJSFunction()) {
-    Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
-    // If the function is in new space we assume it's more likely to
-    // change and thus prefer the general IC code.
-    if (!HEAP->InNewSpace(*candidate) &&
-        CanCallWithoutIC(candidate, arguments()->length())) {
-      target_ = candidate;
-      return true;
+  LookupResult lookup;
+  global->Lookup(*name, &lookup);
+  if (lookup.IsProperty() &&
+      lookup.type() == NORMAL &&
+      lookup.holder() == *global) {
+    cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(&lookup));
+    if (cell_->value()->IsJSFunction()) {
+      Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
+      // If the function is in new space we assume it's more likely to
+      // change and thus prefer the general IC code.
+      if (!Heap::InNewSpace(*candidate) &&
+          CanCallWithoutIC(candidate, arguments()->length())) {
+        target_ = candidate;
+        return true;
+      }
     }
   }
   return false;
 }

-void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
-                              CallKind call_kind) {
+void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
   Property* property = expression()->AsProperty();
   ASSERT(property != NULL);
   // Specialize for the receiver types seen at runtime.
   Literal* key = property->key()->AsLiteral();
   ASSERT(key != NULL && key->handle()->IsString());
   Handle<String> name = Handle<String>::cast(key->handle());
receiver_types_ = oracle->CallReceiverTypes(this, name, call_kind); receiver_types_ = oracle->CallReceiverTypes(this, name);
#ifdef DEBUG #ifdef DEBUG
if (FLAG_enable_slow_asserts) { if (FLAG_enable_slow_asserts) {
if (receiver_types_ != NULL) { if (receiver_types_ != NULL) {
@ -825,7 +691,7 @@ void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
bool AstVisitor::CheckStackOverflow() { bool AstVisitor::CheckStackOverflow() {
if (stack_overflow_) return true; if (stack_overflow_) return true;
StackLimitCheck check(isolate_); StackLimitCheck check;
if (!check.HasOverflowed()) return false; if (!check.HasOverflowed()) return false;
return (stack_overflow_ = true); return (stack_overflow_ = true);
} }
@ -1196,9 +1062,6 @@ CaseClause::CaseClause(Expression* label,
: label_(label), : label_(label),
statements_(statements), statements_(statements),
position_(pos), position_(pos),
compare_type_(NONE), compare_type_(NONE) {}
compare_id_(AstNode::GetNextId()),
entry_id_(AstNode::GetNextId()) {
}
} } // namespace v8::internal } } // namespace v8::internal
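Note on the largest hunk above: Call::ComputeGlobalTarget reverts to doing its own LookupResult-based lookup, restoring the pattern of resolving a named global to its property cell and caching the call target only when the cell holds a function that is unlikely to move. An illustrative reduction of that control flow (ResolveGlobalCallTarget is a hypothetical name; types and the new-space heuristic follow the diff, error handling is elided):

    // Sketch only: simplified shape of the restored lookup-and-cache logic.
    bool ResolveGlobalCallTarget(Handle<GlobalObject> global,
                                 Handle<String> name,
                                 Handle<JSFunction>* target_out) {
      LookupResult lookup;
      global->Lookup(*name, &lookup);
      // Only a plain data property held by the global itself qualifies.
      if (!lookup.IsProperty() || lookup.type() != NORMAL) return false;
      Handle<JSGlobalPropertyCell> cell(global->GetPropertyCell(&lookup));
      if (!cell->value()->IsJSFunction()) return false;
      Handle<JSFunction> candidate(JSFunction::cast(cell->value()));
      // New-space objects are likely to change, so prefer the general IC.
      if (Heap::InNewSpace(*candidate)) return false;
      *target_out = candidate;
      return true;
    }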

461
deps/v8/src/ast.h

File diff suppressed because it is too large

2
deps/v8/src/atomicops.h

@@ -158,8 +158,6 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
 #include "atomicops_internals_x86_gcc.h"
 #elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
 #include "atomicops_internals_arm_gcc.h"
-#elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS)
-#include "atomicops_internals_mips_gcc.h"
 #else
 #error "Atomic operations are not supported on your platform"
 #endif

169
deps/v8/src/atomicops_internals_mips_gcc.h

@@ -1,169 +0,0 @@ (entire file deleted)
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
namespace v8 {
namespace internal {
// Atomically execute:
// result = *ptr;
// if (*ptr == old_value)
// *ptr = new_value;
// return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
__asm__ __volatile__("1:\n"
"ll %0, %1\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2
"nop\n" // delay slot nop
"sc %2, %1\n" // *ptr = new_value (with atomic check)
"beqz %2, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
: "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
: "Ir" (old_value), "r" (new_value), "m" (*ptr)
: "memory");
return prev;
}
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 temp, old;
__asm__ __volatile__("1:\n"
"ll %1, %2\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"sc %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
: "=&r" (temp), "=&r" (old), "=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory");
return old;
}
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp, temp2;
__asm__ __volatile__("1:\n"
"ll %0, %2\n" // temp = *ptr
"addu %0, %3\n" // temp = temp + increment
"move %1, %0\n" // temp2 = temp
"sc %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
: "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
: "Ir" (increment), "m" (*ptr)
: "memory");
// temp2 now holds the final value.
return temp2;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
ATOMICOPS_COMPILER_BARRIER();
return res;
}
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
ATOMICOPS_COMPILER_BARRIER();
return x;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
ATOMICOPS_COMPILER_BARRIER();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrier() {
ATOMICOPS_COMPILER_BARRIER();
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
} } // namespace v8::internal
#undef ATOMICOPS_COMPILER_BARRIER
#endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
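The deleted header's comments spell out the contract: the ll/sc retry loops give an atomic read-modify-write with no implied barriers, and the Acquire_/Release_ variants only add ordering around them. A minimal sketch of building a lock on exactly the primitives this header declares (illustrative only, not part of the commit):

    #include "atomicops.h"  // Atomic32, Acquire_CompareAndSwap, Release_Store

    namespace v8 { namespace internal {

    class SpinLock {
     public:
      SpinLock() : state_(0) {}
      void Lock() {
        // The CAS returns the previous value, so 0 means we won the
        // 0 -> 1 transition and now hold the lock.
        while (Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
          // spin until the holder releases
        }
      }
      void Unlock() {
        // Release ordering: writes made under the lock become visible
        // before the lock is observed as free.
        Release_Store(&state_, 0);
      }
     private:
      volatile Atomic32 state_;
    };

    } }  // namespace v8::internal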

13
deps/v8/src/atomicops_internals_x86_gcc.cc

@@ -57,9 +57,6 @@
 #if defined(cpuid)        // initialize the struct only on x86

-namespace v8 {
-namespace internal {
-
 // Set the flags so that code will run correctly and conservatively, so even
 // if we haven't been initialized yet, we're probably single threaded, and our
 // default values should hopefully be pretty safe.
@@ -68,14 +65,8 @@ struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
   false,          // no SSE2
 };

-} }  // namespace v8::internal
-
-namespace {
-
 // Initialize the AtomicOps_Internalx86CPUFeatures struct.
-void AtomicOps_Internalx86CPUFeaturesInit() {
-  using v8::internal::AtomicOps_Internalx86CPUFeatures;
+static void AtomicOps_Internalx86CPUFeaturesInit() {
   uint32_t eax;
   uint32_t ebx;
   uint32_t ecx;
@@ -116,6 +107,8 @@ void AtomicOps_Internalx86CPUFeaturesInit() {
   AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
 }

+namespace {
+
 class AtomicOpsx86Initializer {
  public:
   AtomicOpsx86Initializer() {

6
deps/v8/src/atomicops_internals_x86_gcc.h

@@ -30,9 +30,6 @@
 #ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
 #define V8_ATOMICOPS_INTERNALS_X86_GCC_H_

-namespace v8 {
-namespace internal {
-
 // This struct is not part of the public API of this module; clients may not
 // use it.
 // Features of this x86.  Values may not be correct before main() is run,
@@ -46,6 +43,9 @@ extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

+namespace v8 {
+namespace internal {
+
 // 32-bit low-level operations on any platform.

 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,

1192
deps/v8/src/bootstrapper.cc

File diff suppressed because it is too large

121
deps/v8/src/bootstrapper.h

@@ -29,148 +29,77 @@
 #ifndef V8_BOOTSTRAPPER_H_
 #define V8_BOOTSTRAPPER_H_

-#include "allocation.h"
-
 namespace v8 {
 namespace internal {

-// A SourceCodeCache uses a FixedArray to store pairs of
-// (AsciiString*, JSFunction*), mapping names of native code files
-// (runtime.js, etc.) to precompiled functions. Instead of mapping
-// names to functions it might make sense to let the JS2C tool
-// generate an index for each native JS file.
-class SourceCodeCache BASE_EMBEDDED {
+class BootstrapperActive BASE_EMBEDDED {
  public:
-  explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
-
-  void Initialize(bool create_heap_objects) {
-    cache_ = create_heap_objects ? HEAP->empty_fixed_array() : NULL;
-  }
-
-  void Iterate(ObjectVisitor* v) {
-    v->VisitPointer(BitCast<Object**, FixedArray**>(&cache_));
-  }
-
-  bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
-    for (int i = 0; i < cache_->length(); i+=2) {
-      SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
-      if (str->IsEqualTo(name)) {
-        *handle = Handle<SharedFunctionInfo>(
-            SharedFunctionInfo::cast(cache_->get(i + 1)));
-        return true;
-      }
-    }
-    return false;
-  }
-
-  void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
-    HandleScope scope;
-    int length = cache_->length();
-    Handle<FixedArray> new_array =
-        FACTORY->NewFixedArray(length + 2, TENURED);
-    cache_->CopyTo(0, *new_array, 0, cache_->length());
-    cache_ = *new_array;
-    Handle<String> str = FACTORY->NewStringFromAscii(name, TENURED);
-    cache_->set(length, *str);
-    cache_->set(length + 1, *shared);
-    Script::cast(shared->script())->set_type(Smi::FromInt(type_));
-  }
+  BootstrapperActive() { nesting_++; }
+  ~BootstrapperActive() { nesting_--; }
+
+  // Support for thread preemption.
+  static int ArchiveSpacePerThread();
+  static char* ArchiveState(char* to);
+  static char* RestoreState(char* from);

  private:
-  Script::Type type_;
-  FixedArray* cache_;
-  DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
+  static bool IsActive() { return nesting_ != 0; }
+  static int nesting_;
+  friend class Bootstrapper;
 };

 // The Boostrapper is the public interface for creating a JavaScript global
 // context.
-class Bootstrapper {
+class Bootstrapper : public AllStatic {
  public:
   // Requires: Heap::Setup has been called.
-  void Initialize(bool create_heap_objects);
-  void TearDown();
+  static void Initialize(bool create_heap_objects);
+  static void TearDown();

   // Creates a JavaScript Global Context with initial object graph.
   // The returned value is a global handle casted to V8Environment*.
-  Handle<Context> CreateEnvironment(
-      Isolate* isolate,
+  static Handle<Context> CreateEnvironment(
       Handle<Object> global_object,
       v8::Handle<v8::ObjectTemplate> global_template,
       v8::ExtensionConfiguration* extensions);

   // Detach the environment from its outer global object.
-  void DetachGlobal(Handle<Context> env);
+  static void DetachGlobal(Handle<Context> env);

   // Reattach an outer global object to an environment.
-  void ReattachGlobal(Handle<Context> env, Handle<Object> global_object);
+  static void ReattachGlobal(Handle<Context> env, Handle<Object> global_object);

   // Traverses the pointers for memory management.
-  void Iterate(ObjectVisitor* v);
+  static void Iterate(ObjectVisitor* v);

   // Accessor for the native scripts source code.
-  Handle<String> NativesSourceLookup(int index);
+  static Handle<String> NativesSourceLookup(int index);

   // Tells whether bootstrapping is active.
-  bool IsActive() const { return nesting_ != 0; }
+  static bool IsActive() { return BootstrapperActive::IsActive(); }

   // Support for thread preemption.
   static int ArchiveSpacePerThread();
-  char* ArchiveState(char* to);
-  char* RestoreState(char* from);
-  void FreeThreadResources();
+  static char* ArchiveState(char* to);
+  static char* RestoreState(char* from);
+  static void FreeThreadResources();

   // This will allocate a char array that is deleted when V8 is shut down.
   // It should only be used for strictly finite allocations.
-  char* AllocateAutoDeletedArray(int bytes);
+  static char* AllocateAutoDeletedArray(int bytes);

   // Used for new context creation.
-  bool InstallExtensions(Handle<Context> global_context,
-                         v8::ExtensionConfiguration* extensions);
-
-  SourceCodeCache* extensions_cache() { return &extensions_cache_; }
-
- private:
-  typedef int NestingCounterType;
-  NestingCounterType nesting_;
-  SourceCodeCache extensions_cache_;
-  // This is for delete, not delete[].
-  List<char*>* delete_these_non_arrays_on_tear_down_;
-  // This is for delete[]
-  List<char*>* delete_these_arrays_on_tear_down_;
-
-  friend class BootstrapperActive;
-  friend class Isolate;
-  friend class NativesExternalStringResource;
-
-  Bootstrapper();
-
-  DISALLOW_COPY_AND_ASSIGN(Bootstrapper);
-};
-
-class BootstrapperActive BASE_EMBEDDED {
- public:
-  BootstrapperActive() {
-    ++Isolate::Current()->bootstrapper()->nesting_;
-  }
-
-  ~BootstrapperActive() {
-    --Isolate::Current()->bootstrapper()->nesting_;
-  }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(BootstrapperActive);
+  static bool InstallExtensions(Handle<Context> global_context,
+                                v8::ExtensionConfiguration* extensions);
 };

 class NativesExternalStringResource
     : public v8::String::ExternalAsciiStringResource {
  public:
-  NativesExternalStringResource(Bootstrapper* bootstrapper,
-                                const char* source,
-                                size_t length);
+  explicit NativesExternalStringResource(const char* source);

   const char* data() const {
     return data_;
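The SourceCodeCache removed here stores its entries as adjacent (name, value) pairs in one flat array, scanned with stride 2, exactly as its Lookup/Add methods show. The same layout in a self-contained sketch, with std::vector and std::string standing in for FixedArray and the V8 string types (assumptions, not V8 code):

    #include <string>
    #include <vector>

    // Flat pair cache: slots_[i] holds a name, slots_[i + 1] its value.
    class PairCache {
     public:
      bool Lookup(const std::string& name, std::string* value_out) const {
        for (size_t i = 0; i + 1 < slots_.size(); i += 2) {
          if (slots_[i] == name) {
            *value_out = slots_[i + 1];
            return true;
          }
        }
        return false;
      }
      void Add(const std::string& name, const std::string& value) {
        slots_.push_back(name);   // grow by one pair, mirroring the
        slots_.push_back(value);  // length + 2 reallocation above
      }
     private:
      std::vector<std::string> slots_;
    };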

536
deps/v8/src/builtins.cc

File diff suppressed because it is too large

78
deps/v8/src/builtins.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -58,9 +58,8 @@ enum BuiltinExtraArguments {
   V(FastHandleApiCall, NO_EXTRA_ARGUMENTS)            \
   V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION)    \
   V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS)      \
-  V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS)   \
-                                                      \
-  V(StrictModePoisonPill, NO_EXTRA_ARGUMENTS)
+  V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS)

 // Define list of builtins implemented in assembly.
 #define BUILTIN_LIST_A(V)                             \
@@ -93,18 +92,11 @@ enum BuiltinExtraArguments {
                                     Code::kNoExtraICState)            \
   V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED,                         \
                                     Code::kNoExtraICState)            \
-  V(KeyedLoadIC_MissForceGeneric, BUILTIN, UNINITIALIZED,             \
-                                    Code::kNoExtraICState)            \
-  V(KeyedLoadIC_Slow, BUILTIN, UNINITIALIZED,                         \
-                                    Code::kNoExtraICState)            \
   V(StoreIC_Miss, BUILTIN, UNINITIALIZED,                             \
                                     Code::kNoExtraICState)            \
   V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED,                        \
                                     Code::kNoExtraICState)            \
-  V(KeyedStoreIC_MissForceGeneric, BUILTIN, UNINITIALIZED,            \
-                                    Code::kNoExtraICState)            \
-  V(KeyedStoreIC_Slow, BUILTIN, UNINITIALIZED,                        \
-                                    Code::kNoExtraICState)            \
+                                                                      \
   V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED,                        \
                                     Code::kNoExtraICState)            \
   V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC,                   \
@@ -131,8 +123,6 @@ enum BuiltinExtraArguments {
   V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC,                   \
                                     Code::kNoExtraICState)            \
   V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC,       \
-                                    Code::kNoExtraICState)            \
-  V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MEGAMORPHIC,       \
                                     Code::kNoExtraICState)            \
                                                                       \
   V(StoreIC_Initialize, STORE_IC, UNINITIALIZED,                      \
@@ -165,8 +155,6 @@ enum BuiltinExtraArguments {
                                     kStrictMode)                      \
   V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, MEGAMORPHIC,         \
                                     kStrictMode)                      \
-  V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MEGAMORPHIC,     \
-                                    Code::kNoExtraICState)            \
                                                                       \
   /* Uses KeyedLoadIC_Initialize; must be after in list. */           \
   V(FunctionCall, BUILTIN, UNINITIALIZED,                             \
@@ -247,28 +235,25 @@ enum BuiltinExtraArguments {
   V(APPLY_OVERFLOW, 1)

-class BuiltinFunctionTable;
 class ObjectVisitor;

-class Builtins {
+class Builtins : public AllStatic {
  public:
-  ~Builtins();
-
   // Generate all builtin code objects. Should be called once during
-  // isolate initialization.
-  void Setup(bool create_heap_objects);
-  void TearDown();
+  // VM initialization.
+  static void Setup(bool create_heap_objects);
+  static void TearDown();

   // Garbage collection support.
-  void IterateBuiltins(ObjectVisitor* v);
+  static void IterateBuiltins(ObjectVisitor* v);

   // Disassembler support.
-  const char* Lookup(byte* pc);
+  static const char* Lookup(byte* pc);

   enum Name {
-#define DEF_ENUM_C(name, ignore) k##name,
-#define DEF_ENUM_A(name, kind, state, extra) k##name,
+#define DEF_ENUM_C(name, ignore) name,
+#define DEF_ENUM_A(name, kind, state, extra) name,
     BUILTIN_LIST_C(DEF_ENUM_C)
     BUILTIN_LIST_A(DEF_ENUM_A)
     BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
@@ -291,22 +276,13 @@ class Builtins {
     id_count
   };

-#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
-#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
-  Handle<Code> name();
-  BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
-  BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
-  BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
-#undef DECLARE_BUILTIN_ACCESSOR_C
-#undef DECLARE_BUILTIN_ACCESSOR_A
-
-  Code* builtin(Name name) {
+  static Code* builtin(Name name) {
     // Code::cast cannot be used here since we access builtins
     // during the marking phase of mark sweep. See IC::Clear.
     return reinterpret_cast<Code*>(builtins_[name]);
   }

-  Address builtin_address(Name name) {
+  static Address builtin_address(Name name) {
     return reinterpret_cast<Address>(&builtins_[name]);
   }

@@ -316,24 +292,20 @@ class Builtins {
   static const char* GetName(JavaScript id) { return javascript_names_[id]; }
   static int GetArgumentsCount(JavaScript id) { return javascript_argc_[id]; }
-  Handle<Code> GetCode(JavaScript id, bool* resolved);
+  static Handle<Code> GetCode(JavaScript id, bool* resolved);
   static int NumberOfJavaScriptBuiltins() { return id_count; }

-  bool is_initialized() const { return initialized_; }
-
  private:
-  Builtins();
-
   // The external C++ functions called from the code.
-  static Address const c_functions_[cfunction_count];
+  static Address c_functions_[cfunction_count];

   // Note: These are always Code objects, but to conform with
   // IterateBuiltins() above which assumes Object**'s for the callback
   // function f, we use an Object* array here.
-  Object* builtins_[builtin_count];
-  const char* names_[builtin_count];
-  static const char* const javascript_names_[id_count];
-  static int const javascript_argc_[id_count];
+  static Object* builtins_[builtin_count];
+  static const char* names_[builtin_count];
+  static const char* javascript_names_[id_count];
+  static int javascript_argc_[id_count];

   static void Generate_Adaptor(MacroAssembler* masm,
                                CFunctionId id,
@@ -358,16 +330,8 @@ class Builtins {
   static void Generate_ArrayConstructCode(MacroAssembler* masm);
   static void Generate_StringConstructCode(MacroAssembler* masm);

-  static void Generate_OnStackReplacement(MacroAssembler* masm);
-
-  static void InitBuiltinFunctionTable();
-
-  bool initialized_;
-
-  friend class BuiltinFunctionTable;
-  friend class Isolate;
-
-  DISALLOW_COPY_AND_ASSIGN(Builtins);
+  static void Generate_OnStackReplacement(MacroAssembler* masm);
 };

 } }  // namespace v8::internal
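Both sides of this diff lean on the same X-macro idiom: BUILTIN_LIST_* is a single list that is expanded several times with different per-entry macros (DEF_ENUM_*, and on the removed side DECLARE_BUILTIN_ACCESSOR_*). A self-contained illustration of the idiom with hypothetical entries (the k##name form matches the removed side, plain name the restored one):

    #include <cstdio>

    #define MY_BUILTIN_LIST(V) \
      V(Illegal)               \
      V(ArrayCode)             \
      V(JSConstructCall)

    // Expansion 1: an enum, prefixing each entry with 'k'.
    #define DEF_ENUM(name) k##name,
    enum Name { MY_BUILTIN_LIST(DEF_ENUM) kBuiltinCount };
    #undef DEF_ENUM

    // Expansion 2: a parallel table of printable names.
    #define DEF_NAME(name) #name,
    static const char* kNames[] = { MY_BUILTIN_LIST(DEF_NAME) };
    #undef DEF_NAME

    int main() {
      for (int i = 0; i < kBuiltinCount; ++i)
        std::printf("%d: %s\n", i, kNames[i]);
      return 0;
    }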

4
deps/v8/src/char-predicates.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,8 +28,6 @@
 #ifndef V8_CHAR_PREDICATES_H_
 #define V8_CHAR_PREDICATES_H_

-#include "unicode.h"
-
 namespace v8 {
 namespace internal {

4
deps/v8/src/checks.cc

@@ -30,8 +30,8 @@
 #include "v8.h"

 #include "platform.h"
+#include "top.h"

-// TODO(isolates): is it necessary to lift this?
 static int fatal_error_handler_nesting_depth = 0;

 // Contains protection against recursive calls (faults while handling faults).
@@ -52,7 +52,7 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
   if (fatal_error_handler_nesting_depth < 3) {
     if (i::FLAG_stack_trace_on_abort) {
       // Call this one twice on double fault
-      i::Isolate::Current()->PrintStack();
+      i::Top::PrintStack();
     }
   }
   i::OS::Abort();

4
deps/v8/src/checks.h

@@ -271,8 +271,6 @@ bool EnableSlowAsserts();
 #define ASSERT_EQ(v1, v2)    CHECK_EQ(v1, v2)
 #define ASSERT_NE(v1, v2)    CHECK_NE(v1, v2)
 #define ASSERT_GE(v1, v2)    CHECK_GE(v1, v2)
-#define ASSERT_LT(v1, v2)    CHECK_LT(v1, v2)
-#define ASSERT_LE(v1, v2)    CHECK_LE(v1, v2)
 #define SLOW_ASSERT(condition) if (EnableSlowAsserts()) CHECK(condition)
 #else
 #define ASSERT_RESULT(expr)    (expr)
@@ -280,8 +278,6 @@ bool EnableSlowAsserts();
 #define ASSERT_EQ(v1, v2)      ((void) 0)
 #define ASSERT_NE(v1, v2)      ((void) 0)
 #define ASSERT_GE(v1, v2)      ((void) 0)
-#define ASSERT_LT(v1, v2)      ((void) 0)
-#define ASSERT_LE(v1, v2)      ((void) 0)
 #define SLOW_ASSERT(condition) ((void) 0)
 #endif
 // Static asserts has no impact on runtime performance, so they can be
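The two deleted pairs follow the file's visible pattern: each ASSERT_* maps to its CHECK_* twin in debug builds and to a no-op otherwise. For reference, the shape a reader would re-add, mirroring the surviving neighbours (CHECK_LT/CHECK_LE are assumed to exist alongside CHECK_GE, as the lines above suggest):

    #ifdef DEBUG
    #define ASSERT_LT(v1, v2)    CHECK_LT(v1, v2)
    #define ASSERT_LE(v1, v2)    CHECK_LE(v1, v2)
    #else
    #define ASSERT_LT(v1, v2)    ((void) 0)
    #define ASSERT_LE(v1, v2)    ((void) 0)
    #endif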

73
deps/v8/src/code-stubs.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -29,7 +29,6 @@
 #include "bootstrapper.h"
 #include "code-stubs.h"
-#include "stub-cache.h"
 #include "factory.h"
 #include "gdb-jit.h"
 #include "macro-assembler.h"
@@ -38,10 +37,9 @@ namespace v8 {
 namespace internal {

 bool CodeStub::FindCodeInCache(Code** code_out) {
-  Heap* heap = Isolate::Current()->heap();
-  int index = heap->code_stubs()->FindEntry(GetKey());
+  int index = Heap::code_stubs()->FindEntry(GetKey());
   if (index != NumberDictionary::kNotFound) {
-    *code_out = Code::cast(heap->code_stubs()->ValueAt(index));
+    *code_out = Code::cast(Heap::code_stubs()->ValueAt(index));
     return true;
   }
   return false;
@@ -50,7 +48,7 @@ bool CodeStub::FindCodeInCache(Code** code_out) {
 void CodeStub::GenerateCode(MacroAssembler* masm) {
   // Update the static counter each time a new code stub is generated.
-  masm->isolate()->counters()->code_stubs()->Increment();
+  Counters::code_stubs.Increment();

   // Nested stubs are not allowed for leafs.
   AllowStubCallsScope allow_scope(masm, AllowsStubCalls());
@@ -64,11 +62,9 @@ void CodeStub::GenerateCode(MacroAssembler* masm) {
 void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
   code->set_major_key(MajorKey());

-  Isolate* isolate = masm->isolate();
-  PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
+  PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
   GDBJIT(AddCode(GDBJITInterface::STUB, GetName(), code));
-  Counters* counters = isolate->counters();
-  counters->total_stubs_code_size()->Increment(code->instruction_size());
+  Counters::total_stubs_code_size.Increment(code->instruction_size());

 #ifdef ENABLE_DISASSEMBLER
   if (FLAG_print_code_stubs) {
@@ -88,15 +84,12 @@ int CodeStub::GetCodeKind() {

 Handle<Code> CodeStub::GetCode() {
-  Isolate* isolate = Isolate::Current();
-  Factory* factory = isolate->factory();
-  Heap* heap = isolate->heap();
   Code* code;
   if (!FindCodeInCache(&code)) {
-    HandleScope scope(isolate);
+    v8::HandleScope scope;

     // Generate the new code.
-    MacroAssembler masm(isolate, NULL, 256);
+    MacroAssembler masm(NULL, 256);
     GenerateCode(&masm);

     // Create the code object.
@@ -108,24 +101,22 @@ Handle<Code> CodeStub::GetCode() {
         static_cast<Code::Kind>(GetCodeKind()),
         InLoop(),
         GetICState());
-    Handle<Code> new_object = factory->NewCode(
-        desc, flags, masm.CodeObject(), NeedsImmovableCode());
+    Handle<Code> new_object = Factory::NewCode(desc, flags, masm.CodeObject());
     RecordCodeGeneration(*new_object, &masm);
     FinishCode(*new_object);

     // Update the dictionary and the root in Heap.
     Handle<NumberDictionary> dict =
-        factory->DictionaryAtNumberPut(
-            Handle<NumberDictionary>(heap->code_stubs()),
+        Factory::DictionaryAtNumberPut(
+            Handle<NumberDictionary>(Heap::code_stubs()),
             GetKey(),
             new_object);
-    heap->public_set_code_stubs(*dict);
+    Heap::public_set_code_stubs(*dict);
     code = *new_object;
   }

-  ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
-  return Handle<Code>(code, isolate);
+  return Handle<Code>(code);
 }

@@ -133,9 +124,8 @@ MaybeObject* CodeStub::TryGetCode() {
   Code* code;
   if (!FindCodeInCache(&code)) {
     // Generate the new code.
-    MacroAssembler masm(Isolate::Current(), NULL, 256);
+    MacroAssembler masm(NULL, 256);
     GenerateCode(&masm);
-    Heap* heap = masm.isolate()->heap();

     // Create the code object.
     CodeDesc desc;
@@ -148,7 +138,7 @@ MaybeObject* CodeStub::TryGetCode() {
         GetICState());
     Object* new_object;
     { MaybeObject* maybe_new_object =
-          heap->CreateCode(desc, flags, masm.CodeObject());
+          Heap::CreateCode(desc, flags, masm.CodeObject());
       if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
     }
     code = Code::cast(new_object);
@@ -157,9 +147,9 @@ MaybeObject* CodeStub::TryGetCode() {

     // Try to update the code cache but do not fail if unable.
     MaybeObject* maybe_new_object =
-        heap->code_stubs()->AtNumberPut(GetKey(), code);
+        Heap::code_stubs()->AtNumberPut(GetKey(), code);
     if (maybe_new_object->ToObject(&new_object)) {
-      heap->public_set_code_stubs(NumberDictionary::cast(new_object));
+      Heap::public_set_code_stubs(NumberDictionary::cast(new_object));
     }
   }

@@ -198,12 +188,6 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
     case CompareIC::HEAP_NUMBERS:
       GenerateHeapNumbers(masm);
       break;
-    case CompareIC::STRINGS:
-      GenerateStrings(masm);
-      break;
-    case CompareIC::SYMBOLS:
-      GenerateSymbols(masm);
-      break;
     case CompareIC::OBJECTS:
       GenerateObjects(masm);
       break;
@@ -216,8 +200,7 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
 const char* InstanceofStub::GetName() {
   if (name_ != NULL) return name_;
   const int kMaxNameLength = 100;
-  name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
-      kMaxNameLength);
+  name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
   if (name_ == NULL) return "OOM";

   const char* args = "";
@@ -244,24 +227,4 @@ const char* InstanceofStub::GetName() {
 }

-
-void KeyedLoadFastElementStub::Generate(MacroAssembler* masm) {
-  KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
-}
-
-
-void KeyedStoreFastElementStub::Generate(MacroAssembler* masm) {
-  KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
-}
-
-
-void KeyedLoadExternalArrayStub::Generate(MacroAssembler* masm) {
-  KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
-}
-
-
-void KeyedStoreExternalArrayStub::Generate(MacroAssembler* masm) {
-  KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_);
-}
-
 } }  // namespace v8::internal
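On both sides of this hunk, CodeStub::GetCode is the same find-or-generate memoization: probe a dictionary keyed by the stub's packed key, compile only on a miss, then store the result back. That control flow, reduced to a sketch with std::map standing in for the NumberDictionary (an assumption for illustration, not V8 code):

    #include <map>

    template <typename Key, typename Code, typename Generator>
    Code* GetOrGenerate(std::map<Key, Code*>* cache,
                        const Key& key,
                        Generator generate) {
      typename std::map<Key, Code*>::const_iterator it = cache->find(key);
      if (it != cache->end()) return it->second;  // FindCodeInCache hit
      Code* code = generate();                    // GenerateCode + NewCode
      (*cache)[key] = code;                       // public_set_code_stubs
      return code;
    }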

224
deps/v8/src/code-stubs.h

@@ -28,38 +28,35 @@
 #ifndef V8_CODE_STUBS_H_
 #define V8_CODE_STUBS_H_

-#include "allocation.h"
 #include "globals.h"

 namespace v8 {
 namespace internal {

-// List of code stubs used on all platforms.
+// List of code stubs used on all platforms. The order in this list is important
+// as only the stubs up to and including Instanceof allows nested stub calls.
 #define CODE_STUB_LIST_ALL_PLATFORMS(V)  \
   V(CallFunction)                        \
-  V(UnaryOp)                             \
-  V(BinaryOp)                            \
+  V(GenericBinaryOp)                     \
+  V(TypeRecordingBinaryOp)               \
   V(StringAdd)                           \
+  V(StringCharAt)                        \
   V(SubString)                           \
   V(StringCompare)                       \
+  V(SmiOp)                               \
   V(Compare)                             \
   V(CompareIC)                           \
   V(MathPow)                             \
   V(TranscendentalCache)                 \
   V(Instanceof)                          \
-  /* All stubs above this line only exist in a few versions, which are */  \
-  /* generated ahead of time. Therefore compiling a call to one of */      \
-  /* them can't cause a new stub to be compiled, so compiling a call to */ \
-  /* them is GC safe. The ones below this line exist in many variants */   \
-  /* so code compiling a call to one can cause a GC. This means they */    \
-  /* can't be called from other stubs, since stub generation code is */    \
-  /* not GC safe. */                                                       \
   V(ConvertToDouble)                     \
   V(WriteInt32ToHeapNumber)              \
-  V(IntegerMod)                          \
   V(StackCheck)                          \
   V(FastNewClosure)                      \
   V(FastNewContext)                      \
   V(FastCloneShallowArray)               \
+  V(GenericUnaryOp)                      \
   V(RevertToNumber)                      \
   V(ToBoolean)                           \
   V(ToNumber)                            \
@@ -70,12 +67,7 @@ namespace internal {
   V(NumberToString)                      \
   V(CEntry)                              \
   V(JSEntry)                             \
-  V(KeyedLoadFastElement)                \
-  V(KeyedStoreFastElement)               \
-  V(KeyedLoadExternalArray)              \
-  V(KeyedStoreExternalArray)             \
-  V(DebuggerStatement)                   \
-  V(StringDictionaryNegativeLookup)
+  V(DebuggerStatement)

 // List of code stubs only used on ARM platforms.
 #ifdef V8_TARGET_ARCH_ARM
@@ -89,20 +81,10 @@ namespace internal {
 #define CODE_STUB_LIST_ARM(V)
 #endif

-// List of code stubs only used on MIPS platforms.
-#ifdef V8_TARGET_ARCH_MIPS
-#define CODE_STUB_LIST_MIPS(V)  \
-  V(RegExpCEntry)               \
-  V(DirectCEntry)
-#else
-#define CODE_STUB_LIST_MIPS(V)
-#endif
-
 // Combined list of code stubs.
 #define CODE_STUB_LIST(V)            \
   CODE_STUB_LIST_ALL_PLATFORMS(V)    \
-  CODE_STUB_LIST_ARM(V)              \
-  CODE_STUB_LIST_MIPS(V)
+  CODE_STUB_LIST_ARM(V)

 // Mode to overwrite BinaryExpression values.
 enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
@@ -174,10 +156,10 @@ class CodeStub BASE_EMBEDDED {
   // lazily generated function should be fully optimized or not.
   virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }

-  // BinaryOpStub needs to override this.
+  // GenericBinaryOpStub needs to override this.
   virtual int GetCodeKind();

-  // BinaryOpStub needs to override this.
+  // GenericBinaryOpStub needs to override this.
   virtual InlineCacheState GetICState() {
     return UNINITIALIZED;
   }
@@ -185,10 +167,6 @@ class CodeStub BASE_EMBEDDED {
   // Returns a name for logging/debugging purposes.
   virtual const char* GetName() { return MajorName(MajorKey(), false); }

-  // Returns whether the code generated for this stub needs to be allocated as
-  // a fixed (non-moveable) code object.
-  virtual bool NeedsImmovableCode() { return false; }
-
 #ifdef DEBUG
   virtual void Print() { PrintF("%s\n", GetName()); }
 #endif
@@ -200,7 +178,6 @@ class CodeStub BASE_EMBEDDED {
            MajorKeyBits::encode(MajorKey());
   }

-  // See comment above, where Instanceof is defined.
   bool AllowsStubCalls() { return MajorKey() <= Instanceof; }

   class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
@@ -274,6 +251,7 @@ class StackCheckStub : public CodeStub {
   void Generate(MacroAssembler* masm);

  private:
+
   const char* GetName() { return "StackCheckStub"; }

   Major MajorKey() { return StackCheck; }
@@ -296,17 +274,12 @@ class ToNumberStub: public CodeStub {
 class FastNewClosureStub : public CodeStub {
  public:
-  explicit FastNewClosureStub(StrictModeFlag strict_mode)
-    : strict_mode_(strict_mode) { }
-
   void Generate(MacroAssembler* masm);

  private:
   const char* GetName() { return "FastNewClosureStub"; }
   Major MajorKey() { return FastNewClosure; }
-  int MinorKey() { return strict_mode_; }
-
-  StrictModeFlag strict_mode_;
+  int MinorKey() { return 0; }
 };
@@ -400,6 +373,54 @@ class InstanceofStub: public CodeStub {
 };

+enum NegativeZeroHandling {
+  kStrictNegativeZero,
+  kIgnoreNegativeZero
+};
+
+
+enum UnaryOpFlags {
+  NO_UNARY_FLAGS = 0,
+  NO_UNARY_SMI_CODE_IN_STUB = 1 << 0
+};
+
+
+class GenericUnaryOpStub : public CodeStub {
+ public:
+  GenericUnaryOpStub(Token::Value op,
+                     UnaryOverwriteMode overwrite,
+                     UnaryOpFlags flags,
+                     NegativeZeroHandling negative_zero = kStrictNegativeZero)
+      : op_(op),
+        overwrite_(overwrite),
+        include_smi_code_((flags & NO_UNARY_SMI_CODE_IN_STUB) == 0),
+        negative_zero_(negative_zero) { }
+
+ private:
+  Token::Value op_;
+  UnaryOverwriteMode overwrite_;
+  bool include_smi_code_;
+  NegativeZeroHandling negative_zero_;
+
+  class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
+  class IncludeSmiCodeField: public BitField<bool, 1, 1> {};
+  class NegativeZeroField: public BitField<NegativeZeroHandling, 2, 1> {};
+  class OpField: public BitField<Token::Value, 3, kMinorBits - 3> {};
+
+  Major MajorKey() { return GenericUnaryOp; }
+  int MinorKey() {
+    return OpField::encode(op_) |
+        OverwriteField::encode(overwrite_) |
+        IncludeSmiCodeField::encode(include_smi_code_) |
+        NegativeZeroField::encode(negative_zero_);
+  }
+
+  void Generate(MacroAssembler* masm);
+
+  const char* GetName();
+};
+
+
 class MathPowStub: public CodeStub {
  public:
   MathPowStub() {}
@@ -413,6 +434,18 @@ class MathPowStub: public CodeStub {
 };

+class StringCharAtStub: public CodeStub {
+ public:
+  StringCharAtStub() {}
+
+ private:
+  Major MajorKey() { return StringCharAt; }
+  int MinorKey() { return 0; }
+
+  void Generate(MacroAssembler* masm);
+};
+
+
 class ICCompareStub: public CodeStub {
  public:
   ICCompareStub(Token::Value op, CompareIC::State state)
@@ -435,8 +468,6 @@ class ICCompareStub: public CodeStub {
   void GenerateSmis(MacroAssembler* masm);
   void GenerateHeapNumbers(MacroAssembler* masm);
-  void GenerateSymbols(MacroAssembler* masm);
-  void GenerateStrings(MacroAssembler* masm);
   void GenerateObjects(MacroAssembler* masm);
   void GenerateMiss(MacroAssembler* masm);
@@ -592,8 +623,6 @@ class CEntryStub : public CodeStub {
   Major MajorKey() { return CEntry; }
   int MinorKey();

-  bool NeedsImmovableCode();
-
   const char* GetName() { return "CEntryStub"; }
 };
@@ -632,9 +661,7 @@ class ArgumentsAccessStub: public CodeStub {
  public:
   enum Type {
     READ_ELEMENT,
-    NEW_NON_STRICT_FAST,
-    NEW_NON_STRICT_SLOW,
-    NEW_STRICT
+    NEW_OBJECT
   };

   explicit ArgumentsAccessStub(Type type) : type_(type) { }
@@ -647,9 +674,7 @@ class ArgumentsAccessStub: public CodeStub {
   void Generate(MacroAssembler* masm);
   void GenerateReadElement(MacroAssembler* masm);
-  void GenerateNewStrict(MacroAssembler* masm);
-  void GenerateNewNonStrictFast(MacroAssembler* masm);
-  void GenerateNewNonStrictSlow(MacroAssembler* masm);
+  void GenerateNewObject(MacroAssembler* masm);

   const char* GetName() { return "ArgumentsAccessStub"; }
@@ -740,9 +765,8 @@ class CallFunctionStub: public CodeStub {
   }

   InLoopFlag InLoop() { return in_loop_; }
-
-  bool ReceiverMightBeImplicit() {
-    return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
+  bool ReceiverMightBeValue() {
+    return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0;
   }
 };
@@ -921,98 +945,6 @@ class AllowStubCallsScope {
   DISALLOW_COPY_AND_ASSIGN(AllowStubCallsScope);
 };

-#ifdef DEBUG
-#define DECLARE_ARRAY_STUB_PRINT(name) void Print() { PrintF(#name); }
-#else
-#define DECLARE_ARRAY_STUB_PRINT(name)
-#endif
-
-
-class KeyedLoadFastElementStub : public CodeStub {
- public:
-  explicit KeyedLoadFastElementStub() {
-  }
-
-  Major MajorKey() { return KeyedLoadFastElement; }
-  int MinorKey() { return 0; }
-
-  void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "KeyedLoadFastElementStub"; }
-
-  DECLARE_ARRAY_STUB_PRINT(KeyedLoadFastElementStub)
-};
-
-
-class KeyedStoreFastElementStub : public CodeStub {
- public:
-  explicit KeyedStoreFastElementStub(bool is_js_array)
-      : is_js_array_(is_js_array) { }
-
-  Major MajorKey() { return KeyedStoreFastElement; }
-  int MinorKey() { return is_js_array_ ? 1 : 0; }
-
-  void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "KeyedStoreFastElementStub"; }
-
-  DECLARE_ARRAY_STUB_PRINT(KeyedStoreFastElementStub)
-
- private:
-  bool is_js_array_;
-};
-
-
-class KeyedLoadExternalArrayStub : public CodeStub {
- public:
-  explicit KeyedLoadExternalArrayStub(JSObject::ElementsKind elements_kind)
-      : elements_kind_(elements_kind) { }
-
-  Major MajorKey() { return KeyedLoadExternalArray; }
-  int MinorKey() { return elements_kind_; }
-
-  void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "KeyedLoadExternalArrayStub"; }
-
-  DECLARE_ARRAY_STUB_PRINT(KeyedLoadExternalArrayStub)
-
- protected:
-  JSObject::ElementsKind elements_kind_;
-};
-
-
-class KeyedStoreExternalArrayStub : public CodeStub {
- public:
-  explicit KeyedStoreExternalArrayStub(JSObject::ElementsKind elements_kind)
-      : elements_kind_(elements_kind) { }
-
-  Major MajorKey() { return KeyedStoreExternalArray; }
-  int MinorKey() { return elements_kind_; }
-
-  void Generate(MacroAssembler* masm);
-
-  const char* GetName() { return "KeyedStoreExternalArrayStub"; }
-
-  DECLARE_ARRAY_STUB_PRINT(KeyedStoreExternalArrayStub)
-
- protected:
-  JSObject::ElementsKind elements_kind_;
-};
-
-
-class ToBooleanStub: public CodeStub {
- public:
-  explicit ToBooleanStub(Register tos) : tos_(tos) { }
-
-  void Generate(MacroAssembler* masm);
-
- private:
-  Register tos_;
-  Major MajorKey() { return ToBoolean; }
-  int MinorKey() { return tos_.code(); }
-};
-
 } }  // namespace v8::internal

 #endif  // V8_CODE_STUBS_H_
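The GetKey/MinorKey machinery in this header packs several small fields into one integer via BitField<type, shift, size>, as GenericUnaryOpStub's MinorKey shows. A minimal standalone version of such a helper, with a usage line mirroring that stub's encoding (the decode side is an assumption about the real template's interface, added for completeness):

    template <class T, int shift, int size>
    struct BitField {
      static const unsigned kMask = ((1u << size) - 1) << shift;
      static unsigned encode(T value) {
        // Caller guarantees value fits in 'size' bits.
        return static_cast<unsigned>(value) << shift;
      }
      static T decode(unsigned packed) {
        return static_cast<T>((packed & kMask) >> shift);
      }
    };

    // One bit each for overwrite mode and smi-code inclusion, then the op.
    typedef BitField<bool, 0, 1> OverwriteField;
    typedef BitField<bool, 1, 1> IncludeSmiCodeField;
    typedef BitField<int, 2, 8>  OpField;

    inline unsigned MinorKey(bool overwrite, bool smi_code, int op) {
      return OverwriteField::encode(overwrite) |
             IncludeSmiCodeField::encode(smi_code) |
             OpField::encode(op);
    }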

2
deps/v8/src/code.h

@@ -28,8 +28,6 @@
 #ifndef V8_CODE_H_
 #define V8_CODE_H_

-#include "allocation.h"
-
 namespace v8 {
 namespace internal {

64
deps/v8/src/codegen-inl.h

@@ -0,0 +1,64 @@ (entire file added)
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_CODEGEN_INL_H_
#define V8_CODEGEN_INL_H_
#include "codegen.h"
#include "compiler.h"
#include "register-allocator-inl.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/codegen-ia32-inl.h"
#elif V8_TARGET_ARCH_X64
#include "x64/codegen-x64-inl.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/codegen-mips-inl.h"
#else
#error Unsupported target architecture.
#endif
namespace v8 {
namespace internal {
Handle<Script> CodeGenerator::script() { return info_->script(); }
bool CodeGenerator::is_eval() { return info_->is_eval(); }
Scope* CodeGenerator::scope() { return info_->function()->scope(); }
StrictModeFlag CodeGenerator::strict_mode_flag() {
return info_->function()->strict_mode() ? kStrictMode : kNonStrictMode;
}
} } // namespace v8::internal
#endif // V8_CODEGEN_INL_H_

315
deps/v8/src/codegen.cc

@ -1,4 +1,4 @@
// Copyright 2011 the V8 project authors. All rights reserved. // Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without // Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are // modification, are permitted provided that the following conditions are
// met: // met:
@ -28,13 +28,16 @@
#include "v8.h" #include "v8.h"
#include "bootstrapper.h" #include "bootstrapper.h"
#include "codegen.h" #include "codegen-inl.h"
#include "compiler.h" #include "compiler.h"
#include "debug.h" #include "debug.h"
#include "prettyprinter.h" #include "prettyprinter.h"
#include "register-allocator-inl.h"
#include "rewriter.h" #include "rewriter.h"
#include "runtime.h" #include "runtime.h"
#include "scopeinfo.h"
#include "stub-cache.h" #include "stub-cache.h"
#include "virtual-frame-inl.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -58,6 +61,67 @@ Comment::~Comment() {
#undef __ #undef __
CodeGenerator* CodeGeneratorScope::top_ = NULL;
void CodeGenerator::ProcessDeferred() {
while (!deferred_.is_empty()) {
DeferredCode* code = deferred_.RemoveLast();
ASSERT(masm_ == code->masm());
// Record position of deferred code stub.
masm_->positions_recorder()->RecordStatementPosition(
code->statement_position());
if (code->position() != RelocInfo::kNoPosition) {
masm_->positions_recorder()->RecordPosition(code->position());
}
// Generate the code.
Comment cmnt(masm_, code->comment());
masm_->bind(code->entry_label());
if (code->AutoSaveAndRestore()) {
code->SaveRegisters();
}
code->Generate();
if (code->AutoSaveAndRestore()) {
code->RestoreRegisters();
code->Exit();
}
}
}
void DeferredCode::Exit() {
masm_->jmp(exit_label());
}
void CodeGenerator::SetFrame(VirtualFrame* new_frame,
RegisterFile* non_frame_registers) {
RegisterFile saved_counts;
if (has_valid_frame()) {
frame_->DetachFromCodeGenerator();
// The remaining register reference counts are the non-frame ones.
allocator_->SaveTo(&saved_counts);
}
if (new_frame != NULL) {
// Restore the non-frame register references that go with the new frame.
allocator_->RestoreFrom(non_frame_registers);
new_frame->AttachToCodeGenerator();
}
frame_ = new_frame;
saved_counts.CopyTo(non_frame_registers);
}
void CodeGenerator::DeleteFrame() {
if (has_valid_frame()) {
frame_->DetachFromCodeGenerator();
frame_ = NULL;
}
}
void CodeGenerator::MakeCodePrologue(CompilationInfo* info) { void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
#ifdef DEBUG #ifdef DEBUG
bool print_source = false; bool print_source = false;
@ -65,7 +129,7 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
bool print_json_ast = false; bool print_json_ast = false;
const char* ftype; const char* ftype;
if (Isolate::Current()->bootstrapper()->IsActive()) { if (Bootstrapper::IsActive()) {
print_source = FLAG_print_builtin_source; print_source = FLAG_print_builtin_source;
print_ast = FLAG_print_builtin_ast; print_ast = FLAG_print_builtin_ast;
print_json_ast = FLAG_print_builtin_json_ast; print_json_ast = FLAG_print_builtin_json_ast;
@ -114,17 +178,13 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm, Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
Code::Flags flags, Code::Flags flags,
CompilationInfo* info) { CompilationInfo* info) {
Isolate* isolate = info->isolate();
// Allocate and install the code. // Allocate and install the code.
CodeDesc desc; CodeDesc desc;
masm->GetCode(&desc); masm->GetCode(&desc);
Handle<Code> code = Handle<Code> code = Factory::NewCode(desc, flags, masm->CodeObject());
isolate->factory()->NewCode(desc, flags, masm->CodeObject());
if (!code.is_null()) { if (!code.is_null()) {
isolate->counters()->total_compiled_code_size()->Increment( Counters::total_compiled_code_size.Increment(code->instruction_size());
code->instruction_size());
} }
return code; return code;
} }
@@ -132,7 +192,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,

 void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
 #ifdef ENABLE_DISASSEMBLER
-  bool print_code = Isolate::Current()->bootstrapper()->IsActive()
+  bool print_code = Bootstrapper::IsActive()
       ? FLAG_print_builtin_code
       : (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
   Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
@@ -169,18 +229,62 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
 #endif  // ENABLE_DISASSEMBLER
 }
-#ifdef ENABLE_LOGGING_AND_PROFILING
-static Vector<const char> kRegexp = CStrVector("regexp");
+// Generate the code.  Compile the AST and assemble all the pieces into a
+// Code object.
+bool CodeGenerator::MakeCode(CompilationInfo* info) {
+  // When using Crankshaft the classic backend should never be used.
+  ASSERT(!V8::UseCrankshaft());
+  Handle<Script> script = info->script();
+  if (!script->IsUndefined() && !script->source()->IsUndefined()) {
+    int len = String::cast(script->source())->length();
+    Counters::total_old_codegen_source_size.Increment(len);
+  }
+  if (FLAG_trace_codegen) {
+    PrintF("Classic Compiler - ");
+  }
+  MakeCodePrologue(info);
+  // Generate code.
+  const int kInitialBufferSize = 4 * KB;
+  MacroAssembler masm(NULL, kInitialBufferSize);
+#ifdef ENABLE_GDB_JIT_INTERFACE
+  masm.positions_recorder()->StartGDBJITLineInfoRecording();
+#endif
+  CodeGenerator cgen(&masm);
+  CodeGeneratorScope scope(&cgen);
+  cgen.Generate(info);
+  if (cgen.HasStackOverflow()) {
+    ASSERT(!Top::has_pending_exception());
+    return false;
+  }
+  InLoopFlag in_loop = info->is_in_loop() ? IN_LOOP : NOT_IN_LOOP;
+  Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
+  Handle<Code> code = MakeCodeEpilogue(cgen.masm(), flags, info);
+  // There is no stack check table in code generated by the classic backend.
+  code->SetNoStackCheckTable();
+  CodeGenerator::PrintCode(code, info);
+  info->SetCode(code);  // May be an empty handle.
+#ifdef ENABLE_GDB_JIT_INTERFACE
+  if (FLAG_gdbjit && !code.is_null()) {
+    GDBJITLineInfo* lineinfo =
+        masm.positions_recorder()->DetachGDBJITLineInfo();
+    GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
+  }
+#endif
+  return !code.is_null();
+}
+
+
+#ifdef ENABLE_LOGGING_AND_PROFILING

 bool CodeGenerator::ShouldGenerateLog(Expression* type) {
   ASSERT(type != NULL);
-  Isolate* isolate = Isolate::Current();
-  if (!isolate->logger()->is_logging() && !CpuProfiler::is_profiling(isolate)) {
-    return false;
-  }
+  if (!Logger::is_logging() && !CpuProfiler::is_profiling()) return false;
   Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
   if (FLAG_log_regexp) {
+    static Vector<const char> kRegexp = CStrVector("regexp");
     if (name->IsEqualTo(kRegexp))
       return true;
   }
@@ -190,6 +294,120 @@ bool CodeGenerator::ShouldGenerateLog(Expression* type) {
 #endif
+void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
+  int length = declarations->length();
+  int globals = 0;
+  for (int i = 0; i < length; i++) {
+    Declaration* node = declarations->at(i);
+    Variable* var = node->proxy()->var();
+    Slot* slot = var->AsSlot();
+
+    // If it was not possible to allocate the variable at compile
+    // time, we need to "declare" it at runtime to make sure it
+    // actually exists in the local context.
+    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
+      VisitDeclaration(node);
+    } else {
+      // Count global variables and functions for later processing.
+      globals++;
+    }
+  }
+
+  // Return in case of no declared global functions or variables.
+  if (globals == 0) return;
+
+  // Compute array of global variable and function declarations.
+  Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
+  for (int j = 0, i = 0; i < length; i++) {
+    Declaration* node = declarations->at(i);
+    Variable* var = node->proxy()->var();
+    Slot* slot = var->AsSlot();
+
+    if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
+      // Skip - already processed.
+    } else {
+      array->set(j++, *(var->name()));
+      if (node->fun() == NULL) {
+        if (var->mode() == Variable::CONST) {
+          // In case this is a const property use the hole.
+          array->set_the_hole(j++);
+        } else {
+          array->set_undefined(j++);
+        }
+      } else {
+        Handle<SharedFunctionInfo> function =
+            Compiler::BuildFunctionInfo(node->fun(), script());
+        // Check for stack-overflow exception.
+        if (function.is_null()) {
+          SetStackOverflow();
+          return;
+        }
+        array->set(j++, *function);
+      }
+    }
+  }
+
+  // Invoke the platform-dependent code generator to do the actual
+  // declaration of the global variables and functions.
+  DeclareGlobals(array);
+}
+void CodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
+  UNREACHABLE();
+}
+
+
+// Lookup table for code generators for special runtime calls which are
+// generated inline.
+#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
+    &CodeGenerator::Generate##Name,
+
+const CodeGenerator::InlineFunctionGenerator
+    CodeGenerator::kInlineFunctionGenerators[] = {
+        INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
+        INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
+};
+#undef INLINE_FUNCTION_GENERATOR_ADDRESS
+
+
+bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
+  ZoneList<Expression*>* args = node->arguments();
+  Handle<String> name = node->name();
+  Runtime::Function* function = node->function();
+  if (function != NULL && function->intrinsic_type == Runtime::INLINE) {
+    int lookup_index = static_cast<int>(function->function_id) -
+        static_cast<int>(Runtime::kFirstInlineFunction);
+    ASSERT(lookup_index >= 0);
+    ASSERT(static_cast<size_t>(lookup_index) <
+           ARRAY_SIZE(kInlineFunctionGenerators));
+    InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
+    (this->*generator)(args);
+    return true;
+  }
+  return false;
+}
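CheckForInlineRuntimeCall dispatches through a dense table of
pointer-to-member functions, indexed by the runtime function id relative to
kFirstInlineFunction. A self-contained sketch of the same dispatch pattern
(the class and generator names here are made up for illustration):

    #include <cstdio>

    class Gen {
     public:
      typedef void (Gen::*Generator)(int argc);

      void GenerateIsSmi(int argc)   { std::printf("IsSmi/%d\n", argc); }
      void GenerateMathPow(int argc) { std::printf("MathPow/%d\n", argc); }

      // Table indexed by a dense id, mirroring kInlineFunctionGenerators.
      bool Dispatch(int id, int argc) {
        static const Generator kTable[] =
            { &Gen::GenerateIsSmi, &Gen::GenerateMathPow };
        const int count = sizeof(kTable) / sizeof(kTable[0]);
        if (id < 0 || id >= count) return false;
        (this->*kTable[id])(argc);  // same (this->*generator)(args) shape
        return true;
      }
    };

    int main() {
      Gen g;
      g.Dispatch(1, 2);  // prints "MathPow/2"
    }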
+// Simple condition analysis.  ALWAYS_TRUE and ALWAYS_FALSE represent a
+// known result for the test expression, with no side effects.
+CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
+    Expression* cond) {
+  if (cond == NULL) return ALWAYS_TRUE;
+
+  Literal* lit = cond->AsLiteral();
+  if (lit == NULL) return DONT_KNOW;
+
+  if (lit->IsTrue()) {
+    return ALWAYS_TRUE;
+  } else if (lit->IsFalse()) {
+    return ALWAYS_FALSE;
+  }
+
+  return DONT_KNOW;
+}
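The analysis above only recognizes a missing condition or a literal; anything
else is DONT_KNOW, since evaluating an arbitrary expression could have side
effects. A standalone re-statement of the same tri-state logic:

    #include <optional>

    enum class Cond { kAlwaysTrue, kAlwaysFalse, kDontKnow };

    // has_condition=false models for(;;); literal is set only for literals.
    Cond Analyze(bool has_condition, std::optional<bool> literal) {
      if (!has_condition) return Cond::kAlwaysTrue;
      if (!literal) return Cond::kDontKnow;
      return *literal ? Cond::kAlwaysTrue : Cond::kAlwaysFalse;
    }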
 bool CodeGenerator::RecordPositions(MacroAssembler* masm,
                                     int pos,
                                     bool right_here) {
@@ -204,20 +422,61 @@ bool CodeGenerator::RecordPositions(MacroAssembler* masm,
 }

+void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
+  if (FLAG_debug_info) RecordPositions(masm(), fun->start_position(), false);
+}
+
+
+void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
+  if (FLAG_debug_info) RecordPositions(masm(), fun->end_position() - 1, false);
+}
+
+
+void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
+  if (FLAG_debug_info) RecordPositions(masm(), stmt->statement_pos(), false);
+}
+
+
+void CodeGenerator::CodeForDoWhileConditionPosition(DoWhileStatement* stmt) {
+  if (FLAG_debug_info)
+    RecordPositions(masm(), stmt->condition_position(), false);
+}
+
+
+void CodeGenerator::CodeForSourcePosition(int pos) {
+  if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
+    masm()->positions_recorder()->RecordPosition(pos);
+  }
+}
+
+
+const char* GenericUnaryOpStub::GetName() {
+  switch (op_) {
+    case Token::SUB:
+      if (negative_zero_ == kStrictNegativeZero) {
+        return overwrite_ == UNARY_OVERWRITE
+            ? "GenericUnaryOpStub_SUB_Overwrite_Strict0"
+            : "GenericUnaryOpStub_SUB_Alloc_Strict0";
+      } else {
+        return overwrite_ == UNARY_OVERWRITE
+            ? "GenericUnaryOpStub_SUB_Overwrite_Ignore0"
+            : "GenericUnaryOpStub_SUB_Alloc_Ignore0";
+      }
+    case Token::BIT_NOT:
+      return overwrite_ == UNARY_OVERWRITE
+          ? "GenericUnaryOpStub_BIT_NOT_Overwrite"
+          : "GenericUnaryOpStub_BIT_NOT_Alloc";
+    default:
+      UNREACHABLE();
+      return "<unknown>";
+  }
+}
 void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
   switch (type_) {
-    case READ_ELEMENT:
-      GenerateReadElement(masm);
-      break;
-    case NEW_NON_STRICT_FAST:
-      GenerateNewNonStrictFast(masm);
-      break;
-    case NEW_NON_STRICT_SLOW:
-      GenerateNewNonStrictSlow(masm);
-      break;
-    case NEW_STRICT:
-      GenerateNewStrict(masm);
-      break;
+    case READ_ELEMENT: GenerateReadElement(masm); break;
+    case NEW_OBJECT: GenerateNewObject(masm); break;
   }
 }

163  deps/v8/src/codegen.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -54,6 +54,7 @@
 // shared code:
 //   CodeGenerator
 //     ~CodeGenerator
+//     ProcessDeferred
 //     Generate
 //     ComputeLazyCompile
 //     BuildFunctionInfo
@@ -67,6 +68,7 @@
 //     CodeForDoWhileConditionPosition
 //     CodeForSourcePosition

+enum InitState { CONST_INIT, NOT_CONST_INIT };
 enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };

 #if V8_TARGET_ARCH_IA32
@@ -81,4 +83,163 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
 #error Unsupported target architecture.
 #endif
#include "register-allocator.h"
namespace v8 {
namespace internal {
// Code generation can be nested. Code generation scopes form a stack
// of active code generators.
class CodeGeneratorScope BASE_EMBEDDED {
public:
explicit CodeGeneratorScope(CodeGenerator* cgen) {
previous_ = top_;
top_ = cgen;
}
~CodeGeneratorScope() {
top_ = previous_;
}
static CodeGenerator* Current() {
ASSERT(top_ != NULL);
return top_;
}
private:
static CodeGenerator* top_;
CodeGenerator* previous_;
};
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
// State of used registers in a virtual frame.
class FrameRegisterState {
public:
// Captures the current state of the given frame.
explicit FrameRegisterState(VirtualFrame* frame);
// Saves the state in the stack.
void Save(MacroAssembler* masm) const;
// Restores the state from the stack.
void Restore(MacroAssembler* masm) const;
private:
// Constants indicating special actions. They should not be multiples
// of kPointerSize so they will not collide with valid offsets from
// the frame pointer.
static const int kIgnore = -1;
static const int kPush = 1;
// This flag is ored with a valid offset from the frame pointer, so
// it should fit in the low zero bits of a valid offset.
static const int kSyncedFlag = 2;
int registers_[RegisterAllocator::kNumRegisters];
};
#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
class FrameRegisterState {
public:
inline FrameRegisterState(VirtualFrame frame) : frame_(frame) { }
inline const VirtualFrame* frame() const { return &frame_; }
private:
VirtualFrame frame_;
};
#else
#error Unsupported target architecture.
#endif
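The ia32/x64 FrameRegisterState above packs three kinds of information into
one int per register: special markers chosen to be non-multiples of
kPointerSize (kIgnore, kPush), and real frame offsets, which are
pointer-aligned and therefore have low bits free for kSyncedFlag. A small
sketch of that encoding trick, outside V8:

    #include <cassert>

    const int kPointerSize = sizeof(void*);  // offsets are multiples of this
    const int kIgnore = -1;                  // not a valid aligned offset
    const int kPush = 1;                     // ditto
    const int kSyncedFlag = 2;               // fits in an offset's zero bits

    int EncodeSynced(int offset) {
      assert(offset % kPointerSize == 0);    // alignment frees the low bits
      return offset | kSyncedFlag;
    }

    bool IsSynced(int entry) { return (entry & kSyncedFlag) != 0; }
    int  RawOffset(int entry) { return entry & ~kSyncedFlag; }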
+
+// RuntimeCallHelper implementation that saves/restores state of a
+// virtual frame.
+class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
+ public:
+  // Does not take ownership of |frame_state|.
+  explicit VirtualFrameRuntimeCallHelper(const FrameRegisterState* frame_state)
+      : frame_state_(frame_state) {}
+
+  virtual void BeforeCall(MacroAssembler* masm) const;
+
+  virtual void AfterCall(MacroAssembler* masm) const;
+
+ private:
+  const FrameRegisterState* frame_state_;
+};
+
+
+// Deferred code objects are small pieces of code that are compiled
+// out of line.  They are used to defer the compilation of uncommon
+// paths thereby avoiding expensive jumps around uncommon code parts.
+class DeferredCode: public ZoneObject {
+ public:
+  DeferredCode();
+  virtual ~DeferredCode() { }
+
+  virtual void Generate() = 0;
+
+  MacroAssembler* masm() { return masm_; }
+
+  int statement_position() const { return statement_position_; }
+  int position() const { return position_; }
+
+  Label* entry_label() { return &entry_label_; }
+  Label* exit_label() { return &exit_label_; }
+
+#ifdef DEBUG
+  void set_comment(const char* comment) { comment_ = comment; }
+  const char* comment() const { return comment_; }
+#else
+  void set_comment(const char* comment) { }
+  const char* comment() const { return ""; }
+#endif
+
+  inline void Jump();
+  inline void Branch(Condition cc);
+  void BindExit() { masm_->bind(&exit_label_); }
+
+  const FrameRegisterState* frame_state() const { return &frame_state_; }
+
+  void SaveRegisters();
+  void RestoreRegisters();
+  void Exit();
+
+  // If this returns true then all registers will be saved for the duration
+  // of the Generate() call.  Otherwise the registers are not saved and the
+  // Generate() call must bracket any runtime calls with calls to
+  // SaveRegisters() and RestoreRegisters().  In this case the Generate()
+  // method must also call Exit() in order to return to the non-deferred
+  // code.
+  virtual bool AutoSaveAndRestore() { return true; }
+
+ protected:
+  MacroAssembler* masm_;
+
+ private:
+  int statement_position_;
+  int position_;
+
+  Label entry_label_;
+  Label exit_label_;
+
+  FrameRegisterState frame_state_;
+
+#ifdef DEBUG
+  const char* comment_;
+#endif
+
+  DISALLOW_COPY_AND_ASSIGN(DeferredCode);
+};
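Typical use of this protocol: the fast path emits a conditional branch to
entry_label(), the subclass's Generate() emits the uncommon path out of line,
and execution rejoins at exit_label(). A simplified analogue (plain C++, no
assembler) of how deferred slow paths are queued and emitted last:

    #include <cstdio>
    #include <memory>
    #include <vector>

    struct Deferred {
      virtual ~Deferred() {}
      virtual void Generate() = 0;  // emit the out-of-line slow path
    };

    struct DeferredOverflow : Deferred {
      void Generate() override { std::printf("  [slow: handle overflow]\n"); }
    };

    int main() {
      std::vector<std::unique_ptr<Deferred>> deferred;
      // Main emission: fast path plus a branch to the deferred entry.
      std::printf("fast: add; jump-on-overflow -> deferred\n");
      deferred.push_back(std::make_unique<DeferredOverflow>());
      // Analogue of CodeGenerator::ProcessDeferred(): drain the queue last,
      // so uncommon code never sits in the middle of the hot path.
      for (auto& d : deferred) d->Generate();
    }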
+
+} }  // namespace v8::internal

 #endif  // V8_CODEGEN_H_

317  deps/v8/src/compilation-cache.cc

@@ -33,6 +33,8 @@
 namespace v8 {
 namespace internal {

+// The number of sub caches covering the different types to cache.
+static const int kSubCacheCount = 4;
+
 // The number of generations for each sub cache.
 // The number of ScriptGenerations is carefully chosen based on histograms.
@@ -45,28 +47,162 @@ static const int kRegExpGenerations = 2;
 // Initial size of each compilation cache table allocated.
 static const int kInitialCacheSize = 64;

+// Index for the first generation in the cache.
+static const int kFirstGeneration = 0;

-CompilationCache::CompilationCache(Isolate* isolate)
-    : isolate_(isolate),
-      script_(isolate, kScriptGenerations),
-      eval_global_(isolate, kEvalGlobalGenerations),
-      eval_contextual_(isolate, kEvalContextualGenerations),
-      reg_exp_(isolate, kRegExpGenerations),
-      enabled_(true) {
-  CompilationSubCache* subcaches[kSubCacheCount] =
-      {&script_, &eval_global_, &eval_contextual_, &reg_exp_};
-  for (int i = 0; i < kSubCacheCount; ++i) {
-    subcaches_[i] = subcaches[i];
-  }
-}
+// The compilation cache consists of several generational sub-caches that use
+// this class as a base class.  A sub-cache contains a compilation cache table
+// for each generation of the sub-cache.  Since the same source code string has
+// different compiled code for scripts and evals, we use separate sub-caches
+// for different compilation modes, to avoid retrieving the wrong result.
+class CompilationSubCache {
+ public:
+  explicit CompilationSubCache(int generations): generations_(generations) {
+    tables_ = NewArray<Object*>(generations);
+  }
+
+  ~CompilationSubCache() { DeleteArray(tables_); }
+
+  // Get the compilation cache tables for a specific generation.
+  Handle<CompilationCacheTable> GetTable(int generation);
+
+  // Accessors for first generation.
+  Handle<CompilationCacheTable> GetFirstTable() {
+    return GetTable(kFirstGeneration);
+  }
+  void SetFirstTable(Handle<CompilationCacheTable> value) {
+    ASSERT(kFirstGeneration < generations_);
+    tables_[kFirstGeneration] = *value;
+  }
+
+  // Age the sub-cache by evicting the oldest generation and creating a new
+  // young generation.
+  void Age();
+
+  // GC support.
+  void Iterate(ObjectVisitor* v);
+  void IterateFunctions(ObjectVisitor* v);
+
+  // Clear this sub-cache evicting all its content.
+  void Clear();
+
+  // Remove given shared function info from sub-cache.
+  void Remove(Handle<SharedFunctionInfo> function_info);
+
+  // Number of generations in this sub-cache.
+  inline int generations() { return generations_; }
+
+ private:
+  int generations_;  // Number of generations.
+  Object** tables_;  // Compilation cache tables - one for each generation.
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
+};
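The generational scheme is worth spelling out: each sub-cache keeps one table
per generation, Age() retires the oldest table wholesale (no per-entry
eviction), and a hit in an older generation is re-inserted into generation 0
so that live entries keep surviving. A compact standalone sketch of the same
policy:

    #include <string>
    #include <unordered_map>
    #include <vector>

    class GenerationalCache {
     public:
      explicit GenerationalCache(int generations) : tables_(generations) {}

      void Put(const std::string& source, int code) { tables_[0][source] = code; }

      bool Lookup(const std::string& source, int* code) {
        for (size_t g = 0; g < tables_.size(); ++g) {
          auto it = tables_[g].find(source);
          if (it == tables_[g].end()) continue;
          *code = it->second;
          if (g != 0) Put(source, *code);   // promote to survive longer
          return true;
        }
        return false;
      }

      // Analogue of CompilationSubCache::Age(), run at mark-compact prologue.
      void Age() {
        tables_.pop_back();                   // evict the oldest generation
        tables_.insert(tables_.begin(), {});  // fresh "unborn" young table
      }

     private:
      std::vector<std::unordered_map<std::string, int>> tables_;
    };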
-
-CompilationCache::~CompilationCache() {}

+// Sub-cache for scripts.
+class CompilationCacheScript : public CompilationSubCache {
+ public:
+  explicit CompilationCacheScript(int generations)
+      : CompilationSubCache(generations) { }
+
+  Handle<SharedFunctionInfo> Lookup(Handle<String> source,
+                                    Handle<Object> name,
+                                    int line_offset,
+                                    int column_offset);
+  void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
+
+ private:
+  MUST_USE_RESULT MaybeObject* TryTablePut(
+      Handle<String> source, Handle<SharedFunctionInfo> function_info);
+
+  // Note: Returns a new hash table if operation results in expansion.
+  Handle<CompilationCacheTable> TablePut(
+      Handle<String> source, Handle<SharedFunctionInfo> function_info);
+
+  bool HasOrigin(Handle<SharedFunctionInfo> function_info,
+                 Handle<Object> name,
+                 int line_offset,
+                 int column_offset);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
+};
+
+
+// Sub-cache for eval scripts.
+class CompilationCacheEval: public CompilationSubCache {
+ public:
+  explicit CompilationCacheEval(int generations)
+      : CompilationSubCache(generations) { }
+
+  Handle<SharedFunctionInfo> Lookup(Handle<String> source,
+                                    Handle<Context> context,
+                                    StrictModeFlag strict_mode);
+
+  void Put(Handle<String> source,
+           Handle<Context> context,
+           Handle<SharedFunctionInfo> function_info);
+
+ private:
+  MUST_USE_RESULT MaybeObject* TryTablePut(
+      Handle<String> source,
+      Handle<Context> context,
+      Handle<SharedFunctionInfo> function_info);
+
+  // Note: Returns a new hash table if operation results in expansion.
+  Handle<CompilationCacheTable> TablePut(
+      Handle<String> source,
+      Handle<Context> context,
+      Handle<SharedFunctionInfo> function_info);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
+};
+
+
+// Sub-cache for regular expressions.
+class CompilationCacheRegExp: public CompilationSubCache {
+ public:
+  explicit CompilationCacheRegExp(int generations)
+      : CompilationSubCache(generations) { }
+
+  Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
+
+  void Put(Handle<String> source,
+           JSRegExp::Flags flags,
+           Handle<FixedArray> data);
+
+ private:
+  MUST_USE_RESULT MaybeObject* TryTablePut(Handle<String> source,
+                                           JSRegExp::Flags flags,
+                                           Handle<FixedArray> data);
+
+  // Note: Returns a new hash table if operation results in expansion.
+  Handle<CompilationCacheTable> TablePut(Handle<String> source,
+                                         JSRegExp::Flags flags,
+                                         Handle<FixedArray> data);
+
+  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
+};
+
+
+// Statically allocate all the sub-caches.
+static CompilationCacheScript script(kScriptGenerations);
+static CompilationCacheEval eval_global(kEvalGlobalGenerations);
+static CompilationCacheEval eval_contextual(kEvalContextualGenerations);
+static CompilationCacheRegExp reg_exp(kRegExpGenerations);
+static CompilationSubCache* subcaches[kSubCacheCount] =
+    {&script, &eval_global, &eval_contextual, &reg_exp};
+
+
+// Current enable state of the compilation cache.
+static bool enabled = true;
+
+static inline bool IsEnabled() {
+  return FLAG_compilation_cache && enabled;
+}
+
+
-static Handle<CompilationCacheTable> AllocateTable(Isolate* isolate, int size) {
-  CALL_HEAP_FUNCTION(isolate,
-                     CompilationCacheTable::Allocate(size),
+static Handle<CompilationCacheTable> AllocateTable(int size) {
+  CALL_HEAP_FUNCTION(CompilationCacheTable::Allocate(size),
                      CompilationCacheTable);
 }
@@ -75,16 +211,17 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
   ASSERT(generation < generations_);
   Handle<CompilationCacheTable> result;
   if (tables_[generation]->IsUndefined()) {
-    result = AllocateTable(isolate(), kInitialCacheSize);
+    result = AllocateTable(kInitialCacheSize);
     tables_[generation] = *result;
   } else {
     CompilationCacheTable* table =
         CompilationCacheTable::cast(tables_[generation]);
-    result = Handle<CompilationCacheTable>(table, isolate());
+    result = Handle<CompilationCacheTable>(table);
   }
   return result;
 }
 void CompilationSubCache::Age() {
   // Age the generations implicitly killing off the oldest.
   for (int i = generations_ - 1; i > 0; i--) {
@@ -92,12 +229,12 @@ void CompilationSubCache::Age() {
   }

   // Set the first generation as unborn.
-  tables_[0] = isolate()->heap()->undefined_value();
+  tables_[0] = Heap::undefined_value();
 }


 void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
-  Object* undefined = isolate()->heap()->raw_unchecked_undefined_value();
+  Object* undefined = Heap::raw_unchecked_undefined_value();
   for (int i = 0; i < generations_; i++) {
     if (tables_[i] != undefined) {
       reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);
@@ -112,14 +249,14 @@ void CompilationSubCache::Iterate(ObjectVisitor* v) {

 void CompilationSubCache::Clear() {
-  MemsetPointer(tables_, isolate()->heap()->undefined_value(), generations_);
+  MemsetPointer(tables_, Heap::undefined_value(), generations_);
 }


 void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
   // Probe the script generation tables.  Make sure not to leak handles
   // into the caller's handle scope.
-  { HandleScope scope(isolate());
+  { HandleScope scope;
     for (int generation = 0; generation < generations(); generation++) {
       Handle<CompilationCacheTable> table = GetTable(generation);
       table->Remove(*function_info);
@@ -128,13 +265,6 @@ void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
 }

-CompilationCacheScript::CompilationCacheScript(Isolate* isolate,
-                                               int generations)
-    : CompilationSubCache(isolate, generations),
-      script_histogram_(NULL),
-      script_histogram_initialized_(false) { }

 // We only re-use a cached function for some script source code if the
 // script originates from the same place.  This is to avoid issues
 // when reporting errors, etc.
@@ -144,7 +274,7 @@ bool CompilationCacheScript::HasOrigin(
     int line_offset,
     int column_offset) {
   Handle<Script> script =
-      Handle<Script>(Script::cast(function_info->script()), isolate());
+      Handle<Script>(Script::cast(function_info->script()));
   // If the script name isn't set, the boilerplate script should have
   // an undefined name to have the same origin.
   if (name.is_null()) {
@@ -173,10 +303,10 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
   // Probe the script generation tables.  Make sure not to leak handles
   // into the caller's handle scope.
-  { HandleScope scope(isolate());
+  { HandleScope scope;
     for (generation = 0; generation < generations(); generation++) {
       Handle<CompilationCacheTable> table = GetTable(generation);
-      Handle<Object> probe(table->Lookup(*source), isolate());
+      Handle<Object> probe(table->Lookup(*source));
       if (probe->IsSharedFunctionInfo()) {
         Handle<SharedFunctionInfo> function_info =
             Handle<SharedFunctionInfo>::cast(probe);
@@ -190,34 +320,30 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
     }
   }

-  if (!script_histogram_initialized_) {
-    script_histogram_ = isolate()->stats_table()->CreateHistogram(
+  static void* script_histogram = StatsTable::CreateHistogram(
       "V8.ScriptCache",
       0,
       kScriptGenerations,
       kScriptGenerations + 1);
-    script_histogram_initialized_ = true;
-  }

-  if (script_histogram_ != NULL) {
+  if (script_histogram != NULL) {
     // The level NUMBER_OF_SCRIPT_GENERATIONS is equivalent to a cache miss.
-    isolate()->stats_table()->AddHistogramSample(script_histogram_, generation);
+    StatsTable::AddHistogramSample(script_histogram, generation);
   }

   // Once outside the manacles of the handle scope, we need to recheck
   // to see if we actually found a cached script.  If so, we return a
   // handle created in the caller's handle scope.
   if (result != NULL) {
-    Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result),
-                                      isolate());
+    Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
     ASSERT(HasOrigin(shared, name, line_offset, column_offset));
     // If the script was found in a later generation, we promote it to
     // the first generation to let it survive longer in the cache.
     if (generation != 0) Put(source, shared);
-    isolate()->counters()->compilation_cache_hits()->Increment();
+    Counters::compilation_cache_hits.Increment();
     return shared;
   } else {
-    isolate()->counters()->compilation_cache_misses()->Increment();
+    Counters::compilation_cache_misses.Increment();
     return Handle<SharedFunctionInfo>::null();
   }
 }
@@ -234,15 +360,13 @@ MaybeObject* CompilationCacheScript::TryTablePut(
 Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
     Handle<String> source,
     Handle<SharedFunctionInfo> function_info) {
-  CALL_HEAP_FUNCTION(isolate(),
-                     TryTablePut(source, function_info),
-                     CompilationCacheTable);
+  CALL_HEAP_FUNCTION(TryTablePut(source, function_info), CompilationCacheTable);
 }


 void CompilationCacheScript::Put(Handle<String> source,
                                  Handle<SharedFunctionInfo> function_info) {
-  HandleScope scope(isolate());
+  HandleScope scope;
   SetFirstTable(TablePut(source, function_info));
 }
@@ -256,7 +380,7 @@ Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
   // having cleared the cache.
   Object* result = NULL;
   int generation;
-  { HandleScope scope(isolate());
+  { HandleScope scope;
     for (generation = 0; generation < generations(); generation++) {
       Handle<CompilationCacheTable> table = GetTable(generation);
       result = table->LookupEval(*source, *context, strict_mode);
@@ -267,14 +391,14 @@ Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
   }
   if (result->IsSharedFunctionInfo()) {
     Handle<SharedFunctionInfo>
-        function_info(SharedFunctionInfo::cast(result), isolate());
+        function_info(SharedFunctionInfo::cast(result));
     if (generation != 0) {
       Put(source, context, function_info);
     }
-    isolate()->counters()->compilation_cache_hits()->Increment();
+    Counters::compilation_cache_hits.Increment();
     return function_info;
   } else {
-    isolate()->counters()->compilation_cache_misses()->Increment();
+    Counters::compilation_cache_misses.Increment();
     return Handle<SharedFunctionInfo>::null();
   }
 }
@@ -293,8 +417,7 @@ Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
     Handle<String> source,
     Handle<Context> context,
     Handle<SharedFunctionInfo> function_info) {
-  CALL_HEAP_FUNCTION(isolate(),
-                     TryTablePut(source, context, function_info),
+  CALL_HEAP_FUNCTION(TryTablePut(source, context, function_info),
                      CompilationCacheTable);
 }

@@ -302,7 +425,7 @@ Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
 void CompilationCacheEval::Put(Handle<String> source,
                                Handle<Context> context,
                                Handle<SharedFunctionInfo> function_info) {
-  HandleScope scope(isolate());
+  HandleScope scope;
   SetFirstTable(TablePut(source, context, function_info));
 }
@@ -314,7 +437,7 @@ Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
   // having cleared the cache.
   Object* result = NULL;
   int generation;
-  { HandleScope scope(isolate());
+  { HandleScope scope;
     for (generation = 0; generation < generations(); generation++) {
       Handle<CompilationCacheTable> table = GetTable(generation);
       result = table->LookupRegExp(*source, flags);
@@ -324,14 +447,14 @@ Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
     }
   }
   if (result->IsFixedArray()) {
-    Handle<FixedArray> data(FixedArray::cast(result), isolate());
+    Handle<FixedArray> data(FixedArray::cast(result));
     if (generation != 0) {
       Put(source, flags, data);
     }
-    isolate()->counters()->compilation_cache_hits()->Increment();
+    Counters::compilation_cache_hits.Increment();
     return data;
   } else {
-    isolate()->counters()->compilation_cache_misses()->Increment();
+    Counters::compilation_cache_misses.Increment();
     return Handle<FixedArray>::null();
   }
 }
@@ -350,16 +473,14 @@ Handle<CompilationCacheTable> CompilationCacheRegExp::TablePut(
     Handle<String> source,
     JSRegExp::Flags flags,
     Handle<FixedArray> data) {
-  CALL_HEAP_FUNCTION(isolate(),
-                     TryTablePut(source, flags, data),
-                     CompilationCacheTable);
+  CALL_HEAP_FUNCTION(TryTablePut(source, flags, data), CompilationCacheTable);
 }


 void CompilationCacheRegExp::Put(Handle<String> source,
                                  JSRegExp::Flags flags,
                                  Handle<FixedArray> data) {
-  HandleScope scope(isolate());
+  HandleScope scope;
   SetFirstTable(TablePut(source, flags, data));
 }
@@ -367,9 +488,9 @@ void CompilationCacheRegExp::Put(Handle<String> source,

 void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
   if (!IsEnabled()) return;

-  eval_global_.Remove(function_info);
-  eval_contextual_.Remove(function_info);
-  script_.Remove(function_info);
+  eval_global.Remove(function_info);
+  eval_contextual.Remove(function_info);
+  script.Remove(function_info);
 }

@@ -381,7 +502,7 @@ Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source,
     return Handle<SharedFunctionInfo>::null();
   }

-  return script_.Lookup(source, name, line_offset, column_offset);
+  return script.Lookup(source, name, line_offset, column_offset);
 }

@@ -396,9 +517,9 @@ Handle<SharedFunctionInfo> CompilationCache::LookupEval(
   Handle<SharedFunctionInfo> result;
   if (is_global) {
-    result = eval_global_.Lookup(source, context, strict_mode);
+    result = eval_global.Lookup(source, context, strict_mode);
   } else {
-    result = eval_contextual_.Lookup(source, context, strict_mode);
+    result = eval_contextual.Lookup(source, context, strict_mode);
   }
   return result;
 }

@@ -410,7 +531,7 @@ Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
     return Handle<FixedArray>::null();
   }

-  return reg_exp_.Lookup(source, flags);
+  return reg_exp.Lookup(source, flags);
 }

@@ -420,7 +541,7 @@ void CompilationCache::PutScript(Handle<String> source,
     return;
   }

-  script_.Put(source, function_info);
+  script.Put(source, function_info);
 }

@@ -432,11 +553,11 @@ void CompilationCache::PutEval(Handle<String> source,
     return;
   }

-  HandleScope scope(isolate());
+  HandleScope scope;
   if (is_global) {
-    eval_global_.Put(source, context, function_info);
+    eval_global.Put(source, context, function_info);
   } else {
-    eval_contextual_.Put(source, context, function_info);
+    eval_contextual.Put(source, context, function_info);
   }
 }

@@ -449,45 +570,83 @@ void CompilationCache::PutRegExp(Handle<String> source,
     return;
   }

-  reg_exp_.Put(source, flags, data);
+  reg_exp.Put(source, flags, data);
 }

+
+static bool SourceHashCompare(void* key1, void* key2) {
+  return key1 == key2;
+}
+
+
+static HashMap* EagerOptimizingSet() {
+  static HashMap map(&SourceHashCompare);
+  return &map;
+}
+
+
+bool CompilationCache::ShouldOptimizeEagerly(Handle<JSFunction> function) {
+  if (FLAG_opt_eagerly) return true;
+  uint32_t hash = function->SourceHash();
+  void* key = reinterpret_cast<void*>(hash);
+  return EagerOptimizingSet()->Lookup(key, hash, false) != NULL;
+}
+
+
+void CompilationCache::MarkForEagerOptimizing(Handle<JSFunction> function) {
+  uint32_t hash = function->SourceHash();
+  void* key = reinterpret_cast<void*>(hash);
+  EagerOptimizingSet()->Lookup(key, hash, true);
+}
+
+
+void CompilationCache::MarkForLazyOptimizing(Handle<JSFunction> function) {
+  uint32_t hash = function->SourceHash();
+  void* key = reinterpret_cast<void*>(hash);
+  EagerOptimizingSet()->Remove(key, hash);
+}
+
+
+void CompilationCache::ResetEagerOptimizingData() {
+  HashMap* set = EagerOptimizingSet();
+  if (set->occupancy() > 0) set->Clear();
+}
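Note the keying choice above: the eager-optimizing set stores a hash of the
function's source rather than a pointer to the function, so the mark survives
code flushing and applies to any closure compiled from the same source text.
A standalone sketch of that idea:

    #include <cstdint>
    #include <functional>
    #include <string>
    #include <unordered_set>

    class EagerSet {
     public:
      void MarkEager(const std::string& src) { hashes_.insert(Hash(src)); }
      void MarkLazy(const std::string& src)  { hashes_.erase(Hash(src)); }
      bool ShouldOptimizeEagerly(const std::string& src) const {
        return hashes_.count(Hash(src)) != 0;
      }

     private:
      static uint32_t Hash(const std::string& s) {
        return static_cast<uint32_t>(std::hash<std::string>()(s));
      }
      std::unordered_set<uint32_t> hashes_;
    };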

 void CompilationCache::Clear() {
   for (int i = 0; i < kSubCacheCount; i++) {
-    subcaches_[i]->Clear();
+    subcaches[i]->Clear();
   }
 }


 void CompilationCache::Iterate(ObjectVisitor* v) {
   for (int i = 0; i < kSubCacheCount; i++) {
-    subcaches_[i]->Iterate(v);
+    subcaches[i]->Iterate(v);
   }
 }


 void CompilationCache::IterateFunctions(ObjectVisitor* v) {
   for (int i = 0; i < kSubCacheCount; i++) {
-    subcaches_[i]->IterateFunctions(v);
+    subcaches[i]->IterateFunctions(v);
   }
 }


 void CompilationCache::MarkCompactPrologue() {
   for (int i = 0; i < kSubCacheCount; i++) {
-    subcaches_[i]->Age();
+    subcaches[i]->Age();
   }
 }


 void CompilationCache::Enable() {
-  enabled_ = true;
+  enabled = true;
 }


 void CompilationCache::Disable() {
-  enabled_ = false;
+  enabled = false;
   Clear();
 }

207  deps/v8/src/compilation-cache.h

@@ -31,152 +31,6 @@
 namespace v8 {
 namespace internal {

-class HashMap;
-
-// The compilation cache consists of several generational sub-caches that use
-// this class as a base class.  A sub-cache contains a compilation cache table
-// for each generation of the sub-cache.  Since the same source code string has
-// different compiled code for scripts and evals, we use separate sub-caches
-// for different compilation modes, to avoid retrieving the wrong result.
-class CompilationSubCache {
- public:
-  CompilationSubCache(Isolate* isolate, int generations)
-      : isolate_(isolate),
-        generations_(generations) {
-    tables_ = NewArray<Object*>(generations);
-  }
-
-  ~CompilationSubCache() { DeleteArray(tables_); }
-
-  // Index for the first generation in the cache.
-  static const int kFirstGeneration = 0;
-
-  // Get the compilation cache tables for a specific generation.
-  Handle<CompilationCacheTable> GetTable(int generation);
-
-  // Accessors for first generation.
-  Handle<CompilationCacheTable> GetFirstTable() {
-    return GetTable(kFirstGeneration);
-  }
-  void SetFirstTable(Handle<CompilationCacheTable> value) {
-    ASSERT(kFirstGeneration < generations_);
-    tables_[kFirstGeneration] = *value;
-  }
-
-  // Age the sub-cache by evicting the oldest generation and creating a new
-  // young generation.
-  void Age();
-
-  // GC support.
-  void Iterate(ObjectVisitor* v);
-  void IterateFunctions(ObjectVisitor* v);
-
-  // Clear this sub-cache evicting all its content.
-  void Clear();
-
-  // Remove given shared function info from sub-cache.
-  void Remove(Handle<SharedFunctionInfo> function_info);
-
-  // Number of generations in this sub-cache.
-  inline int generations() { return generations_; }
-
- protected:
-  Isolate* isolate() { return isolate_; }
-
- private:
-  Isolate* isolate_;
-  int generations_;  // Number of generations.
-  Object** tables_;  // Compilation cache tables - one for each generation.
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
-};
-
-
-// Sub-cache for scripts.
-class CompilationCacheScript : public CompilationSubCache {
- public:
-  CompilationCacheScript(Isolate* isolate, int generations);
-
-  Handle<SharedFunctionInfo> Lookup(Handle<String> source,
-                                    Handle<Object> name,
-                                    int line_offset,
-                                    int column_offset);
-  void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
-
- private:
-  MUST_USE_RESULT MaybeObject* TryTablePut(
-      Handle<String> source, Handle<SharedFunctionInfo> function_info);
-
-  // Note: Returns a new hash table if operation results in expansion.
-  Handle<CompilationCacheTable> TablePut(
-      Handle<String> source, Handle<SharedFunctionInfo> function_info);
-
-  bool HasOrigin(Handle<SharedFunctionInfo> function_info,
-                 Handle<Object> name,
-                 int line_offset,
-                 int column_offset);
-
-  void* script_histogram_;
-  bool script_histogram_initialized_;
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
-};
-
-
-// Sub-cache for eval scripts.
-class CompilationCacheEval: public CompilationSubCache {
- public:
-  CompilationCacheEval(Isolate* isolate, int generations)
-      : CompilationSubCache(isolate, generations) { }
-
-  Handle<SharedFunctionInfo> Lookup(Handle<String> source,
-                                    Handle<Context> context,
-                                    StrictModeFlag strict_mode);
-
-  void Put(Handle<String> source,
-           Handle<Context> context,
-           Handle<SharedFunctionInfo> function_info);
-
- private:
-  MUST_USE_RESULT MaybeObject* TryTablePut(
-      Handle<String> source,
-      Handle<Context> context,
-      Handle<SharedFunctionInfo> function_info);
-
-  // Note: Returns a new hash table if operation results in expansion.
-  Handle<CompilationCacheTable> TablePut(
-      Handle<String> source,
-      Handle<Context> context,
-      Handle<SharedFunctionInfo> function_info);
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
-};
-
-
-// Sub-cache for regular expressions.
-class CompilationCacheRegExp: public CompilationSubCache {
- public:
-  CompilationCacheRegExp(Isolate* isolate, int generations)
-      : CompilationSubCache(isolate, generations) { }
-
-  Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
-
-  void Put(Handle<String> source,
-           JSRegExp::Flags flags,
-           Handle<FixedArray> data);
-
- private:
-  MUST_USE_RESULT MaybeObject* TryTablePut(Handle<String> source,
-                                           JSRegExp::Flags flags,
-                                           Handle<FixedArray> data);
-
-  // Note: Returns a new hash table if operation results in expansion.
-  Handle<CompilationCacheTable> TablePut(Handle<String> source,
-                                         JSRegExp::Flags flags,
-                                         Handle<FixedArray> data);
-
-  DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
-};

 // The compilation cache keeps shared function infos for compiled
 // scripts and evals. The shared function infos are looked up using
@@ -187,7 +41,7 @@ class CompilationCache {
   // Finds the script shared function info for a source
   // string. Returns an empty handle if the cache doesn't contain a
   // script for the given source string with the right origin.
-  Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
+  static Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
                                           Handle<Object> name,
                                           int line_offset,
                                           int column_offset);
@@ -195,80 +49,61 @@ class CompilationCache {
   // Finds the shared function info for a source string for eval in a
   // given context.  Returns an empty handle if the cache doesn't
   // contain a script for the given source string.
-  Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
+  static Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
                                         Handle<Context> context,
                                         bool is_global,
                                         StrictModeFlag strict_mode);

   // Returns the regexp data associated with the given regexp if it
   // is in cache, otherwise an empty handle.
-  Handle<FixedArray> LookupRegExp(Handle<String> source,
+  static Handle<FixedArray> LookupRegExp(Handle<String> source,
                                   JSRegExp::Flags flags);

   // Associate the (source, kind) pair to the shared function
   // info. This may overwrite an existing mapping.
-  void PutScript(Handle<String> source,
+  static void PutScript(Handle<String> source,
                  Handle<SharedFunctionInfo> function_info);

   // Associate the (source, context->closure()->shared(), kind) triple
   // with the shared function info. This may overwrite an existing mapping.
-  void PutEval(Handle<String> source,
+  static void PutEval(Handle<String> source,
                Handle<Context> context,
                bool is_global,
                Handle<SharedFunctionInfo> function_info);

   // Associate the (source, flags) pair to the given regexp data.
   // This may overwrite an existing mapping.
-  void PutRegExp(Handle<String> source,
+  static void PutRegExp(Handle<String> source,
                  JSRegExp::Flags flags,
                  Handle<FixedArray> data);

+  // Support for eager optimization tracking.
+  static bool ShouldOptimizeEagerly(Handle<JSFunction> function);
+  static void MarkForEagerOptimizing(Handle<JSFunction> function);
+  static void MarkForLazyOptimizing(Handle<JSFunction> function);
+
+  // Reset the eager optimization tracking data.
+  static void ResetEagerOptimizingData();
+
   // Clear the cache - also used to initialize the cache at startup.
-  void Clear();
+  static void Clear();

   // Remove given shared function info from all caches.
-  void Remove(Handle<SharedFunctionInfo> function_info);
+  static void Remove(Handle<SharedFunctionInfo> function_info);

   // GC support.
-  void Iterate(ObjectVisitor* v);
-  void IterateFunctions(ObjectVisitor* v);
+  static void Iterate(ObjectVisitor* v);
+  static void IterateFunctions(ObjectVisitor* v);

   // Notify the cache that a mark-sweep garbage collection is about to
   // take place. This is used to retire entries from the cache to
   // avoid keeping them alive too long without using them.
-  void MarkCompactPrologue();
+  static void MarkCompactPrologue();

   // Enable/disable compilation cache. Used by debugger to disable compilation
   // cache during debugging to make sure new scripts are always compiled.
-  void Enable();
-  void Disable();
-
- private:
-  explicit CompilationCache(Isolate* isolate);
-  ~CompilationCache();
-
-  HashMap* EagerOptimizingSet();
-
-  // The number of sub caches covering the different types to cache.
-  static const int kSubCacheCount = 4;
-
-  bool IsEnabled() { return FLAG_compilation_cache && enabled_; }
-
-  Isolate* isolate() { return isolate_; }
-
-  Isolate* isolate_;
-
-  CompilationCacheScript script_;
-  CompilationCacheEval eval_global_;
-  CompilationCacheEval eval_contextual_;
-  CompilationCacheRegExp reg_exp_;
-  CompilationSubCache* subcaches_[kSubCacheCount];
-
-  // Current enable state of the compilation cache.
-  bool enabled_;
-
-  friend class Isolate;
-
-  DISALLOW_COPY_AND_ASSIGN(CompilationCache);
+  static void Enable();
+  static void Disable();
 };

328  deps/v8/src/compiler.cc

@@ -30,8 +30,9 @@

 #include "compiler.h"

 #include "bootstrapper.h"
-#include "codegen.h"
+#include "codegen-inl.h"
 #include "compilation-cache.h"
+#include "data-flow.h"
 #include "debug.h"
 #include "full-codegen.h"
 #include "gdb-jit.h"
@@ -50,8 +51,7 @@ namespace internal {

 CompilationInfo::CompilationInfo(Handle<Script> script)
-    : isolate_(script->GetIsolate()),
-      flags_(0),
+    : flags_(0),
       function_(NULL),
       scope_(NULL),
       script_(script),
@@ -64,8 +64,7 @@ CompilationInfo::CompilationInfo(Handle<Script> script)

 CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
-    : isolate_(shared_info->GetIsolate()),
-      flags_(IsLazy::encode(true)),
+    : flags_(IsLazy::encode(true)),
       function_(NULL),
       scope_(NULL),
       shared_info_(shared_info),
@@ -79,8 +78,7 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)

 CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
-    : isolate_(closure->GetIsolate()),
-      flags_(IsLazy::encode(true)),
+    : flags_(IsLazy::encode(true)),
       function_(NULL),
       scope_(NULL),
       closure_(closure),
@@ -94,21 +92,22 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
 }
+// Disable optimization for the rest of the compilation pipeline.
 void CompilationInfo::DisableOptimization() {
-  bool is_optimizable_closure =
-      FLAG_optimize_closures &&
-      closure_.is_null() &&
-      !scope_->HasTrivialOuterContext() &&
-      !scope_->outer_scope_calls_non_strict_eval() &&
-      !scope_->inside_with();
-  SetMode(is_optimizable_closure ? BASE : NONOPT);
+  if (FLAG_optimize_closures) {
+    // If we allow closures optimizations and it's an optimizable closure
+    // mark it correspondingly.
+    bool is_closure = closure_.is_null() && !scope_->HasTrivialOuterContext();
+    if (is_closure) {
+      bool is_optimizable_closure =
+          !scope_->outer_scope_calls_eval() && !scope_->inside_with();
+      if (is_optimizable_closure) {
+        SetMode(BASE);
+        return;
+      }
+    }
+  }
+  SetMode(NONOPT);
 }
-
-
-void CompilationInfo::AbortOptimization() {
-  Handle<Code> code(shared_info()->code());
-  SetCode(code);
-}
@@ -120,23 +119,19 @@ void CompilationInfo::AbortOptimization() {
 // all.  However crankshaft supports recompilation of functions, so in this
 // case the full compiler need not be used if a debugger is attached, but
 // only if break points have actually been set.
-static bool is_debugging_active() {
+static bool AlwaysFullCompiler() {
 #ifdef ENABLE_DEBUGGER_SUPPORT
-  Isolate* isolate = Isolate::Current();
-  return V8::UseCrankshaft() ?
-      isolate->debug()->has_break_points() :
-      isolate->debugger()->IsDebuggerActive();
+  if (V8::UseCrankshaft()) {
+    return FLAG_always_full_compiler || Debug::has_break_points();
+  } else {
+    return FLAG_always_full_compiler || Debugger::IsDebuggerActive();
+  }
 #else
-  return false;
+  return FLAG_always_full_compiler;
 #endif
 }
-
-
-static bool AlwaysFullCompiler() {
-  return FLAG_always_full_compiler || is_debugging_active();
-}

 static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
   int opt_count = function->shared()->opt_count();
   function->shared()->set_opt_count(opt_count + 1);
@@ -163,6 +158,29 @@ static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
 }

+static void AbortAndDisable(CompilationInfo* info) {
+  // Disable optimization for the shared function info and mark the
+  // code as non-optimizable.  The marker on the shared function info
+  // is there because we flush non-optimized code thereby losing the
+  // non-optimizable information for the code.  When the code is
+  // regenerated and set on the shared function info it is marked as
+  // non-optimizable if optimization is disabled for the shared
+  // function info.
+  Handle<SharedFunctionInfo> shared = info->shared_info();
+  shared->set_optimization_disabled(true);
+  Handle<Code> code = Handle<Code>(shared->code());
+  ASSERT(code->kind() == Code::FUNCTION);
+  code->set_optimizable(false);
+  info->SetCode(code);
+  if (FLAG_trace_opt) {
+    PrintF("[disabled optimization for: ");
+    info->closure()->PrintName();
+    PrintF(" / %" V8PRIxPTR "]\n",
+           reinterpret_cast<intptr_t>(*info->closure()));
+  }
+}
+
+
 static bool MakeCrankshaftCode(CompilationInfo* info) {
   // Test if we can optimize this function when asked to. We can only
   // do this after the scopes are computed.
@@ -179,10 +197,6 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
   Handle<Code> code(info->shared_info()->code());
   ASSERT(code->kind() == Code::FUNCTION);

-  // We should never arrive here if optimization has been disabled on the
-  // shared function info.
-  ASSERT(!info->shared_info()->optimization_disabled());
-
   // Fall back to using the full code generator if it's not possible
   // to use the Hydrogen-based optimizing compiler. We already have
   // generated code for this from the shared function object.
@@ -196,9 +210,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
   const int kMaxOptCount =
       FLAG_deopt_every_n_times == 0 ? Compiler::kDefaultMaxOptCount : 1000;
   if (info->shared_info()->opt_count() > kMaxOptCount) {
-    info->AbortOptimization();
-    Handle<JSFunction> closure = info->closure();
-    info->shared_info()->DisableOptimization(*closure);
+    AbortAndDisable(info);
     // True indicates the compilation pipeline is still going, not
     // necessarily that we optimized the code.
     return true;
@@ -217,9 +229,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
   if ((scope->num_parameters() + 1) > parameter_limit ||
       (info->osr_ast_id() != AstNode::kNoNumber &&
        scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit)) {
-    info->AbortOptimization();
-    Handle<JSFunction> closure = info->closure();
-    info->shared_info()->DisableOptimization(*closure);
+    AbortAndDisable(info);
     // True indicates the compilation pipeline is still going, not
     // necessarily that we optimized the code.
     return true;
@@ -240,7 +250,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
   // performance of the hydrogen-based compiler.
   int64_t start = OS::Ticks();
   bool should_recompile = !info->shared_info()->has_deoptimization_support();
-  if (should_recompile || FLAG_hydrogen_stats) {
+  if (should_recompile || FLAG_time_hydrogen) {
     HPhase phase(HPhase::kFullCodeGen);
     CompilationInfo unoptimized(info->shared_info());
     // Note that we use the same AST that we will use for generating the
@@ -273,18 +283,18 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
     HTracer::Instance()->TraceCompilation(info->function());
   }

-  Handle<Context> global_context(info->closure()->context()->global_context());
-  TypeFeedbackOracle oracle(code, global_context);
-  HGraphBuilder builder(info, &oracle);
+  TypeFeedbackOracle oracle(
+      code, Handle<Context>(info->closure()->context()->global_context()));
+  HGraphBuilder builder(&oracle);
   HPhase phase(HPhase::kTotal);
-  HGraph* graph = builder.CreateGraph();
-  if (info->isolate()->has_pending_exception()) {
+  HGraph* graph = builder.CreateGraph(info);
+  if (Top::has_pending_exception()) {
     info->SetCode(Handle<Code>::null());
     return false;
   }

   if (graph != NULL && FLAG_build_lithium) {
-    Handle<Code> optimized_code = graph->Compile(info);
+    Handle<Code> optimized_code = graph->Compile();
     if (!optimized_code.is_null()) {
       info->SetCode(optimized_code);
       FinishOptimization(info->closure(), start);
@@ -292,32 +302,49 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
     }
   }

-  // Keep using the shared code.
-  info->AbortOptimization();
-  if (!builder.inline_bailout()) {
-    // Mark the shared code as unoptimizable unless it was an inlined
-    // function that bailed out.
-    Handle<JSFunction> closure = info->closure();
-    info->shared_info()->DisableOptimization(*closure);
-  }
+  // Compilation with the Hydrogen compiler failed.  Keep using the
+  // shared code but mark it as unoptimizable.
+  AbortAndDisable(info);
   // True indicates the compilation pipeline is still going, not necessarily
   // that we optimized the code.
   return true;
 }
static bool GenerateCode(CompilationInfo* info) {
return V8::UseCrankshaft() ?
MakeCrankshaftCode(info) :
FullCodeGenerator::MakeCode(info);
}
static bool MakeCode(CompilationInfo* info) { static bool MakeCode(CompilationInfo* info) {
// Precondition: code has been parsed. Postcondition: the code field in // Precondition: code has been parsed. Postcondition: the code field in
// the compilation info is set if compilation succeeded. // the compilation info is set if compilation succeeded.
ASSERT(info->function() != NULL); ASSERT(info->function() != NULL);
return Rewriter::Rewrite(info) && Scope::Analyze(info) && GenerateCode(info);
if (Rewriter::Rewrite(info) && Scope::Analyze(info)) {
if (V8::UseCrankshaft()) return MakeCrankshaftCode(info);
// Generate code and return it. Code generator selection is governed by
// which backends are enabled and whether the function is considered
// run-once code or not.
//
// --full-compiler enables the dedicated backend for code we expect to
// be run once
//
// The normal choice of backend can be overridden with the flags
// --always-full-compiler.
if (Rewriter::Analyze(info)) {
Handle<SharedFunctionInfo> shared = info->shared_info();
bool is_run_once = (shared.is_null())
? info->scope()->is_global_scope()
: (shared->is_toplevel() || shared->try_full_codegen());
bool can_use_full =
FLAG_full_compiler && !info->function()->contains_loops();
if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
return FullCodeGenerator::MakeCode(info);
} else {
return AssignedVariablesAnalyzer::Analyze(info) &&
CodeGenerator::MakeCode(info);
}
}
}
return false;
} }
@@ -337,13 +364,13 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
 static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
-  Isolate* isolate = info->isolate();
-  ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+  CompilationZoneScope zone_scope(DELETE_ON_EXIT);

-  PostponeInterruptsScope postpone(isolate);
+  PostponeInterruptsScope postpone;

-  ASSERT(!isolate->global_context().is_null());
+  ASSERT(!i::Top::global_context().is_null());
   Handle<Script> script = info->script();
-  script->set_context_data((*isolate->global_context())->data());
+  script->set_context_data((*i::Top::global_context())->data());

 #ifdef ENABLE_DEBUGGER_SUPPORT
   if (info->is_eval()) {
@@ -352,20 +379,19 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
     // For eval scripts add information on the function from which eval was
     // called.
     if (info->is_eval()) {
-      StackTraceFrameIterator it(isolate);
+      StackTraceFrameIterator it;
       if (!it.done()) {
         script->set_eval_from_shared(
             JSFunction::cast(it.frame()->function())->shared());
-        Code* code = it.frame()->LookupCode();
         int offset = static_cast<int>(
-            it.frame()->pc() - code->instruction_start());
+            it.frame()->pc() - it.frame()->code()->instruction_start());
         script->set_eval_from_instructions_offset(Smi::FromInt(offset));
       }
     }
   }

   // Notify debugger
-  isolate->debugger()->OnBeforeCompile(script);
+  Debugger::OnBeforeCompile(script);
 #endif

   // Only allow non-global compiles for eval.
@@ -377,22 +403,22 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
   // rest of the function into account to avoid overlap with the
   // parsing statistics.
   HistogramTimer* rate = info->is_eval()
-      ? info->isolate()->counters()->compile_eval()
-      : info->isolate()->counters()->compile();
+      ? &Counters::compile_eval
+      : &Counters::compile;
   HistogramTimerScope timer(rate);

   // Compile the code.
   FunctionLiteral* lit = info->function();
-  LiveEditFunctionTracker live_edit_tracker(isolate, lit);
+  LiveEditFunctionTracker live_edit_tracker(lit);
   if (!MakeCode(info)) {
-    isolate->StackOverflow();
+    Top::StackOverflow();
     return Handle<SharedFunctionInfo>::null();
   }

   // Allocate function.
   ASSERT(!info->code().is_null());
   Handle<SharedFunctionInfo> result =
-      isolate->factory()->NewSharedFunctionInfo(
+      Factory::NewSharedFunctionInfo(
           lit->name(),
           lit->materialized_literal_count(),
           info->code(),
@@ -402,7 +428,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
   Compiler::SetFunctionInfo(result, lit, true, script);

   if (script->name()->IsString()) {
-    PROFILE(isolate, CodeCreateEvent(
+    PROFILE(CodeCreateEvent(
         info->is_eval()
             ? Logger::EVAL_TAG
             : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
@@ -411,17 +437,16 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
         String::cast(script->name())));
     GDBJIT(AddCode(Handle<String>(String::cast(script->name())),
                    script,
-                   info->code(),
-                   info));
+                   info->code()));
   } else {
-    PROFILE(isolate, CodeCreateEvent(
+    PROFILE(CodeCreateEvent(
         info->is_eval()
             ? Logger::EVAL_TAG
             : Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
         *info->code(),
         *result,
-        isolate->heap()->empty_string()));
-    GDBJIT(AddCode(Handle<String>(), script, info->code(), info));
+        Heap::empty_string()));
+    GDBJIT(AddCode(Handle<String>(), script, info->code()));
   }

   // Hint to the runtime system used when allocating space for initial
@@ -431,8 +456,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
 #ifdef ENABLE_DEBUGGER_SUPPORT
   // Notify debugger
-  isolate->debugger()->OnAfterCompile(
-      script, Debugger::NO_AFTER_COMPILE_FLAGS);
+  Debugger::OnAfterCompile(script, Debugger::NO_AFTER_COMPILE_FLAGS);
 #endif

   live_edit_tracker.RecordFunctionInfo(result, lit);
@@ -449,20 +473,17 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
                                              ScriptDataImpl* input_pre_data,
                                              Handle<Object> script_data,
                                              NativesFlag natives) {
-  Isolate* isolate = source->GetIsolate();
   int source_length = source->length();
-  isolate->counters()->total_load_size()->Increment(source_length);
-  isolate->counters()->total_compile_size()->Increment(source_length);
+  Counters::total_load_size.Increment(source_length);
+  Counters::total_compile_size.Increment(source_length);

   // The VM is in the COMPILER state until exiting this function.
-  VMState state(isolate, COMPILER);
-
-  CompilationCache* compilation_cache = isolate->compilation_cache();
+  VMState state(COMPILER);

   // Do a lookup in the compilation cache but not for extensions.
   Handle<SharedFunctionInfo> result;
   if (extension == NULL) {
-    result = compilation_cache->LookupScript(source,
+    result = CompilationCache::LookupScript(source,
                                              script_name,
                                              line_offset,
                                              column_offset);
@@ -491,7 +512,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
     }

     // Create a script object describing the script to be compiled.
-    Handle<Script> script = FACTORY->NewScript(source);
+    Handle<Script> script = Factory::NewScript(source);
     if (natives == NATIVES_CODE) {
       script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
     }
@@ -501,7 +522,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
       script->set_column_offset(Smi::FromInt(column_offset));
     }

-    script->set_data(script_data.is_null() ? HEAP->undefined_value()
+    script->set_data(script_data.is_null() ? Heap::undefined_value()
                                            : *script_data);

     // Compile the function and add it to the cache.
@@ -509,13 +530,9 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
     info.MarkAsGlobal();
     info.SetExtension(extension);
     info.SetPreParseData(pre_data);
-    if (natives == NATIVES_CODE) {
-      info.MarkAsAllowingNativesSyntax();
-      info.MarkAsNative();
-    }
     result = MakeFunctionInfo(&info);
     if (extension == NULL && !result.is_null()) {
-      compilation_cache->PutScript(source, result);
+      CompilationCache::PutScript(source, result);
     }

     // Get rid of the pre-parsing data (if necessary).
@@ -524,7 +541,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
     }
   }

-  if (result.is_null()) isolate->ReportPendingMessages();
+  if (result.is_null()) Top::ReportPendingMessages();
   return result;
 }
@@ -533,39 +550,36 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
                                                  Handle<Context> context,
                                                  bool is_global,
                                                  StrictModeFlag strict_mode) {
-  Isolate* isolate = source->GetIsolate();
   int source_length = source->length();
-  isolate->counters()->total_eval_size()->Increment(source_length);
-  isolate->counters()->total_compile_size()->Increment(source_length);
+  Counters::total_eval_size.Increment(source_length);
+  Counters::total_compile_size.Increment(source_length);

   // The VM is in the COMPILER state until exiting this function.
-  VMState state(isolate, COMPILER);
+  VMState state(COMPILER);

   // Do a lookup in the compilation cache; if the entry is not there, invoke
   // the compiler and add the result to the cache.
   Handle<SharedFunctionInfo> result;
-  CompilationCache* compilation_cache = isolate->compilation_cache();
-  result = compilation_cache->LookupEval(source,
+  result = CompilationCache::LookupEval(source,
                                          context,
                                          is_global,
                                          strict_mode);

   if (result.is_null()) {
     // Create a script object describing the script to be compiled.
-    Handle<Script> script = isolate->factory()->NewScript(source);
+    Handle<Script> script = Factory::NewScript(source);
     CompilationInfo info(script);
     info.MarkAsEval();
     if (is_global) info.MarkAsGlobal();
-    if (strict_mode == kStrictMode) info.MarkAsStrictMode();
+    if (strict_mode == kStrictMode) info.MarkAsStrict();
     info.SetCallingContext(context);
     result = MakeFunctionInfo(&info);
     if (!result.is_null()) {
-      CompilationCache* compilation_cache = isolate->compilation_cache();
       // If caller is strict mode, the result must be strict as well,
       // but not the other way around. Consider:
       // eval("'use strict'; ...");
       ASSERT(strict_mode == kNonStrictMode || result->strict_mode());
-      compilation_cache->PutEval(source, context, is_global, result);
+      CompilationCache::PutEval(source, context, is_global, result);
     }
   }
@@ -574,50 +588,36 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
 bool Compiler::CompileLazy(CompilationInfo* info) {
-  Isolate* isolate = info->isolate();
-  ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
+  CompilationZoneScope zone_scope(DELETE_ON_EXIT);

   // The VM is in the COMPILER state until exiting this function.
-  VMState state(isolate, COMPILER);
+  VMState state(COMPILER);

-  PostponeInterruptsScope postpone(isolate);
+  PostponeInterruptsScope postpone;

   Handle<SharedFunctionInfo> shared = info->shared_info();
   int compiled_size = shared->end_position() - shared->start_position();
-  isolate->counters()->total_compile_size()->Increment(compiled_size);
+  Counters::total_compile_size.Increment(compiled_size);

   // Generate the AST for the lazily compiled function.
   if (ParserApi::Parse(info)) {
     // Measure how long it takes to do the lazy compilation; only take the
     // rest of the function into account to avoid overlap with the lazy
     // parsing statistics.
-    HistogramTimerScope timer(isolate->counters()->compile_lazy());
-
-    // After parsing we know function's strict mode. Remember it.
-    if (info->function()->strict_mode()) {
-      shared->set_strict_mode(true);
-      info->MarkAsStrictMode();
-    }
+    HistogramTimerScope timer(&Counters::compile_lazy);

     // Compile the code.
     if (!MakeCode(info)) {
-      if (!isolate->has_pending_exception()) {
-        isolate->StackOverflow();
+      if (!Top::has_pending_exception()) {
+        Top::StackOverflow();
       }
     } else {
       ASSERT(!info->code().is_null());
       Handle<Code> code = info->code();
-      // Set optimizable to false if this is disallowed by the shared
-      // function info, e.g., we might have flushed the code and must
-      // reset this bit when lazy compiling the code again.
-      if (shared->optimization_disabled()) code->set_optimizable(false);
-
       Handle<JSFunction> function = info->closure();
       RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);

       if (info->IsOptimizing()) {
-        ASSERT(shared->scope_info() != SerializedScopeInfo::Empty());
         function->ReplaceCode(*code);
       } else {
         // Update the shared function info with the compiled code and the
@@ -650,15 +650,16 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
       ASSERT(shared->is_compiled());
       shared->set_code_age(0);

-      if (info->AllowOptimize() && !shared->optimization_disabled()) {
+      if (V8::UseCrankshaft() && info->AllowOptimize()) {
         // If we're asked to always optimize, we compile the optimized
         // version of the function right away - unless the debugger is
         // active as it makes no sense to compile optimized code then.
-        if (FLAG_always_opt &&
-            !Isolate::Current()->DebuggerHasBreakPoints()) {
+        if (FLAG_always_opt && !Debug::has_break_points()) {
           CompilationInfo optimized(function);
           optimized.SetOptimizing(AstNode::kNoNumber);
           return CompileLazy(&optimized);
+        } else if (CompilationCache::ShouldOptimizeEagerly(function)) {
+          RuntimeProfiler::OptimizeSoon(*function);
         }
       }
     }
@@ -678,35 +679,56 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
   CompilationInfo info(script);
   info.SetFunction(literal);
   info.SetScope(literal->scope());
-  if (literal->scope()->is_strict_mode()) info.MarkAsStrictMode();
-  if (script->type()->value() == Script::TYPE_NATIVE) info.MarkAsNative();

-  LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal);
+  LiveEditFunctionTracker live_edit_tracker(literal);
   // Determine if the function can be lazily compiled. This is necessary to
   // allow some of our builtin JS files to be lazily compiled. These
   // builtins cannot be handled lazily by the parser, since we have to know
   // if a function uses the special natives syntax, which is something the
   // parser records.
   bool allow_lazy = literal->AllowsLazyCompilation() &&
-      !LiveEditFunctionTracker::IsActive(info.isolate());
+      !LiveEditFunctionTracker::IsActive();

   Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());

   // Generate code
   if (FLAG_lazy && allow_lazy) {
-    Handle<Code> code = info.isolate()->builtins()->LazyCompile();
+    Handle<Code> code(Builtins::builtin(Builtins::LazyCompile));
     info.SetCode(code);
-  } else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
-             (!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
-    ASSERT(!info.code().is_null());
-    scope_info = SerializedScopeInfo::Create(info.scope());
   } else {
-    return Handle<SharedFunctionInfo>::null();
+    if (V8::UseCrankshaft()) {
+      if (!MakeCrankshaftCode(&info)) {
+        return Handle<SharedFunctionInfo>::null();
+      }
+    } else {
+      // The bodies of function literals have not yet been visited by the
+      // AST optimizer/analyzer.
+      if (!Rewriter::Analyze(&info)) return Handle<SharedFunctionInfo>::null();
+
+      bool is_run_once = literal->try_full_codegen();
+      bool can_use_full = FLAG_full_compiler && !literal->contains_loops();
+
+      if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
+        if (!FullCodeGenerator::MakeCode(&info)) {
+          return Handle<SharedFunctionInfo>::null();
+        }
+      } else {
+        // We fall back to the classic V8 code generator.
+        if (!AssignedVariablesAnalyzer::Analyze(&info) ||
+            !CodeGenerator::MakeCode(&info)) {
+          return Handle<SharedFunctionInfo>::null();
+        }
+      }
+    }
+    ASSERT(!info.code().is_null());
+
+    // Function compilation complete.
+    scope_info = SerializedScopeInfo::Create(info.scope());
   }

   // Create a shared function info object.
   Handle<SharedFunctionInfo> result =
-      FACTORY->NewSharedFunctionInfo(literal->name(),
+      Factory::NewSharedFunctionInfo(literal->name(),
                                      literal->materialized_literal_count(),
                                      info.code(),
                                      scope_info);
@@ -743,10 +765,9 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
   function_info->SetThisPropertyAssignmentsInfo(
       lit->has_only_simple_this_property_assignments(),
       *lit->this_property_assignments());
+  function_info->set_try_full_codegen(lit->try_full_codegen());
   function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
   function_info->set_strict_mode(lit->strict_mode());
-  function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
-  function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
 }
@@ -759,34 +780,29 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
   // Log the code generation. If source information is available include
   // script name and line number. Check explicitly whether logging is
   // enabled as finding the line number is not free.
-  if (info->isolate()->logger()->is_logging() ||
-      CpuProfiler::is_profiling(info->isolate())) {
+  if (Logger::is_logging() || CpuProfiler::is_profiling()) {
     Handle<Script> script = info->script();
     Handle<Code> code = info->code();
-    if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
-      return;
+    if (*code == Builtins::builtin(Builtins::LazyCompile)) return;
     if (script->name()->IsString()) {
       int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
       USE(line_num);
-      PROFILE(info->isolate(),
-              CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
+      PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
                               *code,
                               *shared,
                               String::cast(script->name()),
                               line_num));
     } else {
-      PROFILE(info->isolate(),
-              CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
+      PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
                               *code,
                               *shared,
                               shared->DebugName()));
     }
   }

-  GDBJIT(AddCode(Handle<String>(shared->DebugName()),
+  GDBJIT(AddCode(name,
                  Handle<Script>(info->script()),
-                 Handle<Code>(info->code()),
-                 info));
+                 Handle<Code>(info->code())));
 }

 } }  // namespace v8::internal
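The MakeCode selection restored by this diff routes code to one of V8's two non-optimizing backends: the full code generator for code expected to run once, and the classic code generator otherwise. As a reading aid, here is a minimal, self-contained C++ sketch of that run-once heuristic; every type and name below is an illustrative stand-in, not V8's actual interface.

#include <iostream>

// Stand-ins for V8's flags and compilation state (illustrative only).
struct Flags {
  bool always_full_compiler = false;  // --always-full-compiler
  bool full_compiler = true;          // --full-compiler
};

struct FunctionInfo {
  bool is_toplevel = false;       // top-level script code
  bool try_full_codegen = false;  // hint recorded on the shared info
  bool contains_loops = false;    // loops favor the classic backend
};

enum class Backend { kFullCodegen, kClassicCodegen };

// Mirrors the shape of the selection in MakeCode: the full code generator
// handles code expected to run once; everything else goes to the classic
// code generator.
Backend ChooseBackend(const Flags& flags, const FunctionInfo& fn) {
  bool is_run_once = fn.is_toplevel || fn.try_full_codegen;
  bool can_use_full = flags.full_compiler && !fn.contains_loops;
  if (flags.always_full_compiler || (is_run_once && can_use_full)) {
    return Backend::kFullCodegen;
  }
  return Backend::kClassicCodegen;
}

int main() {
  Flags flags;
  FunctionInfo script;
  script.is_toplevel = true;
  FunctionInfo hot_loop;
  hot_loop.contains_loops = true;
  // 1: run-once code uses the full code generator.
  std::cout << (ChooseBackend(flags, script) == Backend::kFullCodegen) << "\n";
  // 1: loopy code falls back to the classic backend.
  std::cout << (ChooseBackend(flags, hot_loop) == Backend::kClassicCodegen)
            << "\n";
  return 0;
}

Under these assumptions, top-level script code goes through the full code generator, while a function containing loops falls back to the classic backend.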

64
deps/v8/src/compiler.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2010 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,8 +28,9 @@
 #ifndef V8_COMPILER_H_
 #define V8_COMPILER_H_

-#include "allocation.h"
 #include "ast.h"
+#include "frame-element.h"
+#include "register-allocator.h"
 #include "zone.h"

 namespace v8 {
@@ -45,14 +46,10 @@ class CompilationInfo BASE_EMBEDDED {
   explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info);
   explicit CompilationInfo(Handle<JSFunction> closure);

-  Isolate* isolate() {
-    ASSERT(Isolate::Current() == isolate_);
-    return isolate_;
-  }
   bool is_lazy() const { return (flags_ & IsLazy::mask()) != 0; }
   bool is_eval() const { return (flags_ & IsEval::mask()) != 0; }
   bool is_global() const { return (flags_ & IsGlobal::mask()) != 0; }
-  bool is_strict_mode() const { return (flags_ & IsStrictMode::mask()) != 0; }
+  bool is_strict() const { return (flags_ & IsStrict::mask()) != 0; }
   bool is_in_loop() const { return (flags_ & IsInLoop::mask()) != 0; }
   FunctionLiteral* function() const { return function_; }
   Scope* scope() const { return scope_; }
@@ -73,28 +70,16 @@ class CompilationInfo BASE_EMBEDDED {
     ASSERT(!is_lazy());
     flags_ |= IsGlobal::encode(true);
   }
-  void MarkAsStrictMode() {
-    flags_ |= IsStrictMode::encode(true);
+  void MarkAsStrict() {
+    flags_ |= IsStrict::encode(true);
   }
   StrictModeFlag StrictMode() {
-    return is_strict_mode() ? kStrictMode : kNonStrictMode;
+    return is_strict() ? kStrictMode : kNonStrictMode;
   }
   void MarkAsInLoop() {
     ASSERT(is_lazy());
     flags_ |= IsInLoop::encode(true);
   }
-  void MarkAsAllowingNativesSyntax() {
-    flags_ |= IsNativesSyntaxAllowed::encode(true);
-  }
-  bool allows_natives_syntax() const {
-    return IsNativesSyntaxAllowed::decode(flags_);
-  }
-  void MarkAsNative() {
-    flags_ |= IsNative::encode(true);
-  }
-  bool is_native() const {
-    return IsNative::decode(flags_);
-  }
   void SetFunction(FunctionLiteral* literal) {
     ASSERT(function_ == NULL);
     function_ = literal;
@@ -150,13 +135,7 @@ class CompilationInfo BASE_EMBEDDED {
     return V8::UseCrankshaft() && !closure_.is_null();
   }

-  // Disable all optimization attempts of this info for the rest of the
-  // current compilation pipeline.
-  void AbortOptimization();
-
  private:
-  Isolate* isolate_;
-
   // Compilation mode.
   // BASE is generated by the full codegen, optionally prepared for bailouts.
   // OPTIMIZE is optimized code generated by the Hydrogen-based backend.
@@ -173,9 +152,8 @@ class CompilationInfo BASE_EMBEDDED {
   void Initialize(Mode mode) {
     mode_ = V8::UseCrankshaft() ? mode : NONOPT;
-    if (!shared_info_.is_null()) {
-      if (shared_info_->strict_mode()) MarkAsStrictMode();
-      if (shared_info_->native()) MarkAsNative();
+    if (!shared_info_.is_null() && shared_info_->strict_mode()) {
+      MarkAsStrict();
     }
   }
@@ -195,12 +173,7 @@ class CompilationInfo BASE_EMBEDDED {
   // Flags that can be set for lazy compilation.
   class IsInLoop: public BitField<bool, 3, 1> {};
   // Strict mode - used in eager compilation.
-  class IsStrictMode: public BitField<bool, 4, 1> {};
-  // Native syntax (%-stuff) allowed?
-  class IsNativesSyntaxAllowed: public BitField<bool, 5, 1> {};
-  // Is this a function from our natives.
-  class IsNative: public BitField<bool, 6, 1> {};
+  class IsStrict: public BitField<bool, 4, 1> {};

   unsigned flags_;
@@ -252,8 +225,6 @@ class Compiler : public AllStatic {
   // give up.
   static const int kDefaultMaxOptCount = 10;

-  static const int kMaxInliningLevels = 3;
-
   // All routines return a SharedFunctionInfo.
   // If an error occurs an exception is raised and the return handle
   // contains NULL.
@@ -299,6 +270,21 @@ class Compiler : public AllStatic {
 };


+// During compilation we need a global list of handles to constants
+// for frame elements. When the zone gets deleted, we make sure to
+// clear this list of handles as well.
+class CompilationZoneScope : public ZoneScope {
+ public:
+  explicit CompilationZoneScope(ZoneScopeMode mode) : ZoneScope(mode) { }
+  virtual ~CompilationZoneScope() {
+    if (ShouldDeleteOnExit()) {
+      FrameElement::ClearConstantList();
+      Result::ClearConstantList();
+    }
+  }
+};
+
 } }  // namespace v8::internal

 #endif  // V8_COMPILER_H_
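CompilationInfo packs its boolean properties (lazy, eval, global, in-loop, strict) into the single `unsigned flags_` word via the BitField template used above. Below is a minimal sketch of how such a template can be built, assuming it only needs mask/encode/decode; this is a simplified stand-in, not V8's exact definition.

#include <cassert>

// Simplified stand-in for a BitField<T, shift, size> template: encodes a
// value of type T into bits [shift, shift + size) of a word.
template <class T, int shift, int size>
class BitField {
 public:
  static unsigned mask() { return ((1U << size) - 1) << shift; }
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned word) {
    return static_cast<T>((word & mask()) >> shift);
  }
};

// The same bit layout as CompilationInfo's flags.
class IsInLoop : public BitField<bool, 3, 1> {};
class IsStrict : public BitField<bool, 4, 1> {};

int main() {
  unsigned flags = 0;
  flags |= IsStrict::encode(true);          // MarkAsStrict() style update
  assert((flags & IsStrict::mask()) != 0);  // is_strict() style test
  assert(!IsInLoop::decode(flags));         // other bits stay untouched
  return 0;
}

Because each flag occupies a distinct bit range, setters and getters reduce to a single OR and a single AND on one word.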

144
deps/v8/src/contexts.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -34,16 +34,6 @@
 namespace v8 {
 namespace internal {

-Context* Context::declaration_context() {
-  Context* current = this;
-  while (!current->IsFunctionContext() && !current->IsGlobalContext()) {
-    current = current->previous();
-    ASSERT(current->closure() == closure());
-  }
-  return current;
-}
-
-
 JSBuiltinsObject* Context::builtins() {
   GlobalObject* object = global();
   if (object->IsJSGlobalObject()) {
@@ -65,7 +55,7 @@ Context* Context::global_context() {
   // During bootstrapping, the global object might not be set and we
   // have to search the context chain to find the global context.
-  ASSERT(Isolate::Current()->bootstrapper()->IsActive());
+  ASSERT(Bootstrapper::IsActive());
   Context* current = this;
   while (!current->IsGlobalContext()) {
     JSFunction* closure = JSFunction::cast(current->closure());
@@ -84,12 +74,9 @@ void Context::set_global_proxy(JSObject* object) {
 }


-Handle<Object> Context::Lookup(Handle<String> name,
-                               ContextLookupFlags flags,
-                               int* index_,
-                               PropertyAttributes* attributes) {
-  Isolate* isolate = GetIsolate();
-  Handle<Context> context(this, isolate);
+Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
+                               int* index_, PropertyAttributes* attributes) {
+  Handle<Context> context(this);
   bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
   *index_ = -1;
@@ -108,23 +95,9 @@ Handle<Object> Context::Lookup(Handle<String> name,
       PrintF("\n");
     }

-    // Check extension/with/global object.
+    // check extension/with object
     if (context->has_extension()) {
-      if (context->IsCatchContext()) {
-        // Catch contexts have the variable name in the extension slot.
-        if (name->Equals(String::cast(context->extension()))) {
-          if (FLAG_trace_contexts) {
-            PrintF("=> found in catch context\n");
-          }
-          *index_ = Context::THROWN_OBJECT_INDEX;
-          *attributes = NONE;
-          return context;
-        }
-      } else {
-        // Global, function, and with contexts may have an object in the
-        // extension slot.
-        Handle<JSObject> extension(JSObject::cast(context->extension()),
-                                   isolate);
+      Handle<JSObject> extension = Handle<JSObject>(context->extension());
       // Context extension objects needs to behave as if they have no
       // prototype. So even if we want to follow prototype chains, we
       // need to only do a local lookup for context extension objects.
@@ -143,17 +116,18 @@ Handle<Object> Context::Lookup(Handle<String> name,
         return extension;
       }
     }
-    }

-    // Only functions can have locals, parameters, and a function name.
-    if (context->IsFunctionContext()) {
-      // We may have context-local slots. Check locals in the context.
+    if (context->is_function_context()) {
+      // we have context-local slots
+
+      // check non-parameter locals in context
       Handle<SerializedScopeInfo> scope_info(
-          context->closure()->shared()->scope_info(), isolate);
+          context->closure()->shared()->scope_info());
       Variable::Mode mode;
       int index = scope_info->ContextSlotIndex(*name, &mode);
       ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
       if (index >= 0) {
+        // slot found
         if (FLAG_trace_contexts) {
           PrintF("=> found local in context slot %d (mode = %d)\n",
                  index, mode);
@@ -166,28 +140,39 @@ Handle<Object> Context::Lookup(Handle<String> name,
         // declared variables that were introduced through declaration nodes)
         // must not appear here.
         switch (mode) {
-          case Variable::INTERNAL:  // Fall through.
-          case Variable::VAR:
-            *attributes = NONE;
-            break;
-          case Variable::CONST:
-            *attributes = READ_ONLY;
-            break;
-          case Variable::DYNAMIC:
-          case Variable::DYNAMIC_GLOBAL:
-          case Variable::DYNAMIC_LOCAL:
-          case Variable::TEMPORARY:
-            UNREACHABLE();
-            break;
+          case Variable::INTERNAL:  // fall through
+          case Variable::VAR: *attributes = NONE; break;
+          case Variable::CONST: *attributes = READ_ONLY; break;
+          case Variable::DYNAMIC: UNREACHABLE(); break;
+          case Variable::DYNAMIC_GLOBAL: UNREACHABLE(); break;
+          case Variable::DYNAMIC_LOCAL: UNREACHABLE(); break;
+          case Variable::TEMPORARY: UNREACHABLE(); break;
         }
         return context;
       }

-      // Check the slot corresponding to the intermediate context holding
-      // only the function name variable.
+      // check parameter locals in context
+      int param_index = scope_info->ParameterIndex(*name);
+      if (param_index >= 0) {
+        // slot found.
+        int index =
+            scope_info->ContextSlotIndex(Heap::arguments_shadow_symbol(), NULL);
+        ASSERT(index >= 0);  // arguments must exist and be in the heap context
+        Handle<JSObject> arguments(JSObject::cast(context->get(index)));
+        ASSERT(arguments->HasLocalProperty(Heap::length_symbol()));
+        if (FLAG_trace_contexts) {
+          PrintF("=> found parameter %d in arguments object\n", param_index);
+        }
+        *index_ = param_index;
+        *attributes = NONE;
+        return arguments;
+      }
+
+      // check intermediate context (holding only the function name variable)
       if (follow_context_chain) {
         int index = scope_info->FunctionContextSlotIndex(*name);
         if (index >= 0) {
+          // slot found
           if (FLAG_trace_contexts) {
             PrintF("=> found intermediate function in context slot %d\n",
                    index);
@@ -199,14 +184,17 @@ Handle<Object> Context::Lookup(Handle<String> name,
       }
     }

-    // Proceed with the previous context.
+    // proceed with enclosing context
     if (context->IsGlobalContext()) {
       follow_context_chain = false;
+    } else if (context->is_function_context()) {
+      context = Handle<Context>(Context::cast(context->closure()->context()));
     } else {
-      context = Handle<Context>(context->previous(), isolate);
+      context = Handle<Context>(context->previous());
     }
   } while (follow_context_chain);

+  // slot not found
   if (FLAG_trace_contexts) {
     PrintF("=> no property/slot found\n");
   }
@@ -221,12 +209,11 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
   // before the global context and check that there are no context
   // extension objects (conservative check for with statements).
   while (!context->IsGlobalContext()) {
-    // Check if the context is a catch or with context, or has introduced
-    // bindings by calling non-strict eval.
+    // Check if the context is a potentially a with context.
    if (context->has_extension()) return false;

     // Not a with context so it must be a function context.
-    ASSERT(context->IsFunctionContext());
+    ASSERT(context->is_function_context());

     // Check non-parameter locals.
     Handle<SerializedScopeInfo> scope_info(
@@ -243,7 +230,7 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
     // Check context only holding the function name variable.
     index = scope_info->FunctionContextSlotIndex(*name);
     if (index >= 0) return false;
-    context = context->previous();
+    context = Context::cast(context->closure()->context());
   }

   // No local or potential with statement found so the variable is
@@ -252,30 +239,6 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
 }


-void Context::ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
-                                   bool* outer_scope_calls_non_strict_eval) {
-  // Skip up the context chain checking all the function contexts to see
-  // whether they call eval.
-  Context* context = this;
-  while (!context->IsGlobalContext()) {
-    if (context->IsFunctionContext()) {
-      Handle<SerializedScopeInfo> scope_info(
-          context->closure()->shared()->scope_info());
-      if (scope_info->CallsEval()) {
-        *outer_scope_calls_eval = true;
-        if (!scope_info->IsStrictMode()) {
-          // No need to go further since the answers will not change from
-          // here.
-          *outer_scope_calls_non_strict_eval = true;
-          return;
-        }
-      }
-    }
-    context = context->previous();
-  }
-}
-
-
 void Context::AddOptimizedFunction(JSFunction* function) {
   ASSERT(IsGlobalContext());
 #ifdef DEBUG
@@ -289,7 +252,7 @@ void Context::AddOptimizedFunction(JSFunction* function) {
   // Check that the context belongs to the weak global contexts list.
   bool found = false;
-  Object* context = GetHeap()->global_contexts_list();
+  Object* context = Heap::global_contexts_list();
   while (!context->IsUndefined()) {
     if (context == this) {
       found = true;
@@ -318,7 +281,7 @@ void Context::RemoveOptimizedFunction(JSFunction* function) {
       } else {
         prev->set_next_function_link(element_function->next_function_link());
       }
-      element_function->set_next_function_link(GetHeap()->undefined_value());
+      element_function->set_next_function_link(Heap::undefined_value());
       return;
     }
     prev = element_function;
@@ -335,7 +298,7 @@ Object* Context::OptimizedFunctionsListHead() {
 void Context::ClearOptimizedFunctions() {
-  set(OPTIMIZED_FUNCTIONS_LIST, GetHeap()->undefined_value());
+  set(OPTIMIZED_FUNCTIONS_LIST, Heap::undefined_value());
 }
@@ -343,17 +306,14 @@ void Context::ClearOptimizedFunctions() {
 bool Context::IsBootstrappingOrContext(Object* object) {
   // During bootstrapping we allow all objects to pass as
   // contexts. This is necessary to fix circular dependencies.
-  return Isolate::Current()->bootstrapper()->IsActive() || object->IsContext();
+  return Bootstrapper::IsActive() || object->IsContext();
 }


 bool Context::IsBootstrappingOrGlobalObject(Object* object) {
   // During bootstrapping we allow all objects to pass as global
   // objects. This is necessary to fix circular dependencies.
-  Isolate* isolate = Isolate::Current();
-  return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
-         isolate->bootstrapper()->IsActive() ||
-         object->IsGlobalObject();
+  return Bootstrapper::IsActive() || object->IsGlobalObject();
 }

 #endif
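Context::Lookup above resolves a name by checking, at each context on the chain, first the extension (with/eval) object and then the context's own slots, before stepping to the enclosing context. Here is a toy model of that control flow, with plain standard-library types standing in for V8's heap objects; it is a sketch of the walk, not the real lookup.

#include <map>
#include <string>

// Toy model of the lookup order in Context::Lookup: check the extension
// (with/eval) object, then the context's own slots, then move to the
// enclosing context. All types here are illustrative stand-ins.
struct ToyContext {
  std::map<std::string, int>* extension = nullptr;  // with/eval bindings
  std::map<std::string, int> slots;                 // context-local slots
  ToyContext* previous = nullptr;                   // enclosing context
};

// Returns true if the name resolves anywhere on the chain, mirroring the
// follow_context_chain loop.
bool Lookup(ToyContext* context, const std::string& name, int* value) {
  for (; context != nullptr; context = context->previous) {
    if (context->extension != nullptr) {
      auto it = context->extension->find(name);
      if (it != context->extension->end()) {
        *value = it->second;
        return true;
      }
    }
    auto it = context->slots.find(name);
    if (it != context->slots.end()) {
      *value = it->second;
      return true;
    }
  }
  return false;  // "=> no property/slot found"
}

int main() {
  ToyContext global;
  global.slots["x"] = 1;
  ToyContext function;
  function.slots["y"] = 2;
  function.previous = &global;
  int v = 0;
  // "x" is found in the enclosing (global) context.
  return Lookup(&function, "x", &v) && v == 1 ? 0 : 1;
}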

100
deps/v8/src/contexts.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -78,20 +78,11 @@ enum ContextLookupFlags {
   V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
   V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
   V(FUNCTION_MAP_INDEX, Map, function_map) \
-  V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
   V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
-  V(STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
-    strict_mode_function_without_prototype_map) \
   V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
-  V(STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX, Map, \
-    strict_mode_function_instance_map) \
   V(JS_ARRAY_MAP_INDEX, Map, js_array_map)\
   V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
   V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
-  V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
-    aliased_arguments_boilerplate) \
-  V(STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
-    strict_mode_arguments_boilerplate) \
   V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
   V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
   V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
@@ -108,10 +99,7 @@ enum ContextLookupFlags {
   V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
   V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
   V(MAP_CACHE_INDEX, Object, map_cache) \
-  V(CONTEXT_DATA_INDEX, Object, data) \
-  V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
-  V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
-  V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)
+  V(CONTEXT_DATA_INDEX, Object, data)

 // JSFunctions are pairs (context, function code), sometimes also called
 // closures. A Context object is used to represent function contexts and
@@ -130,6 +118,13 @@ enum ContextLookupFlags {
 // statically allocated context slots. The names are needed
 // for dynamic lookups in the presence of 'with' or 'eval'.
 //
+// [ fcontext ]  A pointer to the innermost enclosing function context.
+//               It is the same for all contexts *allocated* inside a
+//               function, and the function context's fcontext points
+//               to itself. It is only needed for fast access of the
+//               function context (used for declarations, and static
+//               context slot access).
+//
 // [ previous ]  A pointer to the previous context. It is NULL for
 //               function contexts, and non-NULL for 'with' contexts.
 //               Used to implement the 'with' statement.
@@ -151,6 +146,19 @@ enum ContextLookupFlags {
 // (via static context addresses) or through 'eval' (dynamic context lookups).
 // Finally, the global context contains additional slots for fast access to
 // global properties.
+//
+// We may be able to simplify the implementation:
+//
+// - We may be able to get rid of 'fcontext': We can always use the fact that
+//   previous == NULL for function contexts and so we can search for them. They
+//   are only needed when doing dynamic declarations, and the context chains
+//   tend to be very very short (depth of nesting of 'with' statements). At
+//   the moment we also use it in generated code for context slot accesses -
+//   and there we don't want a loop because of code bloat - but we may not
+//   need it there after all (see comment in codegen_*.cc).
+//
+// - If we cannot get rid of fcontext, consider making 'previous' never NULL
+//   except for the global context. This could simplify Context::Lookup.

 class Context: public FixedArray {
  public:
@@ -164,31 +172,21 @@ class Context: public FixedArray {
   enum {
     // These slots are in all contexts.
     CLOSURE_INDEX,
+    FCONTEXT_INDEX,
     PREVIOUS_INDEX,
-    // The extension slot is used for either the global object (in global
-    // contexts), eval extension object (function contexts), subject of with
-    // (with contexts), or the variable name (catch contexts).
     EXTENSION_INDEX,
     GLOBAL_INDEX,
     MIN_CONTEXT_SLOTS,

-    // This slot holds the thrown value in catch contexts.
-    THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
-
     // These slots are only in global contexts.
     GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
     SECURITY_TOKEN_INDEX,
     ARGUMENTS_BOILERPLATE_INDEX,
-    ALIASED_ARGUMENTS_BOILERPLATE_INDEX,
-    STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX,
     JS_ARRAY_MAP_INDEX,
     REGEXP_RESULT_MAP_INDEX,
     FUNCTION_MAP_INDEX,
-    STRICT_MODE_FUNCTION_MAP_INDEX,
     FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
-    STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
     FUNCTION_INSTANCE_MAP_INDEX,
-    STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX,
     INITIAL_OBJECT_PROTOTYPE_INDEX,
     BOOLEAN_FUNCTION_INDEX,
     NUMBER_FUNCTION_INDEX,
@@ -225,15 +223,12 @@ class Context: public FixedArray {
     OPAQUE_REFERENCE_FUNCTION_INDEX,
     CONTEXT_EXTENSION_FUNCTION_INDEX,
     OUT_OF_MEMORY_INDEX,
-    MAP_CACHE_INDEX,
     CONTEXT_DATA_INDEX,
-    ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
-    DERIVED_GET_TRAP_INDEX,
-    DERIVED_SET_TRAP_INDEX,

     // Properties from here are treated as weak references by the full GC.
     // Scavenge treats them as strong references.
     OPTIMIZED_FUNCTIONS_LIST,  // Weak.
+    MAP_CACHE_INDEX,  // Weak.
     NEXT_CONTEXT_LINK,  // Weak.

     // Total number of slots.
@@ -246,6 +241,9 @@ class Context: public FixedArray {
   JSFunction* closure() { return JSFunction::cast(get(CLOSURE_INDEX)); }
   void set_closure(JSFunction* closure) { set(CLOSURE_INDEX, closure); }

+  Context* fcontext() { return Context::cast(get(FCONTEXT_INDEX)); }
+  void set_fcontext(Context* context) { set(FCONTEXT_INDEX, context); }
+
   Context* previous() {
     Object* result = unchecked_previous();
     ASSERT(IsBootstrappingOrContext(result));
@@ -253,17 +251,14 @@ class Context: public FixedArray {
   }
   void set_previous(Context* context) { set(PREVIOUS_INDEX, context); }

-  bool has_extension() { return extension() != NULL; }
-  Object* extension() { return get(EXTENSION_INDEX); }
-  void set_extension(Object* object) { set(EXTENSION_INDEX, object); }
-
-  // Get the context where var declarations will be hoisted to, which
-  // may be the context itself.
-  Context* declaration_context();
+  bool has_extension() { return unchecked_extension() != NULL; }
+  JSObject* extension() { return JSObject::cast(unchecked_extension()); }
+  void set_extension(JSObject* object) { set(EXTENSION_INDEX, object); }

   GlobalObject* global() {
     Object* result = get(GLOBAL_INDEX);
-    ASSERT(IsBootstrappingOrGlobalObject(result));
+    ASSERT(Heap::gc_state() != Heap::NOT_IN_GC ||
+           IsBootstrappingOrGlobalObject(result));
     return reinterpret_cast<GlobalObject*>(result);
   }
   void set_global(GlobalObject* global) { set(GLOBAL_INDEX, global); }
@@ -278,27 +273,18 @@ class Context: public FixedArray {
   // Compute the global context by traversing the context chain.
   Context* global_context();

-  // Predicates for context types. IsGlobalContext is defined on Object
-  // because we frequently have to know if arbitrary objects are global
-  // contexts.
-  bool IsFunctionContext() {
-    Map* map = this->map();
-    return map == map->GetHeap()->function_context_map();
-  }
-  bool IsCatchContext() {
-    Map* map = this->map();
-    return map == map->GetHeap()->catch_context_map();
-  }
-  bool IsWithContext() {
-    Map* map = this->map();
-    return map == map->GetHeap()->with_context_map();
-  }
+  // Tells if this is a function context (as opposed to a 'with' context).
+  bool is_function_context() { return unchecked_previous() == NULL; }

   // Tells whether the global context is marked with out of memory.
-  inline bool has_out_of_memory();
+  bool has_out_of_memory() {
+    return global_context()->out_of_memory() == Heap::true_value();
+  }

   // Mark the global context with out of memory.
-  inline void mark_out_of_memory();
+  void mark_out_of_memory() {
+    global_context()->set_out_of_memory(Heap::true_value());
+  }

   // The exception holder is the object used as a with object in
   // the implementation of a catch block.
@@ -357,11 +343,6 @@ class Context: public FixedArray {
   // eval.
   bool GlobalIfNotShadowedByEval(Handle<String> name);

-  // Determine if any function scope in the context call eval and if
-  // any of those calls are in non-strict mode.
-  void ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
-                            bool* outer_scope_calls_non_strict_eval);
-
   // Code generation support.
   static int SlotOffset(int index) {
     return kHeaderSize + index * kPointerSize - kHeapObjectTag;
@@ -381,6 +362,7 @@ class Context: public FixedArray {
  private:
   // Unchecked access to the slots.
   Object* unchecked_previous() { return get(PREVIOUS_INDEX); }
+  Object* unchecked_extension() { return get(EXTENSION_INDEX); }

 #ifdef DEBUG
   // Bootstrapping-aware type checks.
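SlotOffset above converts a slot index into a byte offset relative to a tagged heap pointer. Here is a worked example with assumed 32-bit constants (a FixedArray header of 8 bytes for map plus length, 4-byte pointers, and a heap-object tag of 1); the real values are platform-dependent.

#include <cstdio>

// Illustrative 32-bit constants; the real values depend on the platform.
const int kHeaderSize = 8;     // FixedArray header: map word + length word
const int kPointerSize = 4;    // 32-bit tagged pointer
const int kHeapObjectTag = 1;  // heap pointers carry a tag of 1

// Same arithmetic as Context::SlotOffset: skip the header, index into the
// slot array, and compensate for the heap-object tag on the base pointer.
int SlotOffset(int index) {
  return kHeaderSize + index * kPointerSize - kHeapObjectTag;
}

int main() {
  std::printf("%d\n", SlotOffset(0));  // 7: first slot, tag-adjusted
  std::printf("%d\n", SlotOffset(3));  // 19 = 8 + 3 * 4 - 1
  return 0;
}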

4
deps/v8/src/conversions-inl.h

@@ -60,7 +60,11 @@ static inline unsigned int FastD2UI(double x) {
   if (x < k2Pow52) {
     x += k2Pow52;
     uint32_t result;
+#ifdef BIG_ENDIAN_FLOATING_POINT
+    Address mantissa_ptr = reinterpret_cast<Address>(&x) + kIntSize;
+#else
     Address mantissa_ptr = reinterpret_cast<Address>(&x);
+#endif
     // Copy least significant 32 bits of mantissa.
     memcpy(&result, mantissa_ptr, sizeof(result));
     return negative ? ~result + 1 : result;
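FastD2UI exploits the IEEE-754 layout: adding 2^52 to a non-negative double smaller than 2^52 leaves the integer value in the low bits of the stored mantissa, so copying the low 32 bits yields the conversion without an expensive cast. A standalone demonstration, assuming a little-endian host (which is exactly why the big-endian branch above starts the copy kIntSize bytes in):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Demonstrates the 2^52 trick used by FastD2UI: after adding 2^52, the
// integer value of x sits in the low bits of the double's mantissa.
// Assumes little-endian storage.
uint32_t FastD2UIDemo(double x) {
  const double k2Pow52 = 4503599627370496.0;  // 2^52
  x += k2Pow52;
  uint32_t result;
  // Low 32 bits of the mantissa; a big-endian FPU would need the copy to
  // start 4 bytes in, as the #ifdef above arranges.
  std::memcpy(&result, &x, sizeof(result));
  return result;
}

int main() {
  std::printf("%u\n", FastD2UIDemo(42.0));         // 42
  std::printf("%u\n", FastD2UIDemo(123456789.0));  // 123456789
  return 0;
}

Note that this path does not promise truncation toward zero; fractional inputs round according to the FPU mode, which is why the demo sticks to integral values.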

132
deps/v8/src/conversions.cc

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -109,11 +109,9 @@ static const double JUNK_STRING_VALUE = OS::nan_value();
 // Returns true if a nonspace found and false if the end has reached.
 template <class Iterator, class EndMark>
-static inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
-                                     Iterator* current,
-                                     EndMark end) {
+static inline bool AdvanceToNonspace(Iterator* current, EndMark end) {
   while (*current != end) {
-    if (!unicode_cache->IsWhiteSpace(**current)) return true;
+    if (!ScannerConstants::kIsWhiteSpace.get(**current)) return true;
     ++*current;
   }
   return false;
@@ -134,8 +132,7 @@ static double SignedZero(bool negative) {
 // Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
 template <int radix_log_2, class Iterator, class EndMark>
-static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
-                                        Iterator current,
+static double InternalStringToIntDouble(Iterator current,
                                         EndMark end,
                                         bool negative,
                                         bool allow_trailing_junk) {
@@ -160,8 +157,7 @@ static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
     } else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
       digit = static_cast<char>(*current) - 'A' + 10;
     } else {
-      if (allow_trailing_junk ||
-          !AdvanceToNonspace(unicode_cache, &current, end)) {
+      if (allow_trailing_junk || !AdvanceToNonspace(&current, end)) {
         break;
       } else {
         return JUNK_STRING_VALUE;
@@ -192,8 +188,7 @@ static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
     exponent += radix_log_2;
   }

-  if (!allow_trailing_junk &&
-      AdvanceToNonspace(unicode_cache, &current, end)) {
+  if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
     return JUNK_STRING_VALUE;
   }
@ -237,16 +232,11 @@ static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
template <class Iterator, class EndMark> template <class Iterator, class EndMark>
static double InternalStringToInt(UnicodeCache* unicode_cache, static double InternalStringToInt(Iterator current, EndMark end, int radix) {
Iterator current,
EndMark end,
int radix) {
const bool allow_trailing_junk = true; const bool allow_trailing_junk = true;
const double empty_string_val = JUNK_STRING_VALUE; const double empty_string_val = JUNK_STRING_VALUE;
if (!AdvanceToNonspace(unicode_cache, &current, end)) { if (!AdvanceToNonspace(&current, end)) return empty_string_val;
return empty_string_val;
}
bool negative = false; bool negative = false;
bool leading_zero = false; bool leading_zero = false;
@ -254,14 +244,10 @@ static double InternalStringToInt(UnicodeCache* unicode_cache,
if (*current == '+') { if (*current == '+') {
// Ignore leading sign; skip following spaces. // Ignore leading sign; skip following spaces.
++current; ++current;
if (current == end) { if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
return JUNK_STRING_VALUE;
}
} else if (*current == '-') { } else if (*current == '-') {
++current; ++current;
if (current == end) { if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
return JUNK_STRING_VALUE;
}
negative = true; negative = true;
} }
@ -312,21 +298,21 @@ static double InternalStringToInt(UnicodeCache* unicode_cache,
switch (radix) { switch (radix) {
case 2: case 2:
return InternalStringToIntDouble<1>( return InternalStringToIntDouble<1>(
unicode_cache, current, end, negative, allow_trailing_junk); current, end, negative, allow_trailing_junk);
case 4: case 4:
return InternalStringToIntDouble<2>( return InternalStringToIntDouble<2>(
unicode_cache, current, end, negative, allow_trailing_junk); current, end, negative, allow_trailing_junk);
case 8: case 8:
return InternalStringToIntDouble<3>( return InternalStringToIntDouble<3>(
unicode_cache, current, end, negative, allow_trailing_junk); current, end, negative, allow_trailing_junk);
case 16: case 16:
return InternalStringToIntDouble<4>( return InternalStringToIntDouble<4>(
unicode_cache, current, end, negative, allow_trailing_junk); current, end, negative, allow_trailing_junk);
case 32: case 32:
return InternalStringToIntDouble<5>( return InternalStringToIntDouble<5>(
unicode_cache, current, end, negative, allow_trailing_junk); current, end, negative, allow_trailing_junk);
default: default:
UNREACHABLE(); UNREACHABLE();
} }
@ -351,8 +337,7 @@ static double InternalStringToInt(UnicodeCache* unicode_cache,
if (current == end) break; if (current == end) break;
} }
if (!allow_trailing_junk && if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE; return JUNK_STRING_VALUE;
} }
@ -417,8 +402,7 @@ static double InternalStringToInt(UnicodeCache* unicode_cache,
v = v * multiplier + part; v = v * multiplier + part;
} while (!done); } while (!done);
if (!allow_trailing_junk && if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE; return JUNK_STRING_VALUE;
} }
@ -432,8 +416,7 @@ static double InternalStringToInt(UnicodeCache* unicode_cache,
// 2. *current - gets the current character in the sequence. // 2. *current - gets the current character in the sequence.
// 3. ++current (advances the position). // 3. ++current (advances the position).
template <class Iterator, class EndMark> template <class Iterator, class EndMark>
static double InternalStringToDouble(UnicodeCache* unicode_cache, static double InternalStringToDouble(Iterator current,
Iterator current,
EndMark end, EndMark end,
int flags, int flags,
double empty_string_val) { double empty_string_val) {
@ -445,9 +428,7 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache,
// 'parsing_done'. // 'parsing_done'.
// 4. 'current' is not dereferenced after the 'parsing_done' label. // 4. 'current' is not dereferenced after the 'parsing_done' label.
// 5. Code before 'parsing_done' may rely on 'current != end'. // 5. Code before 'parsing_done' may rely on 'current != end'.
if (!AdvanceToNonspace(unicode_cache, &current, end)) { if (!AdvanceToNonspace(&current, end)) return empty_string_val;
return empty_string_val;
}
const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0; const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0;
@ -482,8 +463,7 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache,
return JUNK_STRING_VALUE; return JUNK_STRING_VALUE;
} }
if (!allow_trailing_junk && if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE; return JUNK_STRING_VALUE;
} }
@ -505,8 +485,7 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache,
return JUNK_STRING_VALUE; // "0x". return JUNK_STRING_VALUE; // "0x".
} }
return InternalStringToIntDouble<4>(unicode_cache, return InternalStringToIntDouble<4>(current,
current,
end, end,
negative, negative,
allow_trailing_junk); allow_trailing_junk);
@ -642,8 +621,7 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache,
exponent += (sign == '-' ? -num : num); exponent += (sign == '-' ? -num : num);
} }
if (!allow_trailing_junk && if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE; return JUNK_STRING_VALUE;
} }
@ -651,8 +629,7 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache,
exponent += insignificant_digits; exponent += insignificant_digits;
if (octal) { if (octal) {
return InternalStringToIntDouble<3>(unicode_cache, return InternalStringToIntDouble<3>(buffer,
buffer,
buffer + buffer_pos, buffer + buffer_pos,
negative, negative,
allow_trailing_junk); allow_trailing_junk);
@ -671,23 +648,19 @@ static double InternalStringToDouble(UnicodeCache* unicode_cache,
} }
double StringToDouble(UnicodeCache* unicode_cache, double StringToDouble(String* str, int flags, double empty_string_val) {
String* str, int flags, double empty_string_val) {
StringShape shape(str); StringShape shape(str);
if (shape.IsSequentialAscii()) { if (shape.IsSequentialAscii()) {
const char* begin = SeqAsciiString::cast(str)->GetChars(); const char* begin = SeqAsciiString::cast(str)->GetChars();
const char* end = begin + str->length(); const char* end = begin + str->length();
return InternalStringToDouble(unicode_cache, begin, end, flags, return InternalStringToDouble(begin, end, flags, empty_string_val);
empty_string_val);
} else if (shape.IsSequentialTwoByte()) { } else if (shape.IsSequentialTwoByte()) {
const uc16* begin = SeqTwoByteString::cast(str)->GetChars(); const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
const uc16* end = begin + str->length(); const uc16* end = begin + str->length();
return InternalStringToDouble(unicode_cache, begin, end, flags, return InternalStringToDouble(begin, end, flags, empty_string_val);
empty_string_val);
} else { } else {
StringInputBuffer buffer(str); StringInputBuffer buffer(str);
return InternalStringToDouble(unicode_cache, return InternalStringToDouble(StringInputBufferIterator(&buffer),
StringInputBufferIterator(&buffer),
StringInputBufferIterator::EndMarker(), StringInputBufferIterator::EndMarker(),
flags, flags,
empty_string_val); empty_string_val);
@ -695,52 +668,36 @@ double StringToDouble(UnicodeCache* unicode_cache,
} }
double StringToInt(UnicodeCache* unicode_cache, double StringToInt(String* str, int radix) {
String* str,
int radix) {
StringShape shape(str); StringShape shape(str);
if (shape.IsSequentialAscii()) { if (shape.IsSequentialAscii()) {
const char* begin = SeqAsciiString::cast(str)->GetChars(); const char* begin = SeqAsciiString::cast(str)->GetChars();
const char* end = begin + str->length(); const char* end = begin + str->length();
return InternalStringToInt(unicode_cache, begin, end, radix); return InternalStringToInt(begin, end, radix);
} else if (shape.IsSequentialTwoByte()) { } else if (shape.IsSequentialTwoByte()) {
const uc16* begin = SeqTwoByteString::cast(str)->GetChars(); const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
const uc16* end = begin + str->length(); const uc16* end = begin + str->length();
return InternalStringToInt(unicode_cache, begin, end, radix); return InternalStringToInt(begin, end, radix);
} else { } else {
StringInputBuffer buffer(str); StringInputBuffer buffer(str);
return InternalStringToInt(unicode_cache, return InternalStringToInt(StringInputBufferIterator(&buffer),
StringInputBufferIterator(&buffer),
StringInputBufferIterator::EndMarker(), StringInputBufferIterator::EndMarker(),
radix); radix);
} }
} }
double StringToDouble(UnicodeCache* unicode_cache, double StringToDouble(const char* str, int flags, double empty_string_val) {
const char* str, int flags, double empty_string_val) {
const char* end = str + StrLength(str); const char* end = str + StrLength(str);
return InternalStringToDouble(unicode_cache, str, end, flags, return InternalStringToDouble(str, end, flags, empty_string_val);
empty_string_val);
} }
double StringToDouble(UnicodeCache* unicode_cache, double StringToDouble(Vector<const char> str,
Vector<const char> str,
int flags, int flags,
double empty_string_val) { double empty_string_val) {
const char* end = str.start() + str.length(); const char* end = str.start() + str.length();
return InternalStringToDouble(unicode_cache, str.start(), end, flags, return InternalStringToDouble(str.start(), end, flags, empty_string_val);
empty_string_val);
}
double StringToDouble(UnicodeCache* unicode_cache,
Vector<const uc16> str,
int flags,
double empty_string_val) {
const uc16* end = str.start() + str.length();
return InternalStringToDouble(unicode_cache, str.start(), end, flags,
empty_string_val);
} }
@@ -1109,23 +1066,4 @@ char* DoubleToRadixCString(double value, int radix) {
 }

-static Mutex* dtoa_lock_one = OS::CreateMutex();
-static Mutex* dtoa_lock_zero = OS::CreateMutex();
-
 } }  // namespace v8::internal
-
-extern "C" {
-void ACQUIRE_DTOA_LOCK(int n) {
-  ASSERT(n == 0 || n == 1);
-  (n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->Lock();
-}
-
-void FREE_DTOA_LOCK(int n) {
-  ASSERT(n == 0 || n == 1);
-  (n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->
-      Unlock();
-}
-}
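
Context for the block removed above: David Gay's dtoa/strtod keeps global state and, when compiled with MULTIPLE_THREADS, expects the embedder to provide ACQUIRE_DTOA_LOCK/FREE_DTOA_LOCK hooks for its two lock ids. The shape of those hooks in portable C++ (illustrative; V8 used its own OS::CreateMutex abstraction rather than std::mutex):

#include <mutex>

// One process-wide mutex per dtoa lock id (0 and 1).
static std::mutex dtoa_locks[2];

extern "C" void ACQUIRE_DTOA_LOCK(int n) { dtoa_locks[n].lock(); }
extern "C" void FREE_DTOA_LOCK(int n) { dtoa_locks[n].unlock(); }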

23
deps/v8/src/conversions.h

@@ -1,4 +1,4 @@
-// Copyright 2011 the V8 project authors. All rights reserved.
+// Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
@@ -28,8 +28,6 @@
 #ifndef V8_CONVERSIONS_H_
 #define V8_CONVERSIONS_H_

-#include "scanner-base.h"
-
 namespace v8 {
 namespace internal {
@@ -93,26 +91,15 @@ static inline uint32_t NumberToUint32(Object* number);
 // Converts a string into a double value according to ECMA-262 9.3.1
-double StringToDouble(UnicodeCache* unicode_cache,
-                      String* str,
-                      int flags,
-                      double empty_string_val = 0);
-double StringToDouble(UnicodeCache* unicode_cache,
-                      Vector<const char> str,
-                      int flags,
-                      double empty_string_val = 0);
-double StringToDouble(UnicodeCache* unicode_cache,
-                      Vector<const uc16> str,
+double StringToDouble(String* str, int flags, double empty_string_val = 0);
+double StringToDouble(Vector<const char> str,
                       int flags,
                       double empty_string_val = 0);

 // This version expects a zero-terminated character array.
-double StringToDouble(UnicodeCache* unicode_cache,
-                      const char* str,
-                      int flags,
-                      double empty_string_val = 0);
+double StringToDouble(const char* str, int flags, double empty_string_val = 0);

 // Converts a string into an integer.
-double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
+double StringToInt(String* str, int radix);

 // Converts a double to a string value according to ECMA-262 9.8.1.
 // The buffer should be large enough for any floating point number.
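
To make the contract of these declarations concrete: flags controls whether trailing junk after the number is tolerated, and empty_string_val is what an empty or all-whitespace input maps to. A rough standalone approximation using std::strtod (illustrative only; V8's parser implements ECMA-262 9.3.1 exactly and this sketch does not):

#include <cmath>
#include <cstdio>
#include <cstdlib>

double ToDoubleLike(const char* str, bool allow_trailing_junk,
                    double empty_string_val = 0) {
  char* end = nullptr;
  double value = std::strtod(str, &end);
  if (end == str) {
    // No digits consumed: empty/whitespace-only input yields
    // empty_string_val, anything else is junk.
    while (*str == ' ' || *str == '\t' || *str == '\n') ++str;
    return *str == '\0' ? empty_string_val : NAN;
  }
  if (!allow_trailing_junk && *end != '\0') return NAN;
  return value;
}

int main() {
  std::printf("%g\n", ToDoubleLike("3.5", false));     // 3.5
  std::printf("%g\n", ToDoubleLike("3.5abc", true));   // 3.5
  std::printf("%g\n", ToDoubleLike("3.5abc", false));  // nan
}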

23
deps/v8/src/counters.cc

@@ -28,22 +28,14 @@
 #include "v8.h"

 #include "counters.h"
-#include "isolate.h"
 #include "platform.h"

 namespace v8 {
 namespace internal {

-StatsTable::StatsTable()
-    : lookup_function_(NULL),
-      create_histogram_function_(NULL),
-      add_histogram_sample_function_(NULL) {}
-
-int* StatsCounter::FindLocationInStatsTable() const {
-  return Isolate::Current()->stats_table()->FindLocation(name_);
-}
+CounterLookupCallback StatsTable::lookup_function_ = NULL;
+CreateHistogramCallback StatsTable::create_histogram_function_ = NULL;
+AddHistogramSampleCallback StatsTable::add_histogram_sample_function_ = NULL;

 // Start the timer.
 void StatsCounterTimer::Start() {
@@ -79,15 +71,8 @@ void HistogramTimer::Stop() {
     // Compute the delta between start and stop, in milliseconds.
     int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
-    Isolate::Current()->stats_table()->
-        AddHistogramSample(histogram_, milliseconds);
+    StatsTable::AddHistogramSample(histogram_, milliseconds);
   }
 }

-void* HistogramTimer::CreateHistogram() const {
-  return Isolate::Current()->stats_table()->
-      CreateHistogram(name_, 0, 10000, 50);
-}
-
 } }  // namespace v8::internal

38
deps/v8/src/counters.h

@@ -38,27 +38,27 @@ namespace internal {
 // counters for monitoring. Counters can be looked up and
 // manipulated by name.

-class StatsTable {
+class StatsTable : public AllStatic {
  public:
   // Register an application-defined function where
   // counters can be looked up.
-  void SetCounterFunction(CounterLookupCallback f) {
+  static void SetCounterFunction(CounterLookupCallback f) {
     lookup_function_ = f;
   }

   // Register an application-defined function to create
   // a histogram for passing to the AddHistogramSample function
-  void SetCreateHistogramFunction(CreateHistogramCallback f) {
+  static void SetCreateHistogramFunction(CreateHistogramCallback f) {
     create_histogram_function_ = f;
   }

   // Register an application-defined function to add a sample
   // to a histogram created with CreateHistogram function
-  void SetAddHistogramSampleFunction(AddHistogramSampleCallback f) {
+  static void SetAddHistogramSampleFunction(AddHistogramSampleCallback f) {
     add_histogram_sample_function_ = f;
   }

-  bool HasCounterFunction() const {
+  static bool HasCounterFunction() {
     return lookup_function_ != NULL;
   }
@@ -68,7 +68,7 @@ class StatsTable {
   // may receive a different location to store it's counter.
   // The return value must not be cached and re-used across
   // threads, although a single thread is free to cache it.
-  int* FindLocation(const char* name) {
+  static int* FindLocation(const char* name) {
     if (!lookup_function_) return NULL;
     return lookup_function_(name);
   }
@@ -78,7 +78,7 @@ class StatsTable {
   // function. min and max define the expected minimum and maximum
   // sample values. buckets is the maximum number of buckets
   // that the samples will be grouped into.
-  void* CreateHistogram(const char* name,
+  static void* CreateHistogram(const char* name,
                         int min,
                         int max,
                         size_t buckets) {
@@ -88,21 +88,15 @@ class StatsTable {

   // Add a sample to a histogram created with the CreateHistogram
   // function.
-  void AddHistogramSample(void* histogram, int sample) {
+  static void AddHistogramSample(void* histogram, int sample) {
     if (!add_histogram_sample_function_) return;
     return add_histogram_sample_function_(histogram, sample);
   }

  private:
-  StatsTable();
-
-  CounterLookupCallback lookup_function_;
-  CreateHistogramCallback create_histogram_function_;
-  AddHistogramSampleCallback add_histogram_sample_function_;
-
-  friend class Isolate;
-
-  DISALLOW_COPY_AND_ASSIGN(StatsTable);
+  static CounterLookupCallback lookup_function_;
+  static CreateHistogramCallback create_histogram_function_;
+  static AddHistogramSampleCallback add_histogram_sample_function_;
 };
@@ -172,12 +166,9 @@ struct StatsCounter {
     if (lookup_done_)
       return ptr_;
     lookup_done_ = true;
-    ptr_ = FindLocationInStatsTable();
+    ptr_ = StatsTable::FindLocation(name_);
     return ptr_;
   }
-
- private:
-  int* FindLocationInStatsTable() const;
 };

 // StatsCounterTimer t = { { L"t:foo", NULL, false }, 0, 0 };
@@ -225,13 +216,10 @@ struct HistogramTimer {
   void* GetHistogram() {
     if (!lookup_done_) {
       lookup_done_ = true;
-      histogram_ = CreateHistogram();
+      histogram_ = StatsTable::CreateHistogram(name_, 0, 10000, 50);
     }
     return histogram_;
   }
-
- private:
-  void* CreateHistogram() const;
 };

 // Helper class for scoping a HistogramTimer.
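
With StatsTable reverted to AllStatic, counters are process-global again and an embedder wires them up by registering a callback that returns a stable int slot per counter name. A minimal sketch against the public v8.h entry point (illustrative; node's real counter table lives elsewhere):

#include <map>
#include <string>
#include <v8.h>

static std::map<std::string, int> counters;

// Returns a stable address for the named slot; V8 increments it directly.
static int* LookupCounter(const char* name) {
  return &counters[name];
}

void InstallCounters() {
  v8::V8::SetCounterFunction(LookupCounter);
}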

22
deps/v8/src/cpu-profiler-inl.h

@@ -32,7 +32,6 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING

-#include <new>
 #include "circular-queue-inl.h"
 #include "profile-generator-inl.h"
 #include "unbound-queue-inl.h"
@@ -42,8 +41,8 @@ namespace internal {

 void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) {
   code_map->AddCode(start, entry, size);
-  if (shared != NULL) {
-    entry->set_shared_id(code_map->GetSharedId(shared));
+  if (sfi_address != NULL) {
+    entry->set_shared_id(code_map->GetSFITag(sfi_address));
   }
 }
@@ -58,15 +57,28 @@ void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) {
 }

-void SharedFunctionInfoMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
+void SFIMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
   code_map->MoveCode(from, to);
 }

+TickSampleEventRecord* TickSampleEventRecord::init(void* value) {
+  TickSampleEventRecord* result =
+      reinterpret_cast<TickSampleEventRecord*>(value);
+  result->filler = 1;
+  ASSERT(result->filler != SamplingCircularQueue::kClear);
+  // Init the required fields only.
+  result->sample.pc = NULL;
+  result->sample.frames_count = 0;
+  return result;
+}
+
 TickSample* ProfilerEventsProcessor::TickSampleEvent() {
   generator_->Tick();
   TickSampleEventRecord* evt =
-      new(ticks_buffer_.Enqueue()) TickSampleEventRecord(enqueue_order_);
+      TickSampleEventRecord::init(ticks_buffer_.Enqueue());
+  evt->order = enqueue_order_;  // No increment!
   return &evt->sample;
 }
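
The move from placement new back to a hand-rolled init() reflects where the record lives: a slot inside a lock-free sampling queue, where the consumer tells a live record from an empty slot by the leading filler word, so the producer only stamps filler plus the fields the sampler will read. The idea in miniature (illustrative types, not V8's):

#include <cstddef>

struct Record {
  int filler;        // 0 (kClear) means empty slot; nonzero means live
  void* pc;
  int frames_count;
};

static Record* InitRecord(void* slot) {
  Record* r = static_cast<Record*>(slot);
  r->filler = 1;       // marker distinguishing the record from kClear
  r->pc = nullptr;     // init only the fields the consumer relies on
  r->frames_count = 0;
  return r;
}

int main() {
  alignas(Record) unsigned char slot[sizeof(Record)] = {};
  return InitRecord(slot)->filler == 1 ? 0 : 1;
}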

188
deps/v8/src/cpu-profiler.cc

@@ -69,7 +69,7 @@ void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
   rec->start = start;
   rec->entry = generator_->NewCodeEntry(tag, prefix, name);
   rec->size = 1;
-  rec->shared = NULL;
+  rec->sfi_address = NULL;
   events_buffer_.Enqueue(evt_rec);
 }
@@ -80,7 +80,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                               int line_number,
                                               Address start,
                                               unsigned size,
-                                              Address shared) {
+                                              Address sfi_address) {
   if (FilterOutCodeCreateEvent(tag)) return;
   CodeEventsContainer evt_rec;
   CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@@ -89,7 +89,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
   rec->start = start;
   rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
   rec->size = size;
-  rec->shared = shared;
+  rec->sfi_address = sfi_address;
   events_buffer_.Enqueue(evt_rec);
 }
@@ -106,7 +106,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
   rec->start = start;
   rec->entry = generator_->NewCodeEntry(tag, name);
   rec->size = size;
-  rec->shared = NULL;
+  rec->sfi_address = NULL;
   events_buffer_.Enqueue(evt_rec);
 }
@@ -123,7 +123,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
   rec->start = start;
   rec->entry = generator_->NewCodeEntry(tag, args_count);
   rec->size = size;
-  rec->shared = NULL;
+  rec->sfi_address = NULL;
   events_buffer_.Enqueue(evt_rec);
 }
@@ -149,12 +149,10 @@ void ProfilerEventsProcessor::CodeDeleteEvent(Address from) {
 }

-void ProfilerEventsProcessor::SharedFunctionInfoMoveEvent(Address from,
-                                                          Address to) {
+void ProfilerEventsProcessor::SFIMoveEvent(Address from, Address to) {
   CodeEventsContainer evt_rec;
-  SharedFunctionInfoMoveEventRecord* rec =
-      &evt_rec.SharedFunctionInfoMoveEventRecord_;
-  rec->type = CodeEventRecord::SHARED_FUNC_MOVE;
+  SFIMoveEventRecord* rec = &evt_rec.SFIMoveEventRecord_;
+  rec->type = CodeEventRecord::SFI_MOVE;
   rec->order = ++enqueue_order_;
   rec->from = from;
   rec->to = to;
@@ -181,16 +179,18 @@ void ProfilerEventsProcessor::RegExpCodeCreateEvent(

 void ProfilerEventsProcessor::AddCurrentStack() {
-  TickSampleEventRecord record(enqueue_order_);
+  TickSampleEventRecord record;
   TickSample* sample = &record.sample;
-  Isolate* isolate = Isolate::Current();
-  sample->state = isolate->current_vm_state();
+  sample->state = Top::current_vm_state();
   sample->pc = reinterpret_cast<Address>(sample);  // Not NULL.
-  for (StackTraceFrameIterator it(isolate);
+  sample->tos = NULL;
+  sample->frames_count = 0;
+  for (StackTraceFrameIterator it;
        !it.done() && sample->frames_count < TickSample::kMaxFramesCount;
        it.Advance()) {
     sample->stack[sample->frames_count++] = it.frame()->pc();
   }
+  record.order = enqueue_order_;
   ticks_from_vm_buffer_.Enqueue(record);
 }
@@ -239,7 +239,7 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
       // A paranoid check to make sure that we don't get a memory overrun
       // in case of frames_count having a wild value.
       if (record.sample.frames_count < 0
-          || record.sample.frames_count > TickSample::kMaxFramesCount)
+          || record.sample.frames_count >= TickSample::kMaxFramesCount)
         record.sample.frames_count = 0;
       generator_->RecordTickSample(record.sample);
       ticks_buffer_.FinishDequeue();
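
A subtlety worth flagging: the producer loop in AddCurrentStack stops at frames_count < TickSample::kMaxFramesCount, so a completely full sample carries frames_count == kMaxFramesCount, and the two sides of this hunk disagree on whether such a sample is trusted (the 3.1.8 side zeroes it). The general shape of this kind of defensive clamp (illustrative):

// Validate a count read from a shared ring-buffer record before it bounds
// array accesses; the legal range must mirror the producer's invariant,
// here [0, max_count] when the producer fills at most max_count entries.
static int SanitizeFramesCount(int count, int max_count) {
  return (count < 0 || count > max_count) ? 0 : count;
}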
@@ -270,109 +270,82 @@ void ProfilerEventsProcessor::Run() {
 }

+CpuProfiler* CpuProfiler::singleton_ = NULL;
+Atomic32 CpuProfiler::is_profiling_ = false;
+
 void CpuProfiler::StartProfiling(const char* title) {
-  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
-  Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
+  ASSERT(singleton_ != NULL);
+  singleton_->StartCollectingProfile(title);
 }

 void CpuProfiler::StartProfiling(String* title) {
-  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
-  Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
+  ASSERT(singleton_ != NULL);
+  singleton_->StartCollectingProfile(title);
 }

 CpuProfile* CpuProfiler::StopProfiling(const char* title) {
-  Isolate* isolate = Isolate::Current();
-  return is_profiling(isolate) ?
-      isolate->cpu_profiler()->StopCollectingProfile(title) : NULL;
+  return is_profiling() ? singleton_->StopCollectingProfile(title) : NULL;
 }

 CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
-  Isolate* isolate = Isolate::Current();
-  return is_profiling(isolate) ?
-      isolate->cpu_profiler()->StopCollectingProfile(
-          security_token, title) : NULL;
+  return is_profiling() ?
+      singleton_->StopCollectingProfile(security_token, title) : NULL;
 }

 int CpuProfiler::GetProfilesCount() {
-  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
+  ASSERT(singleton_ != NULL);
   // The count of profiles doesn't depend on a security token.
-  return Isolate::Current()->cpu_profiler()->profiles_->Profiles(
-      TokenEnumerator::kNoSecurityToken)->length();
+  return singleton_->profiles_->Profiles(
+      TokenEnumerator::kNoSecurityToken)->length();
 }

 CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
-  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
-  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
-  const int token = profiler->token_enumerator_->GetTokenId(security_token);
-  return profiler->profiles_->Profiles(token)->at(index);
+  ASSERT(singleton_ != NULL);
+  const int token = singleton_->token_enumerator_->GetTokenId(security_token);
+  return singleton_->profiles_->Profiles(token)->at(index);
 }

 CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
-  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
-  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
-  const int token = profiler->token_enumerator_->GetTokenId(security_token);
-  return profiler->profiles_->GetProfile(token, uid);
+  ASSERT(singleton_ != NULL);
+  const int token = singleton_->token_enumerator_->GetTokenId(security_token);
+  return singleton_->profiles_->GetProfile(token, uid);
 }

-TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
-  if (CpuProfiler::is_profiling(isolate)) {
-    return isolate->cpu_profiler()->processor_->TickSampleEvent();
+TickSample* CpuProfiler::TickSampleEvent() {
+  if (CpuProfiler::is_profiling()) {
+    return singleton_->processor_->TickSampleEvent();
   } else {
     return NULL;
   }
 }

-void CpuProfiler::DeleteAllProfiles() {
-  Isolate* isolate = Isolate::Current();
-  ASSERT(isolate->cpu_profiler() != NULL);
-  if (is_profiling(isolate)) {
-    isolate->cpu_profiler()->StopProcessor();
-  }
-  isolate->cpu_profiler()->ResetProfiles();
-}
-
-void CpuProfiler::DeleteProfile(CpuProfile* profile) {
-  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
-  Isolate::Current()->cpu_profiler()->profiles_->RemoveProfile(profile);
-  delete profile;
-}
-
-bool CpuProfiler::HasDetachedProfiles() {
-  ASSERT(Isolate::Current()->cpu_profiler() != NULL);
-  return Isolate::Current()->cpu_profiler()->profiles_->HasDetachedProfiles();
-}
-
 void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
-  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
+  singleton_->processor_->CallbackCreateEvent(
       Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
 }

 void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                   Code* code, const char* comment) {
-  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
+  singleton_->processor_->CodeCreateEvent(
       tag, comment, code->address(), code->ExecutableSize());
 }

 void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                   Code* code, String* name) {
-  Isolate* isolate = Isolate::Current();
-  isolate->cpu_profiler()->processor_->CodeCreateEvent(
+  singleton_->processor_->CodeCreateEvent(
       tag,
       name,
-      isolate->heap()->empty_string(),
+      Heap::empty_string(),
       v8::CpuProfileNode::kNoLineNumberInfo,
       code->address(),
       code->ExecutableSize(),
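
The hunk above restores the pre-isolates design: one process-wide CpuProfiler behind a static pointer, guarded by an atomic flag that sampler code, which may run in a signal handler, can poll without locks. Roughly the same pattern in portable modern C++ (illustrative, not the V8 classes):

#include <atomic>

class Profiler {
 public:
  static void Setup() { if (instance_ == nullptr) instance_ = new Profiler(); }
  static void TearDown() { delete instance_; instance_ = nullptr; }
  // Safe to call from a signal handler: a single relaxed atomic load.
  static bool is_profiling() {
    return is_profiling_.load(std::memory_order_relaxed);
  }

 private:
  static Profiler* instance_;
  static std::atomic<bool> is_profiling_;  // analogue of NoBarrier_Load/Store
};

Profiler* Profiler::instance_ = nullptr;
std::atomic<bool> Profiler::is_profiling_{false};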
@@ -384,11 +357,10 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                   Code* code,
                                   SharedFunctionInfo* shared,
                                   String* name) {
-  Isolate* isolate = Isolate::Current();
-  isolate->cpu_profiler()->processor_->CodeCreateEvent(
+  singleton_->processor_->CodeCreateEvent(
       tag,
       name,
-      isolate->heap()->empty_string(),
+      Heap::empty_string(),
       v8::CpuProfileNode::kNoLineNumberInfo,
       code->address(),
       code->ExecutableSize(),
@@ -400,7 +372,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                   Code* code,
                                   SharedFunctionInfo* shared,
                                   String* source, int line) {
-  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
+  singleton_->processor_->CodeCreateEvent(
       tag,
       shared->DebugName(),
       source,
@@ -413,7 +385,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
 void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
                                   Code* code, int args_count) {
-  Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
+  singleton_->processor_->CodeCreateEvent(
       tag,
       args_count,
       code->address(),
@@ -422,29 +394,28 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,

 void CpuProfiler::CodeMoveEvent(Address from, Address to) {
-  Isolate::Current()->cpu_profiler()->processor_->CodeMoveEvent(from, to);
+  singleton_->processor_->CodeMoveEvent(from, to);
 }

 void CpuProfiler::CodeDeleteEvent(Address from) {
-  Isolate::Current()->cpu_profiler()->processor_->CodeDeleteEvent(from);
+  singleton_->processor_->CodeDeleteEvent(from);
 }

-void CpuProfiler::SharedFunctionInfoMoveEvent(Address from, Address to) {
-  CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
-  profiler->processor_->SharedFunctionInfoMoveEvent(from, to);
+void CpuProfiler::SFIMoveEvent(Address from, Address to) {
+  singleton_->processor_->SFIMoveEvent(from, to);
 }

 void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
-  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
+  singleton_->processor_->CallbackCreateEvent(
       Logger::CALLBACK_TAG, "get ", name, entry_point);
 }

 void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
-  Isolate::Current()->cpu_profiler()->processor_->RegExpCodeCreateEvent(
+  singleton_->processor_->RegExpCodeCreateEvent(
       Logger::REG_EXP_TAG,
       "RegExp: ",
       source,
@@ -454,7 +425,7 @@ void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {

 void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
-  Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
+  singleton_->processor_->CallbackCreateEvent(
       Logger::CALLBACK_TAG, "set ", name, entry_point);
 }
@@ -464,9 +435,7 @@ CpuProfiler::CpuProfiler()
       next_profile_uid_(1),
       token_enumerator_(new TokenEnumerator()),
       generator_(NULL),
-      processor_(NULL),
-      need_to_stop_sampler_(false),
-      is_profiling_(false) {
+      processor_(NULL) {
 }
@@ -476,11 +445,6 @@ CpuProfiler::~CpuProfiler() {
 }

-void CpuProfiler::ResetProfiles() {
-  delete profiles_;
-  profiles_ = new CpuProfilesCollection();
-}
-
 void CpuProfiler::StartCollectingProfile(const char* title) {
   if (profiles_->StartProfiling(title, next_profile_uid_++)) {
     StartProcessorIfNotStarted();
@@ -496,32 +460,27 @@ void CpuProfiler::StartCollectingProfile(String* title) {

 void CpuProfiler::StartProcessorIfNotStarted() {
   if (processor_ == NULL) {
-    Isolate* isolate = Isolate::Current();
     // Disable logging when using the new implementation.
-    saved_logging_nesting_ = isolate->logger()->logging_nesting_;
-    isolate->logger()->logging_nesting_ = 0;
+    saved_logging_nesting_ = Logger::logging_nesting_;
+    Logger::logging_nesting_ = 0;
     generator_ = new ProfileGenerator(profiles_);
     processor_ = new ProfilerEventsProcessor(generator_);
     NoBarrier_Store(&is_profiling_, true);
     processor_->Start();
     // Enumerate stuff we already have in the heap.
-    if (isolate->heap()->HasBeenSetup()) {
+    if (Heap::HasBeenSetup()) {
       if (!FLAG_prof_browser_mode) {
         bool saved_log_code_flag = FLAG_log_code;
         FLAG_log_code = true;
-        isolate->logger()->LogCodeObjects();
+        Logger::LogCodeObjects();
         FLAG_log_code = saved_log_code_flag;
       }
-      isolate->logger()->LogCompiledFunctions();
-      isolate->logger()->LogAccessorCallbacks();
+      Logger::LogCompiledFunctions();
+      Logger::LogAccessorCallbacks();
     }
     // Enable stack sampling.
-    Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
-    if (!sampler->IsActive()) {
-      sampler->Start();
-      need_to_stop_sampler_ = true;
-    }
+    Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
+    if (!sampler->IsActive()) sampler->Start();
     sampler->IncreaseProfilingDepth();
   }
 }
@@ -552,18 +511,10 @@ CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,

 void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
-  if (profiles_->IsLastProfile(title)) StopProcessor();
-}
-
-void CpuProfiler::StopProcessor() {
-  Logger* logger = Isolate::Current()->logger();
-  Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
-  sampler->DecreaseProfilingDepth();
-  if (need_to_stop_sampler_) {
-    sampler->Stop();
-    need_to_stop_sampler_ = false;
-  }
-  processor_->Stop();
-  processor_->Join();
-  delete processor_;
+  if (profiles_->IsLastProfile(title)) {
+    Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
+    sampler->DecreaseProfilingDepth();
+    sampler->Stop();
+    processor_->Stop();
+    processor_->Join();
+    delete processor_;
@@ -571,7 +522,8 @@ void CpuProfiler::StopProcessor() {
-  processor_ = NULL;
-  NoBarrier_Store(&is_profiling_, false);
-  generator_ = NULL;
-  logger->logging_nesting_ = saved_logging_nesting_;
+    processor_ = NULL;
+    NoBarrier_Store(&is_profiling_, false);
+    generator_ = NULL;
+    Logger::logging_nesting_ = saved_logging_nesting_;
+  }
 }

 } }  // namespace v8::internal
@@ -583,9 +535,8 @@ namespace internal {

 void CpuProfiler::Setup() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  Isolate* isolate = Isolate::Current();
-  if (isolate->cpu_profiler() == NULL) {
-    isolate->set_cpu_profiler(new CpuProfiler());
+  if (singleton_ == NULL) {
+    singleton_ = new CpuProfiler();
   }
 #endif
 }
@@ -593,11 +544,10 @@ void CpuProfiler::Setup() {

 void CpuProfiler::TearDown() {
 #ifdef ENABLE_LOGGING_AND_PROFILING
-  Isolate* isolate = Isolate::Current();
-  if (isolate->cpu_profiler() != NULL) {
-    delete isolate->cpu_profiler();
+  if (singleton_ != NULL) {
+    delete singleton_;
   }
-  isolate->set_cpu_profiler(NULL);
+  singleton_ = NULL;
 #endif
 }

56
deps/v8/src/cpu-profiler.h

@@ -30,7 +30,6 @@
 #ifdef ENABLE_LOGGING_AND_PROFILING

-#include "allocation.h"
 #include "atomicops.h"
 #include "circular-queue.h"
 #include "unbound-queue.h"
@@ -51,7 +50,7 @@ class TokenEnumerator;
   V(CODE_CREATION, CodeCreateEventRecord)                   \
   V(CODE_MOVE,     CodeMoveEventRecord)                     \
   V(CODE_DELETE,   CodeDeleteEventRecord)                   \
-  V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)
+  V(SFI_MOVE,      SFIMoveEventRecord)
@@ -74,7 +73,7 @@ class CodeCreateEventRecord : public CodeEventRecord {
   Address start;
   CodeEntry* entry;
   unsigned size;
-  Address shared;
+  Address sfi_address;

   INLINE(void UpdateCodeMap(CodeMap* code_map));
 };
@@ -97,7 +96,7 @@ class CodeDeleteEventRecord : public CodeEventRecord {
 };

-class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
+class SFIMoveEventRecord : public CodeEventRecord {
  public:
   Address from;
   Address to;
@@ -106,14 +105,10 @@ class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
 };

-class TickSampleEventRecord {
+class TickSampleEventRecord BASE_EMBEDDED {
  public:
-  // The parameterless constructor is used when we dequeue data from
-  // the ticks buffer.
-  TickSampleEventRecord() { }
-  explicit TickSampleEventRecord(unsigned order)
-      : filler(1),
-        order(order) {
+  TickSampleEventRecord()
+      : filler(1) {
     ASSERT(filler != SamplingCircularQueue::kClear);
   }
@@ -129,6 +124,8 @@ class TickSampleEventRecord {
   static TickSampleEventRecord* cast(void* value) {
     return reinterpret_cast<TickSampleEventRecord*>(value);
   }
+
+  INLINE(static TickSampleEventRecord* init(void* value));
 };
@@ -152,7 +149,7 @@ class ProfilerEventsProcessor : public Thread {
                        String* name,
                        String* resource_name, int line_number,
                        Address start, unsigned size,
-                       Address shared);
+                       Address sfi_address);
   void CodeCreateEvent(Logger::LogEventsAndTags tag,
                        const char* name,
                        Address start, unsigned size);
@@ -161,7 +158,7 @@ class ProfilerEventsProcessor : public Thread {
                        Address start, unsigned size);
   void CodeMoveEvent(Address from, Address to);
   void CodeDeleteEvent(Address from);
-  void SharedFunctionInfoMoveEvent(Address from, Address to);
+  void SFIMoveEvent(Address from, Address to);
   void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
                              const char* prefix, String* name,
                              Address start, unsigned size);
@@ -199,23 +196,21 @@ class ProfilerEventsProcessor : public Thread {

 } }  // namespace v8::internal

-#define PROFILE(isolate, Call)                          \
-  LOG(isolate, Call);                                   \
+#define PROFILE(Call)                                   \
+  LOG(Call);                                            \
   do {                                                  \
-    if (v8::internal::CpuProfiler::is_profiling(isolate)) { \
+    if (v8::internal::CpuProfiler::is_profiling()) {    \
       v8::internal::CpuProfiler::Call;                  \
     }                                                   \
   } while (false)
 #else
-#define PROFILE(isolate, Call) LOG(isolate, Call)
+#define PROFILE(Call) LOG(Call)
 #endif  // ENABLE_LOGGING_AND_PROFILING

 namespace v8 {
 namespace internal {

-// TODO(isolates): isolatify this class.
 class CpuProfiler {
  public:
   static void Setup();
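
The do { ... } while (false) wrapper in PROFILE is the standard idiom for making a multi-statement macro behave as a single statement, so a call followed by a semicolon nests correctly under if/else. A self-contained illustration (hypothetical macro, not from V8):

#include <cstdio>

#define LOG_IF_POSITIVE(x)      \
  do {                          \
    if ((x) > 0) {              \
      std::printf("%d\n", (x)); \
    }                           \
  } while (false)

int main() {
  int v = 1;
  if (v)
    LOG_IF_POSITIVE(v);  // expands to one statement; the ';' closes it
  else
    std::printf("never\n");
  return 0;
}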
@@ -229,12 +224,9 @@ class CpuProfiler {
   static int GetProfilesCount();
   static CpuProfile* GetProfile(Object* security_token, int index);
   static CpuProfile* FindProfile(Object* security_token, unsigned uid);
-  static void DeleteAllProfiles();
-  static void DeleteProfile(CpuProfile* profile);
-  static bool HasDetachedProfiles();

   // Invoked from stack sampler (thread or signal handler.)
-  static TickSample* TickSampleEvent(Isolate* isolate);
+  static TickSample* TickSampleEvent();

   // Must be called via PROFILE macro, otherwise will crash when
   // profiling is not enabled.
@@ -259,13 +251,10 @@ class CpuProfiler {
   static void GetterCallbackEvent(String* name, Address entry_point);
   static void RegExpCodeCreateEvent(Code* code, String* source);
   static void SetterCallbackEvent(String* name, Address entry_point);
-  static void SharedFunctionInfoMoveEvent(Address from, Address to);
+  static void SFIMoveEvent(Address from, Address to);

-  // TODO(isolates): this doesn't have to use atomics anymore.
-
-  static INLINE(bool is_profiling(Isolate* isolate)) {
-    CpuProfiler* profiler = isolate->cpu_profiler();
-    return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
+  static INLINE(bool is_profiling()) {
+    return NoBarrier_Load(&is_profiling_);
   }
@@ -277,8 +266,6 @@ class CpuProfiler {
   CpuProfile* StopCollectingProfile(const char* title);
   CpuProfile* StopCollectingProfile(Object* security_token, String* title);
   void StopProcessorIfLastProfile(const char* title);
-  void StopProcessor();
-  void ResetProfiles();

   CpuProfilesCollection* profiles_;
   unsigned next_profile_uid_;
@@ -286,11 +273,12 @@ class CpuProfiler {
   ProfileGenerator* generator_;
   ProfilerEventsProcessor* processor_;
   int saved_logging_nesting_;
-  bool need_to_stop_sampler_;
-  Atomic32 is_profiling_;
+
+  static CpuProfiler* singleton_;
+  static Atomic32 is_profiling_;

 #else
-  static INLINE(bool is_profiling(Isolate* isolate)) { return false; }
+  static INLINE(bool is_profiling()) { return false; }
 #endif  // ENABLE_LOGGING_AND_PROFILING

  private:

4
deps/v8/src/cpu.h

@@ -36,8 +36,6 @@
 #ifndef V8_CPU_H_
 #define V8_CPU_H_

-#include "allocation.h"
-
 namespace v8 {
 namespace internal {
@@ -55,8 +53,6 @@ class CPU : public AllStatic {
   // Initializes the cpu architecture support. Called once at VM startup.
   static void Setup();

-  static bool SupportsCrankshaft();
-
   // Flush instruction cache.
   static void FlushICache(void* start, size_t size);

2
deps/v8/src/d8-debug.cc

@@ -272,7 +272,6 @@ RemoteDebuggerEvent* RemoteDebugger::GetEvent() {

 void RemoteDebugger::HandleMessageReceived(char* message) {
-  Locker lock;
   HandleScope scope;

   // Print the event details.
@@ -301,7 +300,6 @@ void RemoteDebugger::HandleMessageReceived(char* message) {

 void RemoteDebugger::HandleKeyboardCommand(char* command) {
-  Locker lock;
   HandleScope scope;

   // Convert the debugger command to a JSON debugger request.

13
deps/v8/src/d8-posix.cc

@@ -311,6 +311,10 @@ static Handle<Value> GetStdout(int child_fd,
                                int read_timeout,
                                int total_timeout) {
   Handle<String> accumulator = String::Empty();
+  const char* source = "(function(a, b) { return a + b; })";
+  Handle<Value> cons_as_obj(Script::Compile(String::New(source))->Run());
+  Handle<Function> cons_function(Function::Cast(*cons_as_obj));
+  Handle<Value> cons_args[2];

   int fullness = 0;
   static const int kStdoutReadBufferSize = 4096;
@@ -346,7 +350,12 @@ static Handle<Value> GetStdout(int child_fd,
           bytes_read + fullness :
           LengthWithoutIncompleteUtf8(buffer, bytes_read + fullness);
       Handle<String> addition = String::New(buffer, length);
-      accumulator = String::Concat(accumulator, addition);
+      cons_args[0] = accumulator;
+      cons_args[1] = addition;
+      accumulator = Handle<String>::Cast(cons_function->Call(
+          Shell::utility_context()->Global(),
+          2,
+          cons_args));
       fullness = bytes_read + fullness - length;
       memcpy(buffer, buffer + length, fullness);
     }
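
This hunk swaps String::Concat for a compiled JavaScript (function(a, b) { return a + b; }) helper run in the shell's utility context; the diff does not record the motivation, but the accumulator strategy is the performance-sensitive part of a read loop like this one. For contrast, the usual constant-amortized approach in plain C++ (illustrative, unrelated to the V8 API):

#include <cstdio>
#include <string>
#include <vector>

// Collect chunks, then join once: one big allocation instead of re-copying
// the accumulator on every append (which is O(n^2) overall).
std::string JoinChunks(const std::vector<std::string>& chunks) {
  size_t total = 0;
  for (const std::string& c : chunks) total += c.size();
  std::string out;
  out.reserve(total);
  for (const std::string& c : chunks) out += c;
  return out;
}

int main() {
  std::vector<std::string> chunks = {"hel", "lo ", "world"};
  std::printf("%s\n", JoinChunks(chunks).c_str());
}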
@@ -366,10 +375,8 @@ static Handle<Value> GetStdout(int child_fd,
 // a parent process hangs on waiting while a child process is already a zombie.
 // See http://code.google.com/p/v8/issues/detail?id=401.
 #if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__)
-#if !defined(__FreeBSD__)
 #define HAS_WAITID 1
 #endif
-#endif

 // Get exit status of child.

2
deps/v8/src/d8-readline.cc

@@ -30,8 +30,6 @@
 #include <readline/readline.h>  // NOLINT
 #include <readline/history.h>   // NOLINT
-// The readline includes leaves RETURN defined which breaks V8 compilation.
-#undef RETURN

 #include "d8.h"

Some files were not shown because too many files changed in this diff
