
Upgrade V8 to 3.4.8

v0.7.4-release
Ryan Dahl, 14 years ago
commit 33af2720f2
Changed files (number of changed lines):
  1. deps/v8/.gitignore (4)
  2. deps/v8/AUTHORS (7)
  3. deps/v8/ChangeLog (428)
  4. deps/v8/SConstruct (561)
  5. deps/v8/include/v8-debug.h (45)
  6. deps/v8/include/v8-preparser.h (13)
  7. deps/v8/include/v8-profiler.h (188)
  8. deps/v8/include/v8-testing.h (5)
  9. deps/v8/include/v8.h (636)
  10. deps/v8/preparser/SConscript (38)
  11. deps/v8/preparser/preparser-process.cc (407)
  12. deps/v8/samples/process.cc (6)
  13. deps/v8/samples/shell.cc (453)
  14. deps/v8/src/SConscript (86)
  15. deps/v8/src/accessors.cc (309)
  16. deps/v8/src/accessors.h (2)
  17. deps/v8/src/allocation-inl.h (19)
  18. deps/v8/src/allocation.cc (82)
  19. deps/v8/src/allocation.h (51)
  20. deps/v8/src/api.cc (3485)
  21. deps/v8/src/api.h (111)
  22. deps/v8/src/apinatives.js (10)
  23. deps/v8/src/apiutils.h (7)
  24. deps/v8/src/arguments.h (29)
  25. deps/v8/src/arm/assembler-arm-inl.h (19)
  26. deps/v8/src/arm/assembler-arm.cc (692)
  27. deps/v8/src/arm/assembler-arm.h (253)
  28. deps/v8/src/arm/builtins-arm.cc (170)
  29. deps/v8/src/arm/code-stubs-arm.cc (3702)
  30. deps/v8/src/arm/code-stubs-arm.h (615)
  31. deps/v8/src/arm/codegen-arm-inl.h (48)
  32. deps/v8/src/arm/codegen-arm.cc (7360)
  33. deps/v8/src/arm/codegen-arm.h (512)
  34. deps/v8/src/arm/constants-arm.h (24)
  35. deps/v8/src/arm/cpu-arm.cc (93)
  36. deps/v8/src/arm/debug-arm.cc (14)
  37. deps/v8/src/arm/deoptimizer-arm.cc (80)
  38. deps/v8/src/arm/disasm-arm.cc (145)
  39. deps/v8/src/arm/frames-arm.h (5)
  40. deps/v8/src/arm/full-codegen-arm.cc (1069)
  41. deps/v8/src/arm/ic-arm.cc (685)
  42. deps/v8/src/arm/jump-target-arm.cc (174)
  43. deps/v8/src/arm/lithium-arm.cc (648)
  44. deps/v8/src/arm/lithium-arm.h (570)
  45. deps/v8/src/arm/lithium-codegen-arm.cc (1832)
  46. deps/v8/src/arm/lithium-codegen-arm.h (52)
  47. deps/v8/src/arm/lithium-gap-resolver-arm.cc (2)
  48. deps/v8/src/arm/macro-assembler-arm.cc (844)
  49. deps/v8/src/arm/macro-assembler-arm.h (217)
  50. deps/v8/src/arm/regexp-macro-assembler-arm.cc (45)
  51. deps/v8/src/arm/regexp-macro-assembler-arm.h (3)
  52. deps/v8/src/arm/simulator-arm.cc (555)
  53. deps/v8/src/arm/simulator-arm.h (87)
  54. deps/v8/src/arm/stub-cache-arm.cc (1558)
  55. deps/v8/src/arm/virtual-frame-arm.cc (843)
  56. deps/v8/src/arm/virtual-frame-arm.h (520)
  57. deps/v8/src/array.js (189)
  58. deps/v8/src/assembler.cc (593)
  59. deps/v8/src/assembler.h (334)
  60. deps/v8/src/ast-inl.h (11)
  61. deps/v8/src/ast.cc (393)
  62. deps/v8/src/ast.h (459)
  63. deps/v8/src/atomicops.h (2)
  64. deps/v8/src/atomicops_internals_mips_gcc.h (169)
  65. deps/v8/src/atomicops_internals_x86_gcc.cc (13)
  66. deps/v8/src/atomicops_internals_x86_gcc.h (6)
  67. deps/v8/src/bootstrapper.cc (1210)
  68. deps/v8/src/bootstrapper.h (123)
  69. deps/v8/src/builtins.cc (580)
  70. deps/v8/src/builtins.h (294)
  71. deps/v8/src/char-predicates.h (4)
  72. deps/v8/src/checks.cc (4)
  73. deps/v8/src/checks.h (4)
  74. deps/v8/src/code-stubs.cc (73)
  75. deps/v8/src/code-stubs.h (226)
  76. deps/v8/src/code.h (2)
  77. deps/v8/src/codegen.cc (315)
  78. deps/v8/src/codegen.h (163)
  79. deps/v8/src/compilation-cache.cc (298)
  80. deps/v8/src/compilation-cache.h (235)
  81. deps/v8/src/compiler.cc (340)
  82. deps/v8/src/compiler.h (64)
  83. deps/v8/src/contexts.cc (172)
  84. deps/v8/src/contexts.h (98)
  85. deps/v8/src/conversions-inl.h (4)
  86. deps/v8/src/conversions.cc (132)
  87. deps/v8/src/conversions.h (23)
  88. deps/v8/src/counters.cc (23)
  89. deps/v8/src/counters.h (44)
  90. deps/v8/src/cpu-profiler-inl.h (22)
  91. deps/v8/src/cpu-profiler.cc (202)
  92. deps/v8/src/cpu-profiler.h (70)
  93. deps/v8/src/cpu.h (4)
  94. deps/v8/src/d8-debug.cc (2)
  95. deps/v8/src/d8-posix.cc (13)
  96. deps/v8/src/d8-readline.cc (2)
  97. deps/v8/src/d8.cc (443)
  98. deps/v8/src/d8.gyp (8)
  99. deps/v8/src/d8.h (26)
  100. deps/v8/src/d8.js (10)

deps/v8/.gitignore (4 changed lines)

@ -19,13 +19,17 @@ d8
d8_g
shell
shell_g
/build/gyp
/obj/
/test/es5conform/data/
/test/mozilla/data/
/test/sputnik/sputniktests/
/test/test262/data/
/tools/oom_dump/oom_dump
/tools/oom_dump/oom_dump.o
/tools/visual_studio/Debug
/tools/visual_studio/Release
/xcodebuild/
TAGS
Makefile
*.Makefile

deps/v8/AUTHORS (7 changed lines)

@ -7,7 +7,9 @@ Google Inc.
Sigma Designs Inc.
ARM Ltd.
Hewlett-Packard Development Company, LP
Igalia, S.L.
Akinori MUSHA <knu@FreeBSD.org>
Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexander Karpinsky <homm86@gmail.com>
Alexandre Vassalotti <avassalotti@gmail.com>
@ -24,12 +26,14 @@ Jay Freeman <saurik@saurik.com>
Joel Stanley <joel.stan@gmail.com>
John Jozwiak <jjozwiak@codeaurora.org>
Kun Zhang <zhangk@codeaurora.org>
Matt Hanselman <mjhanselman@gmail.com>
Martyn Capewell <martyn.capewell@arm.com>
Matt Hanselman <mjhanselman@gmail.com>
Maxim Mossienko <maxim.mossienko@gmail.com>
Michael Smith <mike@w3.org>
Mike Gilbert <floppymaster@gmail.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Peter Varga <pvarga@inf.u-szeged.hu>
Rafal Krypa <rafal@krypa.net>
Rene Rebe <rene@exactcode.de>
Rodolph Perfetta <rodolph.perfetta@arm.com>
@ -37,4 +41,5 @@ Ryan Dahl <coldredlemur@gmail.com>
Sanjoy Das <sanjoy@playingwithpointers.com>
Subrato K De <subratokde@codeaurora.org>
Vlad Burlik <vladbph@gmail.com>
Yuqiang Xian <yuqiang.xian@intel.com>
Zaheer Ahmad <zahmad@codeaurora.org>

deps/v8/ChangeLog (428 changed lines)

@ -1,3 +1,428 @@
2011-06-29: Version 3.4.8
Ensure 16-byte stack alignment on Solaris (issue 1505).
Fix "illegal access" when calling parseInt with a radix
that is not a smi. (issue 1246).
2011-06-27: Version 3.4.7
Fixed 64-bit build on FreeBSD.
Added API to set the property attributes for the prototype
property on functions created from FunctionTemplates.
Bugfixes and performance work.
2011-06-22: Version 3.4.6
Lowered limit on code space for systems with low memory supply.
Allowed compiling v8_shell with the 'host' toolset (issue 82437).
Extended setBreakpoint API to accept partial script name (issue 1418).
Made multi-line comments not count when deciding whether the '-->'
comment starter is first on a line. This matches Safari.
Made handling of non-array receivers in Array length setter correct
(issue 1491).
Added ability to heap profiler to iterate over snapshot's node
(issue 1481).
2011-06-20: Version 3.4.5
Fixed issues 794, 1097, 1215(partial), 1417, 1435, 1472, 1473,
1476, and 1477.
Improved code generation for !0 and !1.
Reduced memory usage for regular expressions with nested qualifiers.
(issue 1472)
Fixed V8 to count line terminators in multi-line comments.
(Chromium issue 86431)
Fixed disassembler=on option for release-mode builds. (issue 1473)
Performance improvements on all platforms.
2011-06-15: Version 3.4.4
Added snapshot compression support and --stress-opt flag to d8.
Improved performance of try/catch.
Several GYP-related changes: Added support for building Xcode project
files. Make the ARM simulator build with GYP again. Generate Makefiles
for all architectures on Linux.
Fixed Array.prototype.{reduce,reduceRight} to pass undefined as the
receiver for strict mode callbacks. (issue 1436)
Fixed a bug where an array load was incorrectly hoisted by GVN.
Handle 'undefined' correctly when === has been specialized for doubles.
(issue 1434)
Corrected the limit of local variables in an optimized function from 64
to 63.
Correctly set ReadOnly flag on indexed properties when using the API Set
method. (issue 1470)
Give the correct error message when Object.isExtensible is called on a
non-object. (issue 1452)
Added GetOwnPropertyNames method for Object in the API. Patch by Peter
Varga.
Do not redefine properties unnecessarily in seal and freeze. (issue
1447)
IsExecutionTerminating has an Isolate parameter now.
Distinguish keyed loads with a symbol key from fast elements loads,
avoiding some useless deoptimizations. (issue 1471)
2011-06-08: Version 3.4.3
Clear the global thread table when an isolate is disposed
(issue 1433).
Converted time zone name to UTF8 on Windows (issue 1290).
Limited the number of arguments in a function call to 32766
(issue 1413).
Compress sources of JS libraries in addition to the snapshot.
Fixed a bug in Lithium environment iteration.
Performance improvements on all platforms.
2011-06-06: Version 3.4.2
More work on ES-Harmony proxies. Still hidden behind a flag.
Fixed some crash bugs and improved performance.
Fixed building with gdb debugging support.
Do not install SIGPROF handler until it is needed.
Added DateTimeFormat to i18n API.
Fixed compilation on OpenBSD.
Take the ulimit into account when sizing the heap. OpenBSD users
may still have to increase the default ulimit to run heavy pages in
the browser.
2011-06-01: Version 3.4.1
Fixed JSON stringify issue with arrays.
Changed calls to JS builtins to be passed undefined when called with
implicit receiver.
Implemented the set trap for Harmony proxies. Proxies still need to
be enabled with the --harmony-proxies flag.
2011-05-30: Version 3.4.0
Changed calls to undefined property setters to not throw (issue 1355).
Made RegExp objects not callable.
Fixed issues on special case large JSON strings in new json parser
(issues http://crbug.com/83877 and http://crbug.com/84186).
Performance improvements on all platforms.
2011-05-25: Version 3.3.10
Fixed calls of strict mode function with an implicit receiver.
Fixed fast handling of arrays to properly deal with changes to the
Object prototype (issue 1403).
Changed strict mode poison pill to be the same type error function
(issue 1387).
Fixed a debug crash in arguments object handling (issue 1227).
Fixed a bug in deoptimization on x64 (issue 1404).
Performance improvements and bug fixes on all platforms.
2011-05-23: Version 3.3.9
Added DateTimeFormat class to experimental i18n API.
Extended preparser to give early errors for some strict mode
restrictions.
Removed legacy execScript function from V8.
Extended isolate API with the ability to add embedder-specific
data to an isolate.
Added basic support for polymorphic loads from JS and external
arrays.
Fixed bug in handling of switch statements in the optimizing
compiler.
2011-05-18: Version 3.3.8
Added MarkIndependent to the persistent handle API. Independent
handles are independent of all other persistent handles and can be
garbage collected more frequently.
Implemented the get trap for Harmony proxies. Proxies are enabled
with the --harmony-proxies flag.
Performance improvements and bug fixes on all platforms.
2011-05-16: Version 3.3.7
Updated MIPS infrastructure files.
Performance improvements and bug fixes on all platforms.
2011-05-11: Version 3.3.6
Updated MIPS infrastructure files.
Added method IsCallable for Object to the API.
Patch by Peter Varga.
2011-05-09: Version 3.3.5
Fixed build on FreeBSD. Patch by Akinori MUSHA.
Added check that receiver is JSObject on API calls.
Implemented CallAsConstructor method for Object in the API (Issue 1348).
Patch by Peter Varga.
Added CallAsFunction method to the Object class in the API (Issue 1336).
Patch by Peter Varga.
Added per-isolate locking and unlocking.
Fixed bug in x64 >>> operator (Issue 1359).
2011-05-04: Version 3.3.4
Implemented API to disallow code generation from strings for a context
(issue 1258).
Fixed bug with whitespaces in parseInt (issue 955).
Fixed bug with == comparison of Date objects (issue 1356).
Added GYP variables for ARM code generation:
v8_can_use_vfp_instructions, v8_can_use_unaligned_accesses
and v8_use_arm_eabi_hardfloat.
2011-05-02: Version 3.3.3
Added support for generating Visual Studio solution and project files
using GYP.
Implemented support for ARM EABI calling convention variation where
floating-point arguments are passed in registers (hardfloat).
Added Object::HasOwnProperty() to the API.
Added support for compressing startup data to reduce binary size. This
includes build time support and an API for the embedder to decompress
the startup data before initializing V8.
Reduced the profiling hooks overhead from >400% to 25% when using
ll_prof.
Performance improvements and bug fixes on all platforms.
2011-04-27: Version 3.3.2
Fixed crash bug on ARM with no VFP3 hardware.
Fixed compilation of V8 without debugger support.
Improved performance on JSLint.
Added support Float64 WebGL arrays.
Fixed crash bug in regexp replace.
2011-04-20: Version 3.3.1
Reduced V8 binary size by removing virtual functions from hydrogen.
Fixed crash bug on x64.
Performance improvements on ARM and IA32.
2011-04-18: Version 3.3.0
Fixed bug in floating point rounding in Crankshaft on ARM
(issue 958)
Fixed a number of issues with running without VFPv3 support on ARM
(issue 1315)
Introduced v8Locale.Collator, a partial implementation of Collator
per last ECMAScript meeting + mailing list.
Minor performance improvements and bug fixes.
2011-04-13: Version 3.2.10
Fixed bug in external float arrays on ARM (issue 1323).
Minor performance improvements and bug fixes.
2011-04-11: Version 3.2.9
Removed support for ABI prior to EABI on ARM.
Fixed multiple crash bugs.
Added GCMole to the repository, a simple static analysis tool that
searches for GC-unsafe evaluation order dependent callsites.
Made preparser API be exported in shared libraries.
Fixed multiple issues in EcmaScript 5 strict mode implementation.
Fixed mutable __proto__ property if object is not extensible
(Issue 1309).
Fixed auto suspension of the sampler thread.
2011-04-06: Version 3.2.8
Exposed WebGL typed array constructors in the shell sample.
Performance improvements on all platforms.
2011-04-04: Version 3.2.7
Disabled the original 'classic' V8 code generator. Crankshaft is
now the default on all platforms.
Changed the heap profiler to use more descriptive names.
Performance and stability improvements to isolates on all platforms.
2011-03-30: Version 3.2.6
Fixed xcode build warning in shell.cc (out of order initialization).
Fixed null-pointer dereference in the compiler when running without
SSE3 support (Chromium issue 77654).
Fixed x64 compilation error due to some dead code. (Issue 1286)
Introduced scons target to build the preparser stand-alone example.
Made FreeBSD build and pass all tests.
2011-03-28: Version 3.2.5
Fixed build with Irregexp interpreter (issue 1266).
Added Crankshaft support for external arrays.
Fixed two potential crash bugs.
2011-03-23: Version 3.2.4
Added isolates which allows several V8 instances in the same process.
This is controlled through the new Isolate class in the API.
Implemented more of EcmaScript 5 strict mode.
Reduced the time it takes to make detailed heap snapshot.
Added a number of commands to the ARM simulator and enhanced the ARM
disassembler.
2011-03-17: Version 3.2.3
Fixed a number of crash bugs.
Fixed Array::New(length) to return an array with a length (issue 1256).
Fixed FreeBSD build.
Changed __defineGetter__ to not throw (matching the behavior of Safari).
Implemented more of EcmaScript 5 strict mode.
Improved Crankshaft performance on all platforms.
2011-03-14: Version 3.2.2
Fixed a number of crash and correctness bugs.
Improved Crankshaft performance on all platforms.
Fixed Crankshaft on Solaris/Illumos.
2011-03-10: Version 3.2.1
Fixed a number of crash bugs.
Improved Crankshaft for x64 and ARM.
Implemented more of EcmaScript 5 strict mode.
2011-03-07: Version 3.2.0
Fixed a number of crash bugs.
Turned on Crankshaft by default on x64 and ARM.
Improved Crankshaft for x64 and ARM.
Implemented more of EcmaScript 5 strict mode.
2011-03-02: Version 3.1.8
Fixed a number of crash bugs.
@ -2564,3 +2989,6 @@
Initial export.
# Local Variables:
# mode:text
# End:

deps/v8/SConstruct (561 changed lines)

@ -1,4 +1,4 @@
# Copyright 2010 the V8 project authors. All rights reserved.
# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
@ -36,13 +36,6 @@ root_dir = dirname(File('SConstruct').rfile().abspath)
sys.path.insert(0, join(root_dir, 'tools'))
import js2c, utils
# ANDROID_TOP is the top of the Android checkout, fetched from the environment
# variable 'TOP'. You will also need to set the CXX, CC, AR and RANLIB
# environment variables to the cross-compiling tools.
ANDROID_TOP = os.environ.get('TOP')
if ANDROID_TOP is None:
ANDROID_TOP=""
# ARM_TARGET_LIB is the path to the dynamic library to use on the target
# machine if cross-compiling to an arm machine. You will also need to set
# the additional cross-compiling environment variables to the cross compiler.
@ -58,50 +51,6 @@ else:
GCC_EXTRA_CCFLAGS = []
GCC_DTOA_EXTRA_CCFLAGS = []
ANDROID_FLAGS = ['-march=armv7-a',
'-mtune=cortex-a8',
'-mfloat-abi=softfp',
'-mfpu=vfp',
'-fpic',
'-mthumb-interwork',
'-funwind-tables',
'-fstack-protector',
'-fno-short-enums',
'-fmessage-length=0',
'-finline-functions',
'-fno-inline-functions-called-once',
'-fgcse-after-reload',
'-frerun-cse-after-loop',
'-frename-registers',
'-fomit-frame-pointer',
'-finline-limit=64',
'-DCAN_USE_VFP_INSTRUCTIONS=1',
'-DCAN_USE_ARMV7_INSTRUCTIONS=1',
'-DCAN_USE_UNALIGNED_ACCESSES=1',
'-MD']
ANDROID_INCLUDES = [ANDROID_TOP + '/bionic/libc/arch-arm/include',
ANDROID_TOP + '/bionic/libc/include',
ANDROID_TOP + '/bionic/libstdc++/include',
ANDROID_TOP + '/bionic/libc/kernel/common',
ANDROID_TOP + '/bionic/libc/kernel/arch-arm',
ANDROID_TOP + '/bionic/libm/include',
ANDROID_TOP + '/bionic/libm/include/arch/arm',
ANDROID_TOP + '/bionic/libthread_db/include',
ANDROID_TOP + '/frameworks/base/include',
ANDROID_TOP + '/system/core/include']
ANDROID_LINKFLAGS = ['-nostdlib',
'-Bdynamic',
'-Wl,-T,' + ANDROID_TOP + '/build/core/armelf.x',
'-Wl,-dynamic-linker,/system/bin/linker',
'-Wl,--gc-sections',
'-Wl,-z,nocopyreloc',
'-Wl,-rpath-link=' + ANDROID_TOP + '/out/target/product/generic/obj/lib',
ANDROID_TOP + '/out/target/product/generic/obj/lib/crtbegin_dynamic.o',
ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork/libgcc.a',
ANDROID_TOP + '/out/target/product/generic/obj/lib/crtend_android.o'];
LIBRARY_FLAGS = {
'all': {
'CPPPATH': [join(root_dir, 'src')],
@ -129,6 +78,9 @@ LIBRARY_FLAGS = {
'inspector:on': {
'CPPDEFINES': ['INSPECTOR'],
},
'fasttls:on': {
'CPPDEFINES': ['V8_FAST_TLS'],
},
'liveobjectlist:on': {
'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT', 'INSPECTOR',
'LIVE_OBJECT_LIST', 'OBJECT_PRINT'],
@ -149,17 +101,10 @@ LIBRARY_FLAGS = {
'mode:debug': {
'CCFLAGS': ['-g', '-O0'],
'CPPDEFINES': ['ENABLE_DISASSEMBLER', 'DEBUG'],
'os:android': {
'CCFLAGS': ['-mthumb']
}
},
'mode:release': {
'CCFLAGS': ['-O3', '-fomit-frame-pointer', '-fdata-sections',
'-ffunction-sections'],
'os:android': {
'CCFLAGS': ['-mthumb', '-Os'],
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
},
'os:linux': {
'CCFLAGS': ['-ansi'] + GCC_EXTRA_CCFLAGS,
@ -178,6 +123,7 @@ LIBRARY_FLAGS = {
'CPPPATH' : ['/usr/local/include'],
'LIBPATH' : ['/usr/local/lib'],
'CCFLAGS': ['-ansi'],
'LIBS': ['execinfo']
},
'os:openbsd': {
'CPPPATH' : ['/usr/local/include'],
@ -196,14 +142,6 @@ LIBRARY_FLAGS = {
'CCFLAGS': ['-DWIN32'],
'CXXFLAGS': ['-DWIN32'],
},
'os:android': {
'CPPDEFINES': ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
'__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
'CCFLAGS': ANDROID_FLAGS,
'WARNINGFLAGS': ['-Wall', '-Wno-unused', '-Werror=return-type',
'-Wstrict-aliasing=2'],
'CPPPATH': ANDROID_INCLUDES,
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32'],
'CCFLAGS': ['-m32'],
@ -216,6 +154,24 @@ LIBRARY_FLAGS = {
},
'unalignedaccesses:off' : {
'CPPDEFINES' : ['CAN_USE_UNALIGNED_ACCESSES=0']
},
'armeabi:soft' : {
'CPPDEFINES' : ['USE_EABI_HARDFLOAT=0'],
'simulator:none': {
'CCFLAGS': ['-mfloat-abi=soft'],
}
},
'armeabi:softfp' : {
'CPPDEFINES' : ['USE_EABI_HARDFLOAT=0', 'CAN_USE_VFP_INSTRUCTIONS'],
'simulator:none': {
'CCFLAGS': ['-mfloat-abi=softfp'],
}
},
'armeabi:hard' : {
'CPPDEFINES' : ['USE_EABI_HARDFLOAT=1', 'CAN_USE_VFP_INSTRUCTIONS'],
'simulator:none': {
'CCFLAGS': ['-mfloat-abi=hard'],
}
}
},
'simulator:arm': {
@ -224,14 +180,40 @@ LIBRARY_FLAGS = {
},
'arch:mips': {
'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
'simulator:none': {
'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
'LDFLAGS': ['-EL']
'CCFLAGS': ['-EL'],
'LINKFLAGS': ['-EL'],
'mips_arch_variant:mips32r2': {
'CCFLAGS': ['-mips32r2', '-Wa,-mips32r2']
},
'mips_arch_variant:mips32r1': {
'CCFLAGS': ['-mips32', '-Wa,-mips32']
},
'library:static': {
'LINKFLAGS': ['-static', '-static-libgcc']
},
'mipsabi:softfloat': {
'CCFLAGS': ['-msoft-float'],
'LINKFLAGS': ['-msoft-float']
},
'mipsabi:hardfloat': {
'CCFLAGS': ['-mhard-float'],
'LINKFLAGS': ['-mhard-float']
}
}
},
'simulator:mips': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32'],
'mipsabi:softfloat': {
'CPPDEFINES': ['__mips_soft_float=1'],
},
'mipsabi:hardfloat': {
'CPPDEFINES': ['__mips_hard_float=1'],
}
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
@ -240,6 +222,9 @@ LIBRARY_FLAGS = {
},
'gdbjit:on': {
'CPPDEFINES': ['ENABLE_GDB_JIT_INTERFACE']
},
'compress_startup_data:bz2': {
'CPPDEFINES': ['COMPRESS_STARTUP_DATA_BZ2']
}
},
'msvc': {
@ -306,6 +291,7 @@ V8_EXTRA_FLAGS = {
'gcc': {
'all': {
'WARNINGFLAGS': ['-Wall',
'-Werror',
'-W',
'-Wno-unused-parameter',
'-Wnon-virtual-dtor']
@ -324,6 +310,11 @@ V8_EXTRA_FLAGS = {
'os:macos': {
'WARNINGFLAGS': ['-pedantic']
},
'arch:arm': {
# This is to silence warnings about ABI changes that some versions of the
# CodeSourcery G++ tool chain produce for each occurrence of varargs.
'WARNINGFLAGS': ['-Wno-abi']
},
'disassembler:on': {
'CPPDEFINES': ['ENABLE_DISASSEMBLER']
}
@ -344,6 +335,9 @@ V8_EXTRA_FLAGS = {
},
'arch:mips': {
'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
},
'disassembler:on': {
'CPPDEFINES': ['ENABLE_DISASSEMBLER']
@ -373,6 +367,11 @@ MKSNAPSHOT_EXTRA_FLAGS = {
'os:win32': {
'LIBS': ['winmm', 'ws2_32'],
},
'compress_startup_data:bz2': {
'os:linux': {
'LIBS': ['bz2']
}
},
},
'msvc': {
'all': {
@ -404,7 +403,10 @@ CCTEST_EXTRA_FLAGS = {
},
'gcc': {
'all': {
'LIBPATH': [abspath('.')]
'LIBPATH': [abspath('.')],
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
'LINKFLAGS': ['$CCFLAGS'],
},
'os:linux': {
'LIBS': ['pthread'],
@ -425,19 +427,6 @@ CCTEST_EXTRA_FLAGS = {
'os:win32': {
'LIBS': ['winmm', 'ws2_32']
},
'os:android': {
'CPPDEFINES': ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
'__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
'CCFLAGS': ANDROID_FLAGS,
'CPPPATH': ANDROID_INCLUDES,
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib',
ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'],
'LINKFLAGS': ANDROID_LINKFLAGS,
'LIBS': ['log', 'c', 'stdc++', 'm', 'gcc'],
'mode:release': {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
},
'arch:arm': {
'LINKFLAGS': ARM_LINK_FLAGS
},
@ -467,8 +456,10 @@ SAMPLE_FLAGS = {
},
'gcc': {
'all': {
'LIBPATH': ['.'],
'CCFLAGS': ['-fno-rtti', '-fno-exceptions']
'LIBPATH': ['.'],
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
'LINKFLAGS': ['$CCFLAGS'],
},
'os:linux': {
'LIBS': ['pthread'],
@ -477,36 +468,193 @@ SAMPLE_FLAGS = {
'LIBS': ['pthread'],
},
'os:freebsd': {
'LIBPATH' : ['/usr/local/lib'],
'LIBS': ['execinfo', 'pthread']
'LIBPATH' : ['/usr/local/lib'],
'LIBS': ['execinfo', 'pthread']
},
'os:solaris': {
'LIBPATH' : ['/usr/local/lib'],
'LIBS': ['m', 'pthread', 'socket', 'nsl', 'rt'],
'LINKFLAGS': ['-mt']
'LIBPATH' : ['/usr/local/lib'],
'LIBS': ['m', 'pthread', 'socket', 'nsl', 'rt'],
'LINKFLAGS': ['-mt']
},
'os:openbsd': {
'LIBPATH' : ['/usr/local/lib'],
'LIBS': ['execinfo', 'pthread']
'LIBPATH' : ['/usr/local/lib'],
'LIBS': ['execinfo', 'pthread']
},
'os:win32': {
'LIBS': ['winmm', 'ws2_32']
},
'os:android': {
'CPPDEFINES': ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
'__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
'CCFLAGS': ANDROID_FLAGS,
'CPPPATH': ANDROID_INCLUDES,
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib',
ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'],
'LINKFLAGS': ANDROID_LINKFLAGS,
'LIBS': ['log', 'c', 'stdc++', 'm', 'gcc'],
'mode:release': {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
'arch:arm': {
'LINKFLAGS': ARM_LINK_FLAGS,
'armeabi:soft' : {
'CPPDEFINES' : ['USE_EABI_HARDFLOAT=0'],
'simulator:none': {
'CCFLAGS': ['-mfloat-abi=soft'],
}
},
'armeabi:softfp' : {
'CPPDEFINES' : ['USE_EABI_HARDFLOAT=0'],
'simulator:none': {
'CCFLAGS': ['-mfloat-abi=softfp'],
}
},
'armeabi:hard' : {
'CPPDEFINES' : ['USE_EABI_HARDFLOAT=1', 'CAN_USE_VFP_INSTRUCTIONS'],
'simulator:none': {
'CCFLAGS': ['-mfloat-abi=hard'],
}
}
},
'arch:ia32': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'arch:x64': {
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64']
},
'arch:mips': {
'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
'simulator:none': {
'CCFLAGS': ['-EL'],
'LINKFLAGS': ['-EL'],
'mips_arch_variant:mips32r2': {
'CCFLAGS': ['-mips32r2', '-Wa,-mips32r2']
},
'mips_arch_variant:mips32r1': {
'CCFLAGS': ['-mips32', '-Wa,-mips32']
},
'library:static': {
'LINKFLAGS': ['-static', '-static-libgcc']
},
'mipsabi:softfloat': {
'CCFLAGS': ['-msoft-float'],
'LINKFLAGS': ['-msoft-float']
},
'mipsabi:hardfloat': {
'CCFLAGS': ['-mhard-float'],
'LINKFLAGS': ['-mhard-float']
}
}
},
'simulator:arm': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'simulator:mips': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'mode:release': {
'CCFLAGS': ['-O2']
},
'mode:debug': {
'CCFLAGS': ['-g', '-O0'],
'CPPDEFINES': ['DEBUG']
},
'compress_startup_data:bz2': {
'CPPDEFINES': ['COMPRESS_STARTUP_DATA_BZ2'],
'os:linux': {
'LIBS': ['bz2']
}
},
},
'msvc': {
'all': {
'LIBS': ['winmm', 'ws2_32']
},
'verbose:off': {
'CCFLAGS': ['/nologo'],
'LINKFLAGS': ['/NOLOGO']
},
'verbose:on': {
'LINKFLAGS': ['/VERBOSE']
},
'library:shared': {
'CPPDEFINES': ['USING_V8_SHARED']
},
'prof:on': {
'LINKFLAGS': ['/MAP']
},
'mode:release': {
'CCFLAGS': ['/O2'],
'LINKFLAGS': ['/OPT:REF', '/OPT:ICF'],
'msvcrt:static': {
'CCFLAGS': ['/MT']
},
'msvcrt:shared': {
'CCFLAGS': ['/MD']
},
'msvcltcg:on': {
'CCFLAGS': ['/GL'],
'pgo:off': {
'LINKFLAGS': ['/LTCG'],
},
},
'pgo:instrument': {
'LINKFLAGS': ['/LTCG:PGI']
},
'pgo:optimize': {
'LINKFLAGS': ['/LTCG:PGO']
}
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32', 'WIN32'],
'LINKFLAGS': ['/MACHINE:X86']
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64', 'WIN32'],
'LINKFLAGS': ['/MACHINE:X64', '/STACK:2091752']
},
'mode:debug': {
'CCFLAGS': ['/Od'],
'LINKFLAGS': ['/DEBUG'],
'CPPDEFINES': ['DEBUG'],
'msvcrt:static': {
'CCFLAGS': ['/MTd']
},
'msvcrt:shared': {
'CCFLAGS': ['/MDd']
}
}
}
}
PREPARSER_FLAGS = {
'all': {
'CPPPATH': [join(abspath('.'), 'include'), join(abspath('.'), 'src')]
},
'gcc': {
'all': {
'LIBPATH': ['.'],
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
'LINKFLAGS': ['$CCFLAGS'],
},
'os:win32': {
'LIBS': ['winmm', 'ws2_32']
},
'arch:arm': {
'LINKFLAGS': ARM_LINK_FLAGS
'LINKFLAGS': ARM_LINK_FLAGS,
'armeabi:soft' : {
'CPPDEFINES' : ['USE_EABI_HARDFLOAT=0'],
'simulator:none': {
'CCFLAGS': ['-mfloat-abi=soft'],
}
},
'armeabi:softfp' : {
'simulator:none': {
'CCFLAGS': ['-mfloat-abi=softfp'],
}
},
'armeabi:hard' : {
'simulator:none': {
'CCFLAGS': ['-mfloat-abi=hard'],
}
}
},
'arch:ia32': {
'CCFLAGS': ['-m32'],
@ -518,10 +666,29 @@ SAMPLE_FLAGS = {
},
'arch:mips': {
'CPPDEFINES': ['V8_TARGET_ARCH_MIPS'],
'mips_arch_variant:mips32r2': {
'CPPDEFINES': ['_MIPS_ARCH_MIPS32R2']
},
'simulator:none': {
'CCFLAGS': ['-EL', '-mips32r2', '-Wa,-mips32r2', '-fno-inline'],
'CCFLAGS': ['-EL'],
'LINKFLAGS': ['-EL'],
'LDFLAGS': ['-EL']
'mips_arch_variant:mips32r2': {
'CCFLAGS': ['-mips32r2', '-Wa,-mips32r2']
},
'mips_arch_variant:mips32r1': {
'CCFLAGS': ['-mips32', '-Wa,-mips32']
},
'library:static': {
'LINKFLAGS': ['-static', '-static-libgcc']
},
'mipsabi:softfloat': {
'CCFLAGS': ['-msoft-float'],
'LINKFLAGS': ['-msoft-float']
},
'mipsabi:hardfloat': {
'CCFLAGS': ['-mhard-float'],
'LINKFLAGS': ['-mhard-float']
}
}
},
'simulator:arm': {
@ -530,7 +697,13 @@ SAMPLE_FLAGS = {
},
'simulator:mips': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
'LINKFLAGS': ['-m32'],
'mipsabi:softfloat': {
'CPPDEFINES': ['__mips_soft_float=1'],
},
'mipsabi:hardfloat': {
'CPPDEFINES': ['__mips_hard_float=1'],
}
},
'mode:release': {
'CCFLAGS': ['-O2']
@ -539,6 +712,9 @@ SAMPLE_FLAGS = {
'CCFLAGS': ['-g', '-O0'],
'CPPDEFINES': ['DEBUG']
},
'os:freebsd': {
'LIBPATH' : ['/usr/local/lib'],
},
},
'msvc': {
'all': {
@ -580,11 +756,11 @@ SAMPLE_FLAGS = {
}
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32'],
'CPPDEFINES': ['V8_TARGET_ARCH_IA32', 'WIN32'],
'LINKFLAGS': ['/MACHINE:X86']
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64'],
'CPPDEFINES': ['V8_TARGET_ARCH_X64', 'WIN32'],
'LINKFLAGS': ['/MACHINE:X64', '/STACK:2091752']
},
'mode:debug': {
@ -604,6 +780,11 @@ SAMPLE_FLAGS = {
D8_FLAGS = {
'gcc': {
'all': {
'CCFLAGS': ['$DIALECTFLAGS', '$WARNINGFLAGS'],
'CXXFLAGS': ['$CCFLAGS', '-fno-rtti', '-fno-exceptions'],
'LINKFLAGS': ['$CCFLAGS'],
},
'console:readline': {
'LIBS': ['readline']
},
@ -623,18 +804,18 @@ D8_FLAGS = {
'os:openbsd': {
'LIBS': ['pthread'],
},
'os:android': {
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib',
ANDROID_TOP + '/prebuilt/linux-x86/toolchain/arm-eabi-4.4.0/lib/gcc/arm-eabi/4.4.0/interwork'],
'LINKFLAGS': ANDROID_LINKFLAGS,
'LIBS': ['log', 'c', 'stdc++', 'm', 'gcc'],
},
'os:win32': {
'LIBS': ['winmm', 'ws2_32'],
},
'arch:arm': {
'LINKFLAGS': ARM_LINK_FLAGS
},
'compress_startup_data:bz2': {
'CPPDEFINES': ['COMPRESS_STARTUP_DATA_BZ2'],
'os:linux': {
'LIBS': ['bz2']
}
}
},
'msvc': {
'all': {
@ -686,12 +867,14 @@ def GuessVisibility(env):
def GuessStrictAliasing(env):
# There seems to be a problem with gcc 4.5.x
# see http://code.google.com/p/v8/issues/detail?id=884
# it can be worked around by disabling strict aliasing
# There seems to be a problem with gcc 4.5.x.
# See http://code.google.com/p/v8/issues/detail?id=884
# It can be worked around by disabling strict aliasing.
toolchain = env['toolchain'];
if toolchain == 'gcc':
env = Environment(tools=['gcc'])
# The gcc version should be available in env['CCVERSION'],
# but when scons detects msvc this value is not set.
version = subprocess.Popen([env['CC'], '-dumpversion'],
stdout=subprocess.PIPE).communicate()[0]
if version.find('4.5') == 0:
@ -699,22 +882,25 @@ def GuessStrictAliasing(env):
return 'default'
SIMPLE_OPTIONS = {
'toolchain': {
'values': ['gcc', 'msvc'],
'guess': GuessToolchain,
'help': 'the toolchain to use'
PLATFORM_OPTIONS = {
'arch': {
'values': ['arm', 'ia32', 'x64', 'mips'],
'guess': GuessArch,
'help': 'the architecture to build for'
},
'os': {
'values': ['freebsd', 'linux', 'macos', 'win32', 'android', 'openbsd', 'solaris', 'cygwin'],
'values': ['freebsd', 'linux', 'macos', 'win32', 'openbsd', 'solaris', 'cygwin'],
'guess': GuessOS,
'help': 'the os to build for'
},
'arch': {
'values':['arm', 'ia32', 'x64', 'mips'],
'guess': GuessArch,
'help': 'the architecture to build for'
},
'toolchain': {
'values': ['gcc', 'msvc'],
'guess': GuessToolchain,
'help': 'the toolchain to use'
}
}
SIMPLE_OPTIONS = {
'regexp': {
'values': ['native', 'interpreted'],
'default': 'native',
@ -805,6 +991,12 @@ SIMPLE_OPTIONS = {
'default': 'off',
'help': 'enable the disassembler to inspect generated code'
},
'fasttls': {
'values': ['on', 'off'],
'default': 'on',
'help': 'enable fast thread local storage support '
'(if available on the current architecture/platform)'
},
'sourcesignatures': {
'values': ['MD5', 'timestamp'],
'default': 'MD5',
@ -823,41 +1015,55 @@ SIMPLE_OPTIONS = {
'visibility': {
'values': ['default', 'hidden'],
'guess': GuessVisibility,
'depends': ['os', 'toolchain'],
'help': 'shared library symbol visibility'
},
'strictaliasing': {
'values': ['default', 'off'],
'guess': GuessStrictAliasing,
'depends': ['toolchain'],
'help': 'assume strict aliasing while optimizing'
},
'pgo': {
'values': ['off', 'instrument', 'optimize'],
'default': 'off',
'help': 'select profile guided optimization variant',
}
},
'armeabi': {
'values': ['hard', 'softfp', 'soft'],
'default': 'softfp',
'help': 'generate calling convention according to selected ARM EABI variant'
},
'mipsabi': {
'values': ['hardfloat', 'softfloat', 'none'],
'default': 'hardfloat',
'help': 'generate calling convention according to selected mips ABI'
},
'mips_arch_variant': {
'values': ['mips32r2', 'mips32r1'],
'default': 'mips32r2',
'help': 'mips variant'
},
'compress_startup_data': {
'values': ['off', 'bz2'],
'default': 'off',
'help': 'compress startup data (snapshot) [Linux only]'
},
}
ALL_OPTIONS = dict(PLATFORM_OPTIONS, **SIMPLE_OPTIONS)
def AddOption(result, name, option):
if 'guess' in option:
# Option has a guess function
guess = option.get('guess')
guess_env = Environment(options=result)
# Check if all options that the guess function depends on are set
if 'depends' in option:
for dependency in option.get('depends'):
if not dependency in guess_env:
return False
default = guess(guess_env)
else:
# Option has a fixed default
default = option.get('default')
help = '%s (%s)' % (option.get('help'), ", ".join(option['values']))
result.Add(name, help, default)
return True
def AddOptions(options, result):
guess_env = Environment(options=result)
for (name, option) in options.iteritems():
if 'guess' in option:
# Option has a guess function
guess = option.get('guess')
default = guess(guess_env)
else:
# Option has a fixed default
default = option.get('default')
help = '%s (%s)' % (option.get('help'), ", ".join(option['values']))
result.Add(name, help, default)
def GetOptions():
@ -867,13 +1073,8 @@ def GetOptions():
result.Add('cache', 'directory to use for scons build cache', '')
result.Add('env', 'override environment settings (NAME0:value0,NAME1:value1,...)', '')
result.Add('importenv', 'import environment settings (NAME0,NAME1,...)', '')
options = SIMPLE_OPTIONS
while len(options):
postpone = {}
for (name, option) in options.iteritems():
if not AddOption(result, name, option):
postpone[name] = option
options = postpone
AddOptions(PLATFORM_OPTIONS, result)
AddOptions(SIMPLE_OPTIONS, result)
return result
@ -968,7 +1169,9 @@ def VerifyOptions(env):
print env['arch']
print env['simulator']
Abort("Option unalignedaccesses only supported for the ARM architecture.")
for (name, option) in SIMPLE_OPTIONS.iteritems():
if env['os'] != 'linux' and env['compress_startup_data'] != 'off':
Abort("Startup data compression is only available on Linux")
for (name, option) in ALL_OPTIONS.iteritems():
if (not name in env):
message = ("A value for option %s must be specified (%s)." %
(name, ", ".join(option['values'])))
@ -990,6 +1193,7 @@ class BuildContext(object):
self.options = options
self.env_overrides = env_overrides
self.samples = samples
self.preparser_targets = []
self.use_snapshot = (options['snapshot'] != 'off')
self.build_snapshot = (options['snapshot'] == 'on')
self.flags = None
@ -1068,11 +1272,8 @@ def PostprocessOptions(options, os):
if 'msvcltcg' in ARGUMENTS:
print "Warning: forcing msvcltcg on as it is required for pgo (%s)" % options['pgo']
options['msvcltcg'] = 'on'
if options['arch'] == 'mips':
if ('regexp' in ARGUMENTS) and options['regexp'] == 'native':
# Print a warning if native regexp is specified for mips
print "Warning: forcing regexp to interpreted for mips"
options['regexp'] = 'interpreted'
if (options['mipsabi'] != 'none') and (options['arch'] != 'mips') and (options['simulator'] != 'mips'):
options['mipsabi'] = 'none'
if options['liveobjectlist'] == 'on':
if (options['debuggersupport'] != 'on') or (options['mode'] == 'release'):
# Print a warning that liveobjectlist will implicitly enable the debugger
@ -1099,7 +1300,7 @@ def ParseEnvOverrides(arg, imports):
def BuildSpecific(env, mode, env_overrides, tools):
options = {'mode': mode}
for option in SIMPLE_OPTIONS:
for option in ALL_OPTIONS:
options[option] = env[option]
PostprocessOptions(options, env['os'])
@ -1119,6 +1320,7 @@ def BuildSpecific(env, mode, env_overrides, tools):
dtoa_flags = context.AddRelevantFlags(library_flags, DTOA_EXTRA_FLAGS)
cctest_flags = context.AddRelevantFlags(v8_flags, CCTEST_EXTRA_FLAGS)
sample_flags = context.AddRelevantFlags(user_environ, SAMPLE_FLAGS)
preparser_flags = context.AddRelevantFlags(user_environ, PREPARSER_FLAGS)
d8_flags = context.AddRelevantFlags(library_flags, D8_FLAGS)
context.flags = {
@ -1127,13 +1329,15 @@ def BuildSpecific(env, mode, env_overrides, tools):
'dtoa': dtoa_flags,
'cctest': cctest_flags,
'sample': sample_flags,
'd8': d8_flags
'd8': d8_flags,
'preparser': preparser_flags
}
# Generate library base name.
target_id = mode
suffix = SUFFIXES[target_id]
library_name = 'v8' + suffix
preparser_library_name = 'v8preparser' + suffix
version = GetVersion()
if context.options['soname'] == 'on':
# When building shared object with SONAME version the library name.
@ -1147,7 +1351,7 @@ def BuildSpecific(env, mode, env_overrides, tools):
env['SONAME'] = soname
# Build the object files by invoking SCons recursively.
(object_files, shell_files, mksnapshot) = env.SConscript(
(object_files, shell_files, mksnapshot, preparser_files) = env.SConscript(
join('src', 'SConscript'),
build_dir=join('obj', target_id),
exports='context tools',
@ -1162,13 +1366,22 @@ def BuildSpecific(env, mode, env_overrides, tools):
context.ApplyEnvOverrides(env)
if context.options['library'] == 'static':
library = env.StaticLibrary(library_name, object_files)
preparser_library = env.StaticLibrary(preparser_library_name,
preparser_files)
else:
# There seems to be a glitch in the way scons decides where to put
# PDB files when compiling using MSVC so we specify it manually.
# This should not affect any other platforms.
pdb_name = library_name + '.dll.pdb'
library = env.SharedLibrary(library_name, object_files, PDB=pdb_name)
preparser_pdb_name = preparser_library_name + '.dll.pdb';
preparser_soname = 'lib' + preparser_library_name + '.so';
preparser_library = env.SharedLibrary(preparser_library_name,
preparser_files,
PDB=preparser_pdb_name,
SONAME=preparser_soname)
context.library_targets.append(library)
context.library_targets.append(preparser_library)
d8_env = Environment(tools=tools)
d8_env.Replace(**context.flags['d8'])
@ -1202,6 +1415,21 @@ def BuildSpecific(env, mode, env_overrides, tools):
)
context.cctest_targets.append(cctest_program)
preparser_env = env.Copy()
preparser_env.Replace(**context.flags['preparser'])
preparser_env.Prepend(LIBS=[preparser_library_name])
context.ApplyEnvOverrides(preparser_env)
preparser_object = preparser_env.SConscript(
join('preparser', 'SConscript'),
build_dir=join('obj', 'preparser', target_id),
exports='context',
duplicate=False
)
preparser_name = join('obj', 'preparser', target_id, 'preparser')
preparser_program = preparser_env.Program(preparser_name, preparser_object);
preparser_env.Depends(preparser_program, preparser_library)
context.preparser_targets.append(preparser_program)
return context
@ -1220,6 +1448,7 @@ def Build():
mksnapshots = []
cctests = []
samples = []
preparsers = []
d8s = []
modes = SplitList(env['mode'])
for mode in modes:
@ -1228,6 +1457,7 @@ def Build():
mksnapshots += context.mksnapshot_targets
cctests += context.cctest_targets
samples += context.sample_targets
preparsers += context.preparser_targets
d8s += context.d8_targets
env.Alias('library', libraries)
@ -1235,6 +1465,7 @@ def Build():
env.Alias('cctests', cctests)
env.Alias('sample', samples)
env.Alias('d8', d8s)
env.Alias('preparser', preparsers)
if env['sample']:
env.Default('sample')

deps/v8/include/v8-debug.h (45 changed lines)

@ -127,7 +127,7 @@ class EXPORT Debug {
/**
* Get the context active when the debug event happened. Note this is not
* the current active context as the JavaScript part of the debugger is
* running in it's own context which is entered at this point.
* running in its own context which is entered at this point.
*/
virtual Handle<Context> GetEventContext() const = 0;
@ -164,12 +164,13 @@ class EXPORT Debug {
/**
* Get the context active when the debug event happened. Note this is not
* the current active context as the JavaScript part of the debugger is
* running in it's own context which is entered at this point.
* running in its own context which is entered at this point.
*/
virtual Handle<Context> GetEventContext() const = 0;
/**
* Client data passed with the corresponding callbak whet it was registered.
* Client data passed with the corresponding callback when it was
* registered.
*/
virtual Handle<Value> GetCallbackData() const = 0;
@ -227,7 +228,7 @@ class EXPORT Debug {
* Debug message callback function.
*
* \param message the debug message handler message object
*
* A MessageHandler does not take possession of the message data,
* and must not rely on the data persisting after the handler returns.
*/
@ -253,25 +254,35 @@ class EXPORT Debug {
static bool SetDebugEventListener(v8::Handle<v8::Object> that,
Handle<Value> data = Handle<Value>());
// Schedule a debugger break to happen when JavaScript code is run.
static void DebugBreak();
// Schedule a debugger break to happen when JavaScript code is run
// in the given isolate. If no isolate is provided the default
// isolate is used.
static void DebugBreak(Isolate* isolate = NULL);
// Remove scheduled debugger break if it has not happened yet.
static void CancelDebugBreak();
// Remove scheduled debugger break in given isolate if it has not
// happened yet. If no isolate is provided the default isolate is
// used.
static void CancelDebugBreak(Isolate* isolate = NULL);
// Break execution of JavaScript (this method can be invoked from a
// non-VM thread) for further client command execution on a VM
// thread. Client data is then passed in EventDetails to
// EventCallback at the moment when the VM actually stops.
static void DebugBreakForCommand(ClientData* data = NULL);
// Break execution of JavaScript in the given isolate (this method
// can be invoked from a non-VM thread) for further client command
// execution on a VM thread. Client data is then passed in
// EventDetails to EventCallback at the moment when the VM actually
// stops. If no isolate is provided the default isolate is used.
static void DebugBreakForCommand(ClientData* data = NULL,
Isolate* isolate = NULL);
// Message based interface. The message protocol is JSON. NOTE the message
// handler thread is not supported any more parameter must be false.
static void SetMessageHandler(MessageHandler handler,
bool message_handler_thread = false);
static void SetMessageHandler2(MessageHandler2 handler);
// If no isolate is provided the default isolate is
// used.
static void SendCommand(const uint16_t* command, int length,
ClientData* client_data = NULL);
ClientData* client_data = NULL,
Isolate* isolate = NULL);
// Dispatch interface.
static void SetHostDispatchHandler(HostDispatchHandler handler,
@ -300,7 +311,7 @@ class EXPORT Debug {
* get access to information otherwise not available during normal JavaScript
* execution e.g. details on stack frames. Receiver of the function call will
* be the debugger context global object, however this is a subject to change.
* The following example show a JavaScript function which when passed to
* The following example shows a JavaScript function which when passed to
* v8::Debug::Call will return the current line of JavaScript execution.
*
* \code
@ -342,7 +353,7 @@ class EXPORT Debug {
* 2. V8 is suspended on debug breakpoint; in this state V8 is dedicated
* to reading and processing debug messages;
* 3. V8 is not running at all or has called some long-working C++ function;
* by default it means that processing of all debug message will be deferred
* by default it means that processing of all debug messages will be deferred
* until V8 gets control again; however, embedding application may improve
* this by manually calling this method.
*
@ -366,7 +377,7 @@ class EXPORT Debug {
static void ProcessDebugMessages();
/**
* Debugger is running in it's own context which is entered while debugger
* Debugger is running in its own context which is entered while debugger
* messages are being dispatched. This is an explicit getter for this
* debugger context. Note that the content of the debugger context is subject
* to change.
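The hunks above extend DebugBreak, CancelDebugBreak, DebugBreakForCommand and SendCommand with an optional Isolate argument. Below is a minimal sketch of how an embedder might use the new overloads to target one specific isolate; the worker_isolate parameter and the two helper functions are hypothetical, not part of this change.
#include "v8.h"
#include "v8-debug.h"
// Sketch: schedule and cancel a debugger break on one specific isolate,
// using the Isolate-aware overloads shown in the hunks above.
// 'worker_isolate' is a hypothetical isolate owned by the embedder.
void PauseWorker(v8::Isolate* worker_isolate) {
  // Break the next time JavaScript runs in that isolate.
  v8::Debug::DebugBreak(worker_isolate);
}
void ResumeWorker(v8::Isolate* worker_isolate) {
  // Withdraw the request if the break has not fired yet.
  v8::Debug::CancelDebugBreak(worker_isolate);
}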

deps/v8/include/v8-preparser.h (13 changed lines)

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -66,17 +66,18 @@
namespace v8 {
class PreParserData {
// The result of preparsing is either a stack overflow error, or an opaque
// blob of data that can be passed back into the parser.
class V8EXPORT PreParserData {
public:
PreParserData(size_t size, const uint8_t* data)
: data_(data), size_(size) { }
// Create a PreParserData value where stack_overflow reports true.
static PreParserData StackOverflow() { return PreParserData(NULL, 0); }
static PreParserData StackOverflow() { return PreParserData(0, NULL); }
// Whether the pre-parser stopped due to a stack overflow.
// If this is the case, size() and data() should not be used.
bool stack_overflow() { return size_ == 0u; }
// The size of the data in bytes.
@ -92,7 +93,7 @@ class PreParserData {
// Interface for a stream of Unicode characters.
class UnicodeInputStream {
class V8EXPORT UnicodeInputStream { // NOLINT - Thinks V8EXPORT is class name.
public:
virtual ~UnicodeInputStream();
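As a usage sketch for the newly exported preparser types: this assumes a v8::Preparse(UnicodeInputStream*, size_t) entry point from this header and a 64 KB stack budget, neither of which appears in the hunks above; only PreParserData and UnicodeInputStream are shown there.
#include <stddef.h>
#include "v8-preparser.h"
// Sketch: preparse a stream and hand back the opaque data blob on success.
// v8::Preparse(UnicodeInputStream*, size_t) is an assumption based on this
// header, not shown in the diff above.
bool PreparseAndStore(v8::UnicodeInputStream* stream,
                      const uint8_t** out_data, size_t* out_size) {
  v8::PreParserData result = v8::Preparse(stream, 64 * 1024);  // stack budget
  if (result.stack_overflow()) {
    // Per the comment above, size() and data() must not be used in this case.
    return false;
  }
  *out_data = result.data();  // Opaque blob that can be passed back to the parser.
  *out_size = result.size();
  return true;
}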

deps/v8/include/v8-profiler.h (188 changed lines)

@ -131,6 +131,16 @@ class V8EXPORT CpuProfile {
/** Returns the root node of the top down call tree. */
const CpuProfileNode* GetTopDownRoot() const;
/**
* Deletes the profile and removes it from CpuProfiler's list.
* All pointers to nodes previously returned become invalid.
* Profiles with the same uid but obtained using different
* security token are not deleted, but become inaccessible
* using FindProfile method. It is embedder's responsibility
* to call Delete on these profiles.
*/
void Delete();
};
@ -181,6 +191,13 @@ class V8EXPORT CpuProfiler {
static const CpuProfile* StopProfiling(
Handle<String> title,
Handle<Value> security_token = Handle<Value>());
/**
* Deletes all existing profiles, also cancelling all profiling
* activity. All previously returned pointers to profiles and their
* contents become invalid after this call.
*/
static void DeleteAllProfiles();
};
@ -189,7 +206,7 @@ class HeapGraphNode;
/**
* HeapSnapshotEdge represents a directed connection between heap
* graph nodes: from retaners to retained nodes.
* graph nodes: from retainers to retained nodes.
*/
class V8EXPORT HeapGraphEdge {
public:
@ -223,36 +240,21 @@ class V8EXPORT HeapGraphEdge {
};
class V8EXPORT HeapGraphPath {
public:
/** Returns the number of edges in the path. */
int GetEdgesCount() const;
/** Returns an edge from the path. */
const HeapGraphEdge* GetEdge(int index) const;
/** Returns origin node. */
const HeapGraphNode* GetFromNode() const;
/** Returns destination node. */
const HeapGraphNode* GetToNode() const;
};
/**
* HeapGraphNode represents a node in a heap graph.
*/
class V8EXPORT HeapGraphNode {
public:
enum Type {
kHidden = 0, // Hidden node, may be filtered when shown to user.
kArray = 1, // An array of elements.
kString = 2, // A string.
kObject = 3, // A JS object (except for arrays and strings).
kCode = 4, // Compiled code.
kClosure = 5, // Function closure.
kRegExp = 6, // RegExp.
kHeapNumber = 7 // Number stored in the heap.
kHidden = 0, // Hidden node, may be filtered when shown to user.
kArray = 1, // An array of elements.
kString = 2, // A string.
kObject = 3, // A JS object (except for arrays and strings).
kCode = 4, // Compiled code.
kClosure = 5, // Function closure.
kRegExp = 6, // RegExp.
kHeapNumber = 7, // Number stored in the heap.
kNative = 8 // Native object (not from V8 heap).
};
/** Returns node type (see HeapGraphNode::Type). */
@ -267,17 +269,10 @@ class V8EXPORT HeapGraphNode {
/**
* Returns node id. For the same heap object, the id remains the same
* across all snapshots. Not applicable to aggregated heap snapshots
* as they only contain aggregated instances.
* across all snapshots.
*/
uint64_t GetId() const;
/**
* Returns the number of instances. Only applicable to aggregated
* heap snapshots.
*/
int GetInstancesCount() const;
/** Returns node's own size, in bytes. */
int GetSelfSize() const;
@ -307,12 +302,6 @@ class V8EXPORT HeapGraphNode {
/** Returns a retainer by index. */
const HeapGraphEdge* GetRetainer(int index) const;
/** Returns the number of simple retaining paths from the root to the node. */
int GetRetainingPathsCount() const;
/** Returns a retaining path by index. */
const HeapGraphPath* GetRetainingPath(int index) const;
/**
* Returns a dominator node. This is the node that participates in every
* path from the snapshot root to the current node.
@ -321,25 +310,13 @@ class V8EXPORT HeapGraphNode {
};
class V8EXPORT HeapSnapshotsDiff {
public:
/** Returns the root node for added nodes. */
const HeapGraphNode* GetAdditionsRoot() const;
/** Returns the root node for deleted nodes. */
const HeapGraphNode* GetDeletionsRoot() const;
};
/**
* HeapSnapshots record the state of the JS heap at some moment.
*/
class V8EXPORT HeapSnapshot {
public:
enum Type {
kFull = 0, // Heap snapshot with all instances and references.
kAggregated = 1 // Snapshot doesn't contain individual heap entries,
// instead they are grouped by constructor name.
kFull = 0 // Heap snapshot with all instances and references.
};
enum SerializationFormat {
kJSON = 0 // See format description near 'Serialize' method.
@ -360,17 +337,24 @@ class V8EXPORT HeapSnapshot {
/** Returns a node by its id. */
const HeapGraphNode* GetNodeById(uint64_t id) const;
/** Returns total nodes count in the snapshot. */
int GetNodesCount() const;
/** Returns a node by index. */
const HeapGraphNode* GetNode(int index) const;
/**
* Returns a diff between this snapshot and another one. Only snapshots
* of the same type can be compared.
* Deletes the snapshot and removes it from HeapProfiler's list.
* All pointers to nodes, edges and paths previously returned become
* invalid.
*/
const HeapSnapshotsDiff* CompareWith(const HeapSnapshot* snapshot) const;
void Delete();
/**
* Prepare a serialized representation of the snapshot. The result
* is written into the stream provided in chunks of specified size.
* The total length of the serialized snapshot is unknown in
* advance, it is can be roughly equal to JS heap size (that means,
* advance, it can be roughly equal to JS heap size (that means,
* it can be really big - tens of megabytes).
*
* For the JSON format, heap contents are represented as an object
@ -392,11 +376,22 @@ class V8EXPORT HeapSnapshot {
};
class RetainedObjectInfo;
/**
* Interface for controlling heap profiling.
*/
class V8EXPORT HeapProfiler {
public:
/**
* Callback function invoked for obtaining RetainedObjectInfo for
* the given JavaScript wrapper object. It is prohibited to enter V8
* while the callback is running: only getters on the handle and
* GetPointerFromInternalField on the objects are allowed.
*/
typedef RetainedObjectInfo* (*WrapperInfoCallback)
(uint16_t class_id, Handle<Value> wrapper);
/** Returns the number of snapshots taken. */
static int GetSnapshotsCount();
@ -414,6 +409,87 @@ class V8EXPORT HeapProfiler {
Handle<String> title,
HeapSnapshot::Type type = HeapSnapshot::kFull,
ActivityControl* control = NULL);
/**
* Deletes all snapshots taken. All previously returned pointers to
* snapshots and their contents become invalid after this call.
*/
static void DeleteAllSnapshots();
/** Binds a callback to embedder's class ID. */
static void DefineWrapperClass(
uint16_t class_id,
WrapperInfoCallback callback);
/**
* Default value of persistent handle class ID. Must not be used to
* define a class. Can be used to reset a class of a persistent
* handle.
*/
static const uint16_t kPersistentHandleNoClassId = 0;
};
/**
* Interface for providing information about embedder's objects
* held by global handles. This information is reported in two ways:
*
* 1. When calling AddObjectGroup, an embedder may pass
* RetainedObjectInfo instance describing the group. To collect
* this information while taking a heap snapshot, V8 calls GC
* prologue and epilogue callbacks.
*
* 2. When a heap snapshot is collected, V8 additionally
* requests RetainedObjectInfos for persistent handles that
* were not previously reported via AddObjectGroup.
*
* Thus, if an embedder wants to provide information about native
* objects for heap snapshots, he can do it in a GC prologue
* handler, and / or by assigning wrapper class ids in the following way:
*
* 1. Bind a callback to class id by calling DefineWrapperClass.
* 2. Call SetWrapperClassId on certain persistent handles.
*
* V8 takes ownership of RetainedObjectInfo instances passed to it and
* keeps them alive only during snapshot collection. Afterwards, they
* are freed by calling the Dispose class function.
*/
class V8EXPORT RetainedObjectInfo { // NOLINT
public:
/** Called by V8 when it no longer needs an instance. */
virtual void Dispose() = 0;
/** Returns whether two instances are equivalent. */
virtual bool IsEquivalent(RetainedObjectInfo* other) = 0;
/**
* Returns hash value for the instance. Equivalent instances
* must have the same hash value.
*/
virtual intptr_t GetHash() = 0;
/**
* Returns human-readable label. It must be a NUL-terminated UTF-8
* encoded string. V8 copies its contents during a call to GetLabel.
*/
virtual const char* GetLabel() = 0;
/**
* Returns element count in case if a global handle retains
* a subgraph by holding one of its nodes.
*/
virtual intptr_t GetElementCount() { return -1; }
/** Returns embedder's object size in bytes. */
virtual intptr_t GetSizeInBytes() { return -1; }
protected:
RetainedObjectInfo() {}
virtual ~RetainedObjectInfo() {}
private:
RetainedObjectInfo(const RetainedObjectInfo&);
RetainedObjectInfo& operator=(const RetainedObjectInfo&);
};
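To illustrate the new embedder hooks, here is a minimal sketch of a RetainedObjectInfo implementation registered through HeapProfiler::DefineWrapperClass; the class id value, the label, and the reported size are hypothetical.
#include "v8.h"
#include "v8-profiler.h"
// Sketch: label native wrapper objects in heap snapshots. Every wrapper
// tagged with kMyClassId is grouped under a single "MyNativeObject" entry.
class MyObjectInfo : public v8::RetainedObjectInfo {
 public:
  explicit MyObjectInfo(intptr_t size) : size_(size) {}
  virtual void Dispose() { delete this; }  // Called by V8 when no longer needed.
  virtual bool IsEquivalent(v8::RetainedObjectInfo* other) {
    return GetHash() == other->GetHash();
  }
  virtual intptr_t GetHash() { return 1; }  // One equivalence group for all wrappers.
  virtual const char* GetLabel() { return "MyNativeObject"; }
  virtual intptr_t GetSizeInBytes() { return size_; }
 private:
  intptr_t size_;
};
static const uint16_t kMyClassId = 42;  // Hypothetical; must not be 0 (see above).
static v8::RetainedObjectInfo* MyWrapperInfo(uint16_t class_id,
                                             v8::Handle<v8::Value> wrapper) {
  // Only handle getters and GetPointerFromInternalField may be used in here.
  return new MyObjectInfo(1024);  // Fixed native size, for the sketch only.
}
void RegisterWrapperClass() {
  v8::HeapProfiler::DefineWrapperClass(kMyClassId, MyWrapperInfo);
}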

deps/v8/include/v8-testing.h (5 changed lines)

@ -87,6 +87,11 @@ class V8EXPORT Testing {
* should be between 0 and one less than the result from GetStressRuns()
*/
static void PrepareStressRun(int run);
/**
* Force deoptimization of all functions.
*/
static void DeoptimizeAll();
};
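A small usage sketch for the new DeoptimizeAll hook inside the stress-run loop implied by PrepareStressRun above; GetStressRuns() is referenced in the comment shown, but its int return type, the loop body, and the script execution step are assumptions.
#include "v8-testing.h"
// Sketch: drive the stress-test helpers, forcing all optimized code to be
// thrown away between runs via the DeoptimizeAll call added in this change.
void RunStressCycles() {
  for (int run = 0; run < v8::Testing::GetStressRuns(); run++) {
    v8::Testing::PrepareStressRun(run);
    // ... compile and run the script under test ...
    v8::Testing::DeoptimizeAll();
  }
}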

deps/v8/include/v8.h (636 changed lines)

File diff suppressed because it is too large

deps/v8/preparser/SConscript (38 changed lines)

@ -0,0 +1,38 @@
# Copyright 2011 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from os.path import join
Import('context')
def ConfigureObjectFiles():
env = Environment()
env.Replace(**context.flags['preparser'])
context.ApplyEnvOverrides(env)
return env.Object('preparser-process.cc')
preparser_object = ConfigureObjectFiles()
Return('preparser_object')

deps/v8/preparser/preparser-process.cc (407 changed lines)

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -27,105 +27,79 @@
#include <stdlib.h>
#include <stdarg.h>
#include <stdio.h>
#include <string.h>
#include "../include/v8stdint.h"
#include "../include/v8-preparser.h"
#include "unicode-inl.h"
enum ResultCode { kSuccess = 0, kErrorReading = 1, kErrorWriting = 2 };
#include "../src/preparse-data-format.h"
namespace v8 {
namespace internal {
namespace i = v8::internal;
// THIS FILE IS PROOF-OF-CONCEPT ONLY.
// The final goal is a stand-alone preparser library.
// This file is only used for testing the stand-alone preparser
// library.
// The first argument must be the path of a JavaScript source file, or
// the flags "-e" and the next argument is then the source of a JavaScript
// program.
// Optionally this can be followed by the word "throws" (case sensitive),
// which signals that the parsing is expected to throw - the default is
// to expect the parsing to not throw.
// The command line can further be followed by a message text (the
// *type* of the exception to throw), and even more optionally, the
// start and end position reported with the exception.
//
// This source file is preparsed and tested against the expectations, and if
// successful, the resulting preparser data is written to stdout.
// Diagnostic output is written to stderr.
// The source file must contain only ASCII characters (UTF-8 isn't supported).
// The file is read into memory, so it should have a reasonable size.
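Hedged example invocations of the test driver described above (the binary name follows the usage comment in main() below; the message type and positions are illustrative):

  preparser script.js
  preparser -e "var x = 1;"
  preparser broken.js throws unexpected_token 10 15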
class UTF8InputStream : public v8::UnicodeInputStream {
// Adapts an ASCII string to the UnicodeInputStream interface.
class AsciiInputStream : public v8::UnicodeInputStream {
public:
UTF8InputStream(uint8_t* buffer, size_t length)
AsciiInputStream(const uint8_t* buffer, size_t length)
: buffer_(buffer),
offset_(0),
pos_(0),
end_offset_(static_cast<int>(length)) { }
end_offset_(static_cast<int>(length)),
offset_(0) { }
virtual ~UTF8InputStream() { }
virtual ~AsciiInputStream() { }
virtual void PushBack(int32_t ch) {
// Pushback assumes that the character pushed back is the
// one that was most recently read, and jumps back in the
// UTF-8 stream by the length of that character's encoding.
offset_ -= unibrow::Utf8::Length(ch);
pos_--;
offset_--;
#ifdef DEBUG
if (static_cast<unsigned>(ch) <= unibrow::Utf8::kMaxOneByteChar) {
if (ch != buffer_[offset_]) {
fprintf(stderr, "Invalid pushback: '%c'.", ch);
exit(1);
}
} else {
unsigned tmp = 0;
if (static_cast<unibrow::uchar>(ch) !=
unibrow::Utf8::CalculateValue(buffer_ + offset_,
end_offset_ - offset_,
&tmp)) {
fprintf(stderr, "Invalid pushback: 0x%x.", ch);
exit(1);
}
if (offset_ < 0 ||
(ch != ((offset_ >= end_offset_) ? -1 : buffer_[offset_]))) {
fprintf(stderr, "Invalid pushback: '%c' at offset %d.", ch, offset_);
exit(1);
}
#endif
}
virtual int32_t Next() {
if (offset_ == end_offset_) return -1;
uint8_t first_char = buffer_[offset_];
if (first_char <= unibrow::Utf8::kMaxOneByteChar) {
pos_++;
offset_++;
return static_cast<int32_t>(first_char);
if (offset_ >= end_offset_) {
offset_++; // Increment anyway to allow symmetric pushbacks.
return -1;
}
uint8_t next_char = buffer_[offset_];
#ifdef DEBUG
if (next_char > 0x7fu) {
fprintf(stderr, "Non-ASCII character in input: '%c'.", next_char);
exit(1);
}
unibrow::uchar codepoint =
unibrow::Utf8::CalculateValue(buffer_ + offset_,
end_offset_ - offset_,
&offset_);
pos_++;
return static_cast<int32_t>(codepoint);
#endif
offset_++;
return static_cast<int32_t>(next_char);
}
private:
const uint8_t* buffer_;
unsigned offset_;
unsigned pos_;
unsigned end_offset_;
const int end_offset_;
int offset_;
};
// Write a number to dest in network byte order.
void WriteUInt32(FILE* dest, uint32_t value, bool* ok) {
for (int i = 3; i >= 0; i--) {
uint8_t byte = static_cast<uint8_t>(value >> (i << 3));
int result = fputc(byte, dest);
if (result == EOF) {
*ok = false;
return;
}
}
}
// Read number from FILE* in network byte order.
uint32_t ReadUInt32(FILE* source, bool* ok) {
uint32_t n = 0;
for (int i = 0; i < 4; i++) {
int c = fgetc(source);
if (c == EOF) {
*ok = false;
return 0;
}
n = (n << 8) + static_cast<uint32_t>(c);
}
return n;
}
bool ReadBuffer(FILE* source, void* buffer, size_t length) {
size_t actually_read = fread(buffer, 1, length, source);
return (actually_read == length);
@ -138,69 +112,268 @@ bool WriteBuffer(FILE* dest, const void* buffer, size_t length) {
}
class PreparseDataInterpreter {
public:
PreparseDataInterpreter(const uint8_t* data, int length)
: data_(data), length_(length), message_(NULL) { }
~PreparseDataInterpreter() {
if (message_ != NULL) delete[] message_;
}
bool valid() {
int header_length =
i::PreparseDataConstants::kHeaderSize * sizeof(int); // NOLINT
return length_ >= header_length;
}
bool throws() {
return valid() &&
word(i::PreparseDataConstants::kHasErrorOffset) != 0;
}
const char* message() {
if (message_ != NULL) return message_;
if (!throws()) return NULL;
int text_pos = i::PreparseDataConstants::kHeaderSize +
i::PreparseDataConstants::kMessageTextPos;
int length = word(text_pos);
char* buffer = new char[length + 1];
for (int i = 1; i <= length; i++) {
int character = word(text_pos + i);
buffer[i - 1] = character;
}
buffer[length] = '\0';
message_ = buffer;
return buffer;
}
int beg_pos() {
if (!throws()) return -1;
return word(i::PreparseDataConstants::kHeaderSize +
i::PreparseDataConstants::kMessageStartPos);
}
int end_pos() {
if (!throws()) return -1;
return word(i::PreparseDataConstants::kHeaderSize +
i::PreparseDataConstants::kMessageEndPos);
}
private:
int word(int offset) {
const int* word_data = reinterpret_cast<const int*>(data_);
if (word_data + offset < reinterpret_cast<const int*>(data_ + length_)) {
return word_data[offset];
}
return -1;
}
const uint8_t* const data_;
const int length_;
const char* message_;
};
template <typename T>
class ScopedPointer {
public:
explicit ScopedPointer() : pointer_(NULL) {}
explicit ScopedPointer(T* pointer) : pointer_(pointer) {}
~ScopedPointer() { delete[] pointer_; }
~ScopedPointer() { if (pointer_ != NULL) delete[] pointer_; }
T& operator[](int index) { return pointer_[index]; }
T* operator*() { return pointer_ ;}
T* operator=(T* new_value) {
if (pointer_ != NULL) delete[] pointer_;
pointer_ = new_value;
return new_value;
}
private:
T* pointer_;
};
// Preparse input and output result on stdout.
int PreParseIO(FILE* input) {
fprintf(stderr, "LOG: Enter parsing loop\n");
bool ok = true;
uint32_t length = ReadUInt32(input, &ok);
fprintf(stderr, "LOG: Input length: %d\n", length);
if (!ok) return kErrorReading;
ScopedPointer<uint8_t> buffer(new uint8_t[length]);
if (!ReadBuffer(input, *buffer, length)) {
return kErrorReading;
void fail(v8::PreParserData* data, const char* message, ...) {
va_list args;
va_start(args, message);
vfprintf(stderr, message, args);
va_end(args);
fflush(stderr);
// Print preparser data to stdout.
uint32_t size = data->size();
fprintf(stderr, "LOG: data size: %u\n", size);
if (!WriteBuffer(stdout, data->data(), size)) {
perror("ERROR: Writing data");
fflush(stderr);
}
UTF8InputStream input_buffer(*buffer, static_cast<size_t>(length));
exit(EXIT_FAILURE);
};
v8::PreParserData data =
v8::Preparse(&input_buffer, 64 * 1024 * sizeof(void*)); // NOLINT
if (data.stack_overflow()) {
fprintf(stderr, "LOG: Stack overflow\n");
fflush(stderr);
// Report stack overflow error/no-preparser-data.
WriteUInt32(stdout, 0, &ok);
if (!ok) return kErrorWriting;
return 0;
bool IsFlag(const char* arg) {
// Anything starting with '-' is considered a flag.
// It's summarily ignored for now.
return arg[0] == '-';
}
struct ExceptionExpectation {
ExceptionExpectation()
: throws(false), type(NULL), beg_pos(-1), end_pos(-1) { }
bool throws;
const char* type;
int beg_pos;
int end_pos;
};
void CheckException(v8::PreParserData* data,
ExceptionExpectation* expects) {
PreparseDataInterpreter reader(data->data(), data->size());
if (expects->throws) {
if (!reader.throws()) {
if (expects->type == NULL) {
fail(data, "Didn't throw as expected\n");
} else {
fail(data, "Didn't throw \"%s\" as expected\n", expects->type);
}
}
if (expects->type != NULL) {
const char* actual_message = reader.message();
if (strcmp(expects->type, actual_message)) {
fail(data, "Wrong error message. Expected <%s>, found <%s> at %d..%d\n",
expects->type, actual_message, reader.beg_pos(), reader.end_pos());
}
}
if (expects->beg_pos >= 0) {
if (expects->beg_pos != reader.beg_pos()) {
fail(data, "Wrong error start position: Expected %i, found %i\n",
expects->beg_pos, reader.beg_pos());
}
}
if (expects->end_pos >= 0) {
if (expects->end_pos != reader.end_pos()) {
fail(data, "Wrong error end position: Expected %i, found %i\n",
expects->end_pos, reader.end_pos());
}
}
} else if (reader.throws()) {
const char* message = reader.message();
fail(data, "Throws unexpectedly with message: %s at location %d-%d\n",
message, reader.beg_pos(), reader.end_pos());
}
}
uint32_t size = data.size();
fprintf(stderr, "LOG: Success, data size: %u\n", size);
fflush(stderr);
WriteUInt32(stdout, size, &ok);
if (!ok) return kErrorWriting;
if (!WriteBuffer(stdout, data.data(), size)) {
return kErrorWriting;
ExceptionExpectation ParseExpectation(int argc, const char* argv[]) {
ExceptionExpectation expects;
// Parse exception expectations from (the remainder of) the command line.
int arg_index = 0;
// Skip any flags.
while (argc > arg_index && IsFlag(argv[arg_index])) arg_index++;
if (argc > arg_index) {
if (strncmp("throws", argv[arg_index], 7)) {
// First argument after filename, if present, must be the verbatim
// "throws", marking that the preparsing should fail with an exception.
fail(NULL, "ERROR: Extra arguments not prefixed by \"throws\".\n");
}
expects.throws = true;
do {
arg_index++;
} while (argc > arg_index && IsFlag(argv[arg_index]));
if (argc > arg_index) {
// Next argument is the exception type identifier.
expects.type = argv[arg_index];
do {
arg_index++;
} while (argc > arg_index && IsFlag(argv[arg_index]));
if (argc > arg_index) {
expects.beg_pos = atoi(argv[arg_index]); // NOLINT
do {
arg_index++;
} while (argc > arg_index && IsFlag(argv[arg_index]));
if (argc > arg_index) {
expects.end_pos = atoi(argv[arg_index]); // NOLINT
}
}
}
}
return 0;
return expects;
}
} } // namespace v8::internal
int main(int argc, const char* argv[]) {
// Parse command line.
// Format: preparser (<scriptfile> | -e "<source>")
// ["throws" [<exn-type> [<start> [<end>]]]]
// Any flags (except an initial -e) are ignored.
int main(int argc, char* argv[]) {
FILE* input = stdin;
if (argc > 1) {
char* arg = argv[1];
input = fopen(arg, "rb");
if (input == NULL) return EXIT_FAILURE;
// Check for mandatory filename argument.
int arg_index = 1;
if (argc <= arg_index) {
fail(NULL, "ERROR: No filename on command line.\n");
}
int status = 0;
do {
status = v8::internal::PreParseIO(input);
} while (status == 0);
fprintf(stderr, "EXIT: Failure %d\n", status);
fflush(stderr);
return EXIT_FAILURE;
const uint8_t* source = NULL;
const char* filename = argv[arg_index];
if (!strcmp(filename, "-e")) {
arg_index++;
if (argc <= arg_index) {
fail(NULL, "ERROR: No source after -e on command line.\n");
}
source = reinterpret_cast<const uint8_t*>(argv[arg_index]);
}
// Check remainder of command line for exception expectations.
arg_index++;
ExceptionExpectation expects =
ParseExpectation(argc - arg_index, argv + arg_index);
ScopedPointer<uint8_t> buffer;
size_t length;
if (source == NULL) {
// Open JS file.
FILE* input = fopen(filename, "rb");
if (input == NULL) {
perror("ERROR: Error opening file");
fflush(stderr);
return EXIT_FAILURE;
}
// Find length of JS file.
if (fseek(input, 0, SEEK_END) != 0) {
perror("ERROR: Error during seek");
fflush(stderr);
return EXIT_FAILURE;
}
length = static_cast<size_t>(ftell(input));
rewind(input);
// Read JS file into memory buffer.
buffer = new uint8_t[length];
if (!ReadBuffer(input, *buffer, length)) {
perror("ERROR: Reading file");
fflush(stderr);
return EXIT_FAILURE;
}
fclose(input);
source = *buffer;
} else {
length = strlen(reinterpret_cast<const char*>(source));
}
// Preparse input file.
AsciiInputStream input_buffer(source, length);
size_t kMaxStackSize = 64 * 1024 * sizeof(void*); // NOLINT
v8::PreParserData data = v8::Preparse(&input_buffer, kMaxStackSize);
// Fail if stack overflow.
if (data.stack_overflow()) {
fail(&data, "ERROR: Stack overflow\n");
}
// Check that the expected exception is thrown, if an exception is
// expected.
CheckException(&data, &expects);
return EXIT_SUCCESS;
}

6
deps/v8/samples/process.cc

@ -30,6 +30,10 @@
#include <string>
#include <map>
#ifdef COMPRESS_STARTUP_DATA_BZ2
#error Using compressed startup data is not supported for this sample
#endif
using namespace std;
using namespace v8;
@ -531,7 +535,7 @@ void ParseOptions(int argc,
string* file) {
for (int i = 1; i < argc; i++) {
string arg = argv[i];
int index = arg.find('=', 0);
size_t index = arg.find('=', 0);
if (index == string::npos) {
*file = arg;
} else {

453
deps/v8/samples/shell.cc

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -28,12 +28,36 @@
#include <v8.h>
#include <v8-testing.h>
#include <assert.h>
#ifdef COMPRESS_STARTUP_DATA_BZ2
#include <bzlib.h>
#endif
#include <fcntl.h>
#include <string.h>
#include <stdio.h>
#include <stdlib.h>
// When building with V8 in a shared library we cannot use functions which
// are not explicitly a part of the public V8 API. This extensive use of
// #ifndef USING_V8_SHARED/#endif is a hack until we can resolve whether to
// still use the shell sample for testing or change to use the developer
// shell d8 TODO(1272).
#if !(defined(USING_V8_SHARED) || defined(V8_SHARED))
#include "../src/v8.h"
#endif // USING_V8_SHARED
#if !defined(_WIN32) && !defined(_WIN64)
#include <unistd.h> // NOLINT
#endif
static void ExitShell(int exit_code) {
// Use _exit instead of exit to avoid races between isolate
// threads and static destructors.
fflush(stdout);
fflush(stderr);
_exit(exit_code);
}
v8::Persistent<v8::Context> CreateShellContext();
void RunShell(v8::Handle<v8::Context> context);
bool ExecuteString(v8::Handle<v8::String> source,
v8::Handle<v8::Value> name,
@ -44,62 +68,242 @@ v8::Handle<v8::Value> Read(const v8::Arguments& args);
v8::Handle<v8::Value> Load(const v8::Arguments& args);
v8::Handle<v8::Value> Quit(const v8::Arguments& args);
v8::Handle<v8::Value> Version(const v8::Arguments& args);
v8::Handle<v8::Value> Int8Array(const v8::Arguments& args);
v8::Handle<v8::Value> Uint8Array(const v8::Arguments& args);
v8::Handle<v8::Value> Int16Array(const v8::Arguments& args);
v8::Handle<v8::Value> Uint16Array(const v8::Arguments& args);
v8::Handle<v8::Value> Int32Array(const v8::Arguments& args);
v8::Handle<v8::Value> Uint32Array(const v8::Arguments& args);
v8::Handle<v8::Value> Float32Array(const v8::Arguments& args);
v8::Handle<v8::Value> Float64Array(const v8::Arguments& args);
v8::Handle<v8::Value> PixelArray(const v8::Arguments& args);
v8::Handle<v8::String> ReadFile(const char* name);
void ReportException(v8::TryCatch* handler);
static bool last_run = true;
class SourceGroup {
public:
SourceGroup() :
#if !(defined(USING_V8_SHARED) || defined(V8_SHARED))
next_semaphore_(v8::internal::OS::CreateSemaphore(0)),
done_semaphore_(v8::internal::OS::CreateSemaphore(0)),
thread_(NULL),
#endif // USING_V8_SHARED
argv_(NULL),
begin_offset_(0),
end_offset_(0) { }
void Begin(char** argv, int offset) {
argv_ = const_cast<const char**>(argv);
begin_offset_ = offset;
}
void End(int offset) { end_offset_ = offset; }
void Execute() {
for (int i = begin_offset_; i < end_offset_; ++i) {
const char* arg = argv_[i];
if (strcmp(arg, "-e") == 0 && i + 1 < end_offset_) {
// Execute argument given to -e option directly.
v8::HandleScope handle_scope;
v8::Handle<v8::String> file_name = v8::String::New("unnamed");
v8::Handle<v8::String> source = v8::String::New(argv_[i + 1]);
if (!ExecuteString(source, file_name, false, true)) {
ExitShell(1);
return;
}
++i;
} else if (arg[0] == '-') {
// Ignore other options. They have been parsed already.
} else {
// Use all other arguments as names of files to load and run.
v8::HandleScope handle_scope;
v8::Handle<v8::String> file_name = v8::String::New(arg);
v8::Handle<v8::String> source = ReadFile(arg);
if (source.IsEmpty()) {
printf("Error reading '%s'\n", arg);
continue;
}
if (!ExecuteString(source, file_name, false, true)) {
ExitShell(1);
return;
}
}
}
}
#if !(defined(USING_V8_SHARED) || defined(V8_SHARED))
void StartExecuteInThread() {
if (thread_ == NULL) {
thread_ = new IsolateThread(this);
thread_->Start();
}
next_semaphore_->Signal();
}
void WaitForThread() {
if (thread_ == NULL) return;
if (last_run) {
thread_->Join();
thread_ = NULL;
} else {
done_semaphore_->Wait();
}
}
#endif // USING_V8_SHARED
private:
#if !(defined(USING_V8_SHARED) || defined(V8_SHARED))
static v8::internal::Thread::Options GetThreadOptions() {
v8::internal::Thread::Options options;
options.name = "IsolateThread";
// On some systems (OSX 10.6) the stack size default is 0.5Mb or less
// which is not enough to parse the big literal expressions used in tests.
// The stack size should be at least StackGuard::kLimitSize + some
// OS-specific padding for thread startup code.
options.stack_size = 2 << 20; // 2 Mb seems to be enough
return options;
}
class IsolateThread : public v8::internal::Thread {
public:
explicit IsolateThread(SourceGroup* group)
: v8::internal::Thread(GetThreadOptions()), group_(group) {}
virtual void Run() {
group_->ExecuteInThread();
}
private:
SourceGroup* group_;
};
void ExecuteInThread() {
v8::Isolate* isolate = v8::Isolate::New();
do {
if (next_semaphore_ != NULL) next_semaphore_->Wait();
{
v8::Isolate::Scope iscope(isolate);
v8::HandleScope scope;
v8::Persistent<v8::Context> context = CreateShellContext();
{
v8::Context::Scope cscope(context);
Execute();
}
context.Dispose();
}
if (done_semaphore_ != NULL) done_semaphore_->Signal();
} while (!last_run);
isolate->Dispose();
}
v8::internal::Semaphore* next_semaphore_;
v8::internal::Semaphore* done_semaphore_;
v8::internal::Thread* thread_;
#endif // USING_V8_SHARED
const char** argv_;
int begin_offset_;
int end_offset_;
};
static SourceGroup* isolate_sources = NULL;
#ifdef COMPRESS_STARTUP_DATA_BZ2
class BZip2Decompressor : public v8::StartupDataDecompressor {
public:
virtual ~BZip2Decompressor() { }
protected:
virtual int DecompressData(char* raw_data,
int* raw_data_size,
const char* compressed_data,
int compressed_data_size) {
ASSERT_EQ(v8::StartupData::kBZip2,
v8::V8::GetCompressedStartupDataAlgorithm());
unsigned int decompressed_size = *raw_data_size;
int result =
BZ2_bzBuffToBuffDecompress(raw_data,
&decompressed_size,
const_cast<char*>(compressed_data),
compressed_data_size,
0, 1);
if (result == BZ_OK) {
*raw_data_size = decompressed_size;
}
return result;
}
};
#endif
int RunMain(int argc, char* argv[]) {
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
v8::HandleScope handle_scope;
// Create a template for the global object.
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
// Bind the global 'print' function to the C++ Print callback.
global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
// Bind the global 'read' function to the C++ Read callback.
global->Set(v8::String::New("read"), v8::FunctionTemplate::New(Read));
// Bind the global 'load' function to the C++ Load callback.
global->Set(v8::String::New("load"), v8::FunctionTemplate::New(Load));
// Bind the 'quit' function
global->Set(v8::String::New("quit"), v8::FunctionTemplate::New(Quit));
// Bind the 'version' function
global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
// Create a new execution environment containing the built-in
// functions
v8::Persistent<v8::Context> context = v8::Context::New(NULL, global);
v8::Persistent<v8::Context> context = CreateShellContext();
// Enter the newly created execution environment.
context->Enter();
if (context.IsEmpty()) {
printf("Error creating context\n");
return 1;
}
bool run_shell = (argc == 1);
int num_isolates = 1;
for (int i = 1; i < argc; i++) {
// Enter the execution environment before evaluating any code.
v8::Context::Scope context_scope(context);
const char* str = argv[i];
if (strcmp(str, "--shell") == 0) {
run_shell = true;
} else if (strcmp(str, "-f") == 0) {
// Ignore any -f flags for compatibility with the other stand-
// alone JavaScript engines.
continue;
} else if (strncmp(str, "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
} else if (strcmp(str, "-e") == 0 && i + 1 < argc) {
// Execute argument given to -e option directly
v8::HandleScope handle_scope;
v8::Handle<v8::String> file_name = v8::String::New("unnamed");
v8::Handle<v8::String> source = v8::String::New(argv[i + 1]);
if (!ExecuteString(source, file_name, false, true))
return 1;
i++;
} else {
// Use all other arguments as names of files to load and run.
v8::HandleScope handle_scope;
v8::Handle<v8::String> file_name = v8::String::New(str);
v8::Handle<v8::String> source = ReadFile(str);
if (source.IsEmpty()) {
printf("Error reading '%s'\n", str);
return 1;
if (strcmp(argv[i], "--isolate") == 0) {
#if !(defined(USING_V8_SHARED) || defined(V8_SHARED))
++num_isolates;
#else // USING_V8_SHARED
printf("Error: --isolate not supported when linked with shared "
"library\n");
ExitShell(1);
#endif // USING_V8_SHARED
}
}
if (isolate_sources == NULL) {
isolate_sources = new SourceGroup[num_isolates];
SourceGroup* current = isolate_sources;
current->Begin(argv, 1);
for (int i = 1; i < argc; i++) {
const char* str = argv[i];
if (strcmp(str, "--isolate") == 0) {
current->End(i);
current++;
current->Begin(argv, i + 1);
} else if (strcmp(str, "--shell") == 0) {
run_shell = true;
} else if (strcmp(str, "-f") == 0) {
// Ignore any -f flags for compatibility with the other stand-
// alone JavaScript engines.
continue;
} else if (strncmp(str, "--", 2) == 0) {
printf("Warning: unknown flag %s.\nTry --help for options\n", str);
}
if (!ExecuteString(source, file_name, false, true))
return 1;
}
current->End(argc);
}
#if !(defined(USING_V8_SHARED) || defined(V8_SHARED))
for (int i = 1; i < num_isolates; ++i) {
isolate_sources[i].StartExecuteInThread();
}
#endif // USING_V8_SHARED
isolate_sources[0].Execute();
if (run_shell) RunShell(context);
#if !(defined(USING_V8_SHARED) || defined(V8_SHARED))
for (int i = 1; i < num_isolates; ++i) {
isolate_sources[i].WaitForThread();
}
#endif // USING_V8_SHARED
if (last_run) {
delete[] isolate_sources;
isolate_sources = NULL;
}
context->Exit();
context.Dispose();
return 0;
}
@ -126,6 +330,15 @@ int main(int argc, char* argv[]) {
}
}
#ifdef COMPRESS_STARTUP_DATA_BZ2
BZip2Decompressor startup_data_decompressor;
int bz2_result = startup_data_decompressor.Decompress();
if (bz2_result != BZ_OK) {
fprintf(stderr, "bzip error code: %d\n", bz2_result);
exit(1);
}
#endif
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
int result = 0;
if (FLAG_stress_opt || FLAG_stress_deopt) {
@ -137,12 +350,16 @@ int main(int argc, char* argv[]) {
printf("============ Stress %d/%d ============\n",
i + 1, stress_runs);
v8::Testing::PrepareStressRun(i);
last_run = (i == stress_runs - 1);
result = RunMain(argc, argv);
}
printf("======== Full Deoptimization =======\n");
v8::Testing::DeoptimizeAll();
} else {
result = RunMain(argc, argv);
}
v8::V8::Dispose();
return result;
}
@ -153,6 +370,46 @@ const char* ToCString(const v8::String::Utf8Value& value) {
}
// Creates a new execution environment containing the built-in
// functions.
v8::Persistent<v8::Context> CreateShellContext() {
// Create a template for the global object.
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
// Bind the global 'print' function to the C++ Print callback.
global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
// Bind the global 'read' function to the C++ Read callback.
global->Set(v8::String::New("read"), v8::FunctionTemplate::New(Read));
// Bind the global 'load' function to the C++ Load callback.
global->Set(v8::String::New("load"), v8::FunctionTemplate::New(Load));
// Bind the 'quit' function
global->Set(v8::String::New("quit"), v8::FunctionTemplate::New(Quit));
// Bind the 'version' function
global->Set(v8::String::New("version"), v8::FunctionTemplate::New(Version));
// Bind the handlers for external arrays.
global->Set(v8::String::New("Int8Array"),
v8::FunctionTemplate::New(Int8Array));
global->Set(v8::String::New("Uint8Array"),
v8::FunctionTemplate::New(Uint8Array));
global->Set(v8::String::New("Int16Array"),
v8::FunctionTemplate::New(Int16Array));
global->Set(v8::String::New("Uint16Array"),
v8::FunctionTemplate::New(Uint16Array));
global->Set(v8::String::New("Int32Array"),
v8::FunctionTemplate::New(Int32Array));
global->Set(v8::String::New("Uint32Array"),
v8::FunctionTemplate::New(Uint32Array));
global->Set(v8::String::New("Float32Array"),
v8::FunctionTemplate::New(Float32Array));
global->Set(v8::String::New("Float64Array"),
v8::FunctionTemplate::New(Float64Array));
global->Set(v8::String::New("PixelArray"),
v8::FunctionTemplate::New(PixelArray));
return v8::Context::New(NULL, global);
}
// The callback that is invoked by v8 whenever the JavaScript 'print'
// function is called. Prints its arguments on stdout separated by
// spaces and ending with a newline.
@ -222,7 +479,7 @@ v8::Handle<v8::Value> Quit(const v8::Arguments& args) {
// If no arguments are given args[0] will yield undefined which
// converts to the integer value 0.
int exit_code = args[0]->Int32Value();
exit(exit_code);
ExitShell(exit_code);
return v8::Undefined();
}
@ -232,6 +489,110 @@ v8::Handle<v8::Value> Version(const v8::Arguments& args) {
}
void ExternalArrayWeakCallback(v8::Persistent<v8::Value> object, void* data) {
free(data);
object.Dispose();
}
v8::Handle<v8::Value> CreateExternalArray(const v8::Arguments& args,
v8::ExternalArrayType type,
size_t element_size) {
ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
element_size == 8);
if (args.Length() != 1) {
return v8::ThrowException(
v8::String::New("Array constructor needs one parameter."));
}
size_t length = 0;
if (args[0]->IsUint32()) {
length = args[0]->Uint32Value();
} else if (args[0]->IsNumber()) {
double raw_length = args[0]->NumberValue();
if (raw_length < 0) {
return v8::ThrowException(
v8::String::New("Array length must not be negative."));
}
if (raw_length > v8::internal::ExternalArray::kMaxLength) {
return v8::ThrowException(
v8::String::New("Array length exceeds maximum length."));
}
length = static_cast<size_t>(raw_length);
} else {
return v8::ThrowException(
v8::String::New("Array length must be a number."));
}
if (length > static_cast<size_t>(v8::internal::ExternalArray::kMaxLength)) {
return v8::ThrowException(
v8::String::New("Array length exceeds maximum length."));
}
void* data = calloc(length, element_size);
if (data == NULL) {
return v8::ThrowException(v8::String::New("Memory allocation failed."));
}
v8::Handle<v8::Object> array = v8::Object::New();
v8::Persistent<v8::Object> persistent_array =
v8::Persistent<v8::Object>::New(array);
persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
persistent_array.MarkIndependent();
array->SetIndexedPropertiesToExternalArrayData(data, type, length);
array->Set(v8::String::New("length"), v8::Int32::New(length),
v8::ReadOnly);
array->Set(v8::String::New("BYTES_PER_ELEMENT"),
v8::Int32::New(element_size));
return array;
}
v8::Handle<v8::Value> Int8Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
}
v8::Handle<v8::Value> Uint8Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalUnsignedByteArray,
sizeof(uint8_t));
}
v8::Handle<v8::Value> Int16Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalShortArray, sizeof(int16_t));
}
v8::Handle<v8::Value> Uint16Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalUnsignedShortArray,
sizeof(uint16_t));
}
v8::Handle<v8::Value> Int32Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalIntArray, sizeof(int32_t));
}
v8::Handle<v8::Value> Uint32Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalUnsignedIntArray,
sizeof(uint32_t));
}
v8::Handle<v8::Value> Float32Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalFloatArray,
sizeof(float)); // NOLINT
}
v8::Handle<v8::Value> Float64Array(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalDoubleArray,
sizeof(double)); // NOLINT
}
v8::Handle<v8::Value> PixelArray(const v8::Arguments& args) {
return CreateExternalArray(args, v8::kExternalPixelArray, sizeof(uint8_t));
}
// Reads a file into a v8 string.
v8::Handle<v8::String> ReadFile(const char* name) {
FILE* file = fopen(name, "rb");

86
deps/v8/src/SConscript

@ -68,7 +68,6 @@ SOURCES = {
execution.cc
factory.cc
flags.cc
frame-element.cc
frames.cc
full-codegen.cc
func-name-inferrer.cc
@ -85,8 +84,8 @@ SOURCES = {
ic.cc
inspector.cc
interpreter-irregexp.cc
isolate.cc
jsregexp.cc
jump-target.cc
lithium-allocator.cc
lithium.cc
liveedit.cc
@ -106,7 +105,6 @@ SOURCES = {
regexp-macro-assembler-irregexp.cc
regexp-macro-assembler.cc
regexp-stack.cc
register-allocator.cc
rewriter.cc
runtime.cc
runtime-profiler.cc
@ -123,7 +121,6 @@ SOURCES = {
strtod.cc
stub-cache.cc
token.cc
top.cc
type-info.cc
unicode.cc
utils.cc
@ -132,14 +129,11 @@ SOURCES = {
v8threads.cc
variables.cc
version.cc
virtual-frame.cc
zone.cc
extensions/gc-extension.cc
extensions/externalize-string-extension.cc
"""),
'arch:arm': Split("""
jump-target-light.cc
virtual-frame-light.cc
arm/builtins-arm.cc
arm/code-stubs-arm.cc
arm/codegen-arm.cc
@ -151,37 +145,32 @@ SOURCES = {
arm/frames-arm.cc
arm/full-codegen-arm.cc
arm/ic-arm.cc
arm/jump-target-arm.cc
arm/lithium-arm.cc
arm/lithium-codegen-arm.cc
arm/lithium-gap-resolver-arm.cc
arm/macro-assembler-arm.cc
arm/regexp-macro-assembler-arm.cc
arm/register-allocator-arm.cc
arm/stub-cache-arm.cc
arm/virtual-frame-arm.cc
arm/assembler-arm.cc
"""),
'arch:mips': Split("""
mips/assembler-mips.cc
mips/builtins-mips.cc
mips/code-stubs-mips.cc
mips/codegen-mips.cc
mips/constants-mips.cc
mips/cpu-mips.cc
mips/debug-mips.cc
mips/deoptimizer-mips.cc
mips/disasm-mips.cc
mips/full-codegen-mips.cc
mips/frames-mips.cc
mips/full-codegen-mips.cc
mips/ic-mips.cc
mips/jump-target-mips.cc
mips/macro-assembler-mips.cc
mips/register-allocator-mips.cc
mips/regexp-macro-assembler-mips.cc
mips/stub-cache-mips.cc
mips/virtual-frame-mips.cc
"""),
'arch:ia32': Split("""
jump-target-heavy.cc
virtual-frame-heavy.cc
ia32/assembler-ia32.cc
ia32/builtins-ia32.cc
ia32/code-stubs-ia32.cc
@ -193,19 +182,14 @@ SOURCES = {
ia32/frames-ia32.cc
ia32/full-codegen-ia32.cc
ia32/ic-ia32.cc
ia32/jump-target-ia32.cc
ia32/lithium-codegen-ia32.cc
ia32/lithium-gap-resolver-ia32.cc
ia32/lithium-ia32.cc
ia32/macro-assembler-ia32.cc
ia32/regexp-macro-assembler-ia32.cc
ia32/register-allocator-ia32.cc
ia32/stub-cache-ia32.cc
ia32/virtual-frame-ia32.cc
"""),
'arch:x64': Split("""
jump-target-heavy.cc
virtual-frame-heavy.cc
x64/assembler-x64.cc
x64/builtins-x64.cc
x64/code-stubs-x64.cc
@ -217,15 +201,12 @@ SOURCES = {
x64/frames-x64.cc
x64/full-codegen-x64.cc
x64/ic-x64.cc
x64/jump-target-x64.cc
x64/lithium-codegen-x64.cc
x64/lithium-gap-resolver-x64.cc
x64/lithium-x64.cc
x64/macro-assembler-x64.cc
x64/regexp-macro-assembler-x64.cc
x64/register-allocator-x64.cc
x64/stub-cache-x64.cc
x64/virtual-frame-x64.cc
"""),
'simulator:arm': ['arm/simulator-arm.cc'],
'simulator:mips': ['mips/simulator-mips.cc'],
@ -245,6 +226,20 @@ SOURCES = {
}
PREPARSER_SOURCES = {
'all': Split("""
allocation.cc
hashmap.cc
preparse-data.cc
preparser.cc
preparser-api.cc
scanner-base.cc
token.cc
unicode.cc
""")
}
D8_FILES = {
'all': [
'd8.cc', 'd8-debug.cc'
@ -300,6 +295,11 @@ debug-debugger.js
'''.split()
EXPERIMENTAL_LIBRARY_FILES = '''
proxy.js
'''.split()
def Abort(message):
print message
sys.exit(1)
@ -310,13 +310,22 @@ def ConfigureObjectFiles():
env.Replace(**context.flags['v8'])
context.ApplyEnvOverrides(env)
env['BUILDERS']['JS2C'] = Builder(action=js2c.JS2C)
env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')
if 'ENABLE_LOGGING_AND_PROFILING' in env['CPPDEFINES']:
env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET --logfile "$LOGFILE" --log-snapshot-positions')
else:
env['BUILDERS']['Snapshot'] = Builder(action='$SOURCE $TARGET')
def BuildJS2CEnv(type):
js2c_env = { 'TYPE': type, 'COMPRESSION': 'off' }
if 'COMPRESS_STARTUP_DATA_BZ2' in env['CPPDEFINES']:
js2c_env['COMPRESSION'] = 'bz2'
return js2c_env
# Build the standard platform-independent source files.
source_files = context.GetRelevantSources(SOURCES)
d8_files = context.GetRelevantSources(D8_FILES)
d8_js = env.JS2C('d8-js.cc', 'd8.js', TYPE='D8')
d8_js = env.JS2C('d8-js.cc', 'd8.js', **{'TYPE': 'D8', 'COMPRESSION': 'off'})
d8_js_obj = context.ConfigureObject(env, d8_js, CPPPATH=['.'])
d8_objs = [context.ConfigureObject(env, [d8_files]), d8_js_obj]
@ -324,12 +333,25 @@ def ConfigureObjectFiles():
# compile it.
library_files = [s for s in LIBRARY_FILES]
library_files.append('macros.py')
libraries_src, libraries_empty_src = env.JS2C(['libraries.cc', 'libraries-empty.cc'], library_files, TYPE='CORE')
libraries_src = env.JS2C(
['libraries.cc'], library_files, **BuildJS2CEnv('CORE'))
libraries_obj = context.ConfigureObject(env, libraries_src, CPPPATH=['.'])
# Combine the experimental JavaScript library files into a C++ file
# and compile it.
experimental_library_files = [ s for s in EXPERIMENTAL_LIBRARY_FILES ]
experimental_library_files.append('macros.py')
experimental_libraries_src = env.JS2C(['experimental-libraries.cc'],
experimental_library_files,
**BuildJS2CEnv('EXPERIMENTAL'))
experimental_libraries_obj = context.ConfigureObject(env, experimental_libraries_src, CPPPATH=['.'])
source_objs = context.ConfigureObject(env, source_files)
non_snapshot_files = [source_objs]
preparser_source_files = context.GetRelevantSources(PREPARSER_SOURCES)
preparser_objs = context.ConfigureObject(env, preparser_source_files)
# Create snapshot if necessary. For cross compilation you should either
# do without snapshots and take the performance hit or you should build a
# host VM with the simulator=arm and snapshot=on options and then take the
@ -340,7 +362,7 @@ def ConfigureObjectFiles():
mksnapshot_env = env.Copy()
mksnapshot_env.Replace(**context.flags['mksnapshot'])
mksnapshot_src = 'mksnapshot.cc'
mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
mksnapshot = mksnapshot_env.Program('mksnapshot', [mksnapshot_src, libraries_obj, experimental_libraries_obj, non_snapshot_files, empty_snapshot_obj], PDB='mksnapshot.exe.pdb')
if context.use_snapshot:
if context.build_snapshot:
snapshot_cc = env.Snapshot('snapshot.cc', mksnapshot, LOGFILE=File('snapshot.log').abspath)
@ -349,9 +371,9 @@ def ConfigureObjectFiles():
snapshot_obj = context.ConfigureObject(env, snapshot_cc, CPPPATH=['.'])
else:
snapshot_obj = empty_snapshot_obj
library_objs = [non_snapshot_files, libraries_obj, snapshot_obj]
return (library_objs, d8_objs, [mksnapshot])
library_objs = [non_snapshot_files, libraries_obj, experimental_libraries_obj, snapshot_obj]
return (library_objs, d8_objs, [mksnapshot], preparser_objs)
(library_objs, d8_objs, mksnapshot) = ConfigureObjectFiles()
Return('library_objs d8_objs mksnapshot')
(library_objs, d8_objs, mksnapshot, preparser_objs) = ConfigureObjectFiles()
Return('library_objs d8_objs mksnapshot preparser_objs')

309
deps/v8/src/accessors.cc

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -32,9 +32,9 @@
#include "deoptimizer.h"
#include "execution.h"
#include "factory.h"
#include "list-inl.h"
#include "safepoint-table.h"
#include "scopeinfo.h"
#include "top.h"
namespace v8 {
namespace internal {
@ -43,8 +43,9 @@ namespace internal {
template <class C>
static C* FindInPrototypeChain(Object* obj, bool* found_it) {
ASSERT(!*found_it);
Heap* heap = HEAP;
while (!Is<C>(obj)) {
if (obj == Heap::null_value()) return NULL;
if (obj == heap->null_value()) return NULL;
obj = obj->GetPrototype();
}
*found_it = true;
@ -90,24 +91,34 @@ MaybeObject* Accessors::ArrayGetLength(Object* object, void*) {
Object* Accessors::FlattenNumber(Object* value) {
if (value->IsNumber() || !value->IsJSValue()) return value;
JSValue* wrapper = JSValue::cast(value);
ASSERT(
Top::context()->global_context()->number_function()->has_initial_map());
Map* number_map =
Top::context()->global_context()->number_function()->initial_map();
ASSERT(Isolate::Current()->context()->global_context()->number_function()->
has_initial_map());
Map* number_map = Isolate::Current()->context()->global_context()->
number_function()->initial_map();
if (wrapper->map() == number_map) return wrapper->value();
return value;
}
MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
Isolate* isolate = object->GetIsolate();
// This means one of the object's prototypes is a JSArray and the
// object does not have a 'length' property. Calling SetProperty
// causes an infinite loop.
if (!object->IsJSArray()) {
return object->SetLocalPropertyIgnoreAttributes(
isolate->heap()->length_symbol(), value, NONE);
}
value = FlattenNumber(value);
// Need to call methods that may trigger GC.
HandleScope scope;
HandleScope scope(isolate);
// Protect raw pointers.
Handle<JSObject> object_handle(object);
Handle<Object> value_handle(value);
Handle<JSObject> object_handle(object, isolate);
Handle<Object> value_handle(value, isolate);
bool has_exception;
Handle<Object> uint32_v = Execution::ToUint32(value_handle, &has_exception);
@ -115,23 +126,12 @@ MaybeObject* Accessors::ArraySetLength(JSObject* object, Object* value, void*) {
Handle<Object> number_v = Execution::ToNumber(value_handle, &has_exception);
if (has_exception) return Failure::Exception();
// Restore raw pointers,
object = *object_handle;
value = *value_handle;
if (uint32_v->Number() == number_v->Number()) {
if (object->IsJSArray()) {
return JSArray::cast(object)->SetElementsLength(*uint32_v);
} else {
// This means one of the object's prototypes is a JSArray and
// the object does not have a 'length' property.
// Calling SetProperty causes an infinite loop.
return object->SetLocalPropertyIgnoreAttributes(Heap::length_symbol(),
value, NONE);
}
return Handle<JSArray>::cast(object_handle)->SetElementsLength(*uint32_v);
}
return Top::Throw(*Factory::NewRangeError("invalid_array_length",
HandleVector<Object>(NULL, 0)));
return isolate->Throw(
*isolate->factory()->NewRangeError("invalid_array_length",
HandleVector<Object>(NULL, 0)));
}
@ -314,15 +314,18 @@ const AccessorDescriptor Accessors::ScriptCompilationType = {
MaybeObject* Accessors::ScriptGetLineEnds(Object* object, void*) {
HandleScope scope;
Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
JSValue* wrapper = JSValue::cast(object);
Isolate* isolate = wrapper->GetIsolate();
HandleScope scope(isolate);
Handle<Script> script(Script::cast(wrapper->value()), isolate);
InitScriptLineEnds(script);
ASSERT(script->line_ends()->IsFixedArray());
Handle<FixedArray> line_ends(FixedArray::cast(script->line_ends()));
// We do not want anyone to modify this array from JS.
ASSERT(*line_ends == Heap::empty_fixed_array() ||
line_ends->map() == Heap::fixed_cow_array_map());
Handle<JSArray> js_array = Factory::NewJSArrayWithElements(line_ends);
ASSERT(*line_ends == isolate->heap()->empty_fixed_array() ||
line_ends->map() == isolate->heap()->fixed_cow_array_map());
Handle<JSArray> js_array =
isolate->factory()->NewJSArrayWithElements(line_ends);
return *js_array;
}
@ -368,7 +371,7 @@ MaybeObject* Accessors::ScriptGetEvalFromScript(Object* object, void*) {
return *GetScriptWrapper(eval_from_script);
}
}
return Heap::undefined_value();
return HEAP->undefined_value();
}
@ -391,7 +394,7 @@ MaybeObject* Accessors::ScriptGetEvalFromScriptPosition(Object* object, void*) {
// If this is not a script compiled through eval there is no eval position.
int compilation_type = Smi::cast(script->compilation_type())->value();
if (compilation_type != Script::COMPILATION_TYPE_EVAL) {
return Heap::undefined_value();
return HEAP->undefined_value();
}
// Get the function from where eval was called and find the source position
@ -443,9 +446,10 @@ const AccessorDescriptor Accessors::ScriptEvalFromFunctionName = {
MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
Heap* heap = Isolate::Current()->heap();
bool found_it = false;
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
if (!found_it) return Heap::undefined_value();
if (!found_it) return heap->undefined_value();
while (!function->should_have_prototype()) {
found_it = false;
function = FindInPrototypeChain<JSFunction>(object->GetPrototype(),
@ -456,7 +460,7 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
if (!function->has_prototype()) {
Object* prototype;
{ MaybeObject* maybe_prototype = Heap::AllocateFunctionPrototype(function);
{ MaybeObject* maybe_prototype = heap->AllocateFunctionPrototype(function);
if (!maybe_prototype->ToObject(&prototype)) return maybe_prototype;
}
Object* result;
@ -471,12 +475,13 @@ MaybeObject* Accessors::FunctionGetPrototype(Object* object, void*) {
MaybeObject* Accessors::FunctionSetPrototype(JSObject* object,
Object* value,
void*) {
Heap* heap = object->GetHeap();
bool found_it = false;
JSFunction* function = FindInPrototypeChain<JSFunction>(object, &found_it);
if (!found_it) return Heap::undefined_value();
if (!found_it) return heap->undefined_value();
if (!function->should_have_prototype()) {
// Since we hit this accessor, object will have no prototype property.
return object->SetLocalPropertyIgnoreAttributes(Heap::prototype_symbol(),
return object->SetLocalPropertyIgnoreAttributes(heap->prototype_symbol(),
value,
NONE);
}
@ -545,7 +550,7 @@ const AccessorDescriptor Accessors::FunctionLength = {
MaybeObject* Accessors::FunctionGetName(Object* object, void*) {
bool found_it = false;
JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
if (!found_it) return Heap::undefined_value();
if (!found_it) return HEAP->undefined_value();
return holder->shared()->name();
}
@ -561,183 +566,20 @@ const AccessorDescriptor Accessors::FunctionName = {
// Accessors::FunctionArguments
//
static Address SlotAddress(JavaScriptFrame* frame, int slot_index) {
if (slot_index >= 0) {
const int offset = JavaScriptFrameConstants::kLocal0Offset;
return frame->fp() + offset - (slot_index * kPointerSize);
} else {
const int offset = JavaScriptFrameConstants::kSavedRegistersOffset;
return frame->fp() + offset - ((slot_index + 1) * kPointerSize);
}
}
// We can't intermix stack decoding and allocations because
// deoptimization infrastructure is not GC safe.
// Thus we build a temporary structure in malloced space.
class SlotRef BASE_EMBEDDED {
public:
enum SlotRepresentation {
UNKNOWN,
TAGGED,
INT32,
DOUBLE,
LITERAL
};
SlotRef()
: addr_(NULL), representation_(UNKNOWN) { }
SlotRef(Address addr, SlotRepresentation representation)
: addr_(addr), representation_(representation) { }
explicit SlotRef(Object* literal)
: literal_(literal), representation_(LITERAL) { }
Handle<Object> GetValue() {
switch (representation_) {
case TAGGED:
return Handle<Object>(Memory::Object_at(addr_));
case INT32: {
int value = Memory::int32_at(addr_);
if (Smi::IsValid(value)) {
return Handle<Object>(Smi::FromInt(value));
} else {
return Factory::NewNumberFromInt(value);
}
}
case DOUBLE: {
double value = Memory::double_at(addr_);
return Factory::NewNumber(value);
}
case LITERAL:
return literal_;
default:
UNREACHABLE();
return Handle<Object>::null();
}
}
private:
Address addr_;
Handle<Object> literal_;
SlotRepresentation representation_;
};
static SlotRef ComputeSlotForNextArgument(TranslationIterator* iterator,
DeoptimizationInputData* data,
JavaScriptFrame* frame) {
Translation::Opcode opcode =
static_cast<Translation::Opcode>(iterator->Next());
switch (opcode) {
case Translation::BEGIN:
case Translation::FRAME:
// Peeled off before getting here.
break;
case Translation::ARGUMENTS_OBJECT:
// This can be only emitted for local slots not for argument slots.
break;
case Translation::REGISTER:
case Translation::INT32_REGISTER:
case Translation::DOUBLE_REGISTER:
case Translation::DUPLICATE:
// We are at safepoint which corresponds to call. All registers are
// saved by caller so there would be no live registers at this
// point. Thus these translation commands should not be used.
break;
case Translation::STACK_SLOT: {
int slot_index = iterator->Next();
Address slot_addr = SlotAddress(frame, slot_index);
return SlotRef(slot_addr, SlotRef::TAGGED);
}
case Translation::INT32_STACK_SLOT: {
int slot_index = iterator->Next();
Address slot_addr = SlotAddress(frame, slot_index);
return SlotRef(slot_addr, SlotRef::INT32);
}
case Translation::DOUBLE_STACK_SLOT: {
int slot_index = iterator->Next();
Address slot_addr = SlotAddress(frame, slot_index);
return SlotRef(slot_addr, SlotRef::DOUBLE);
}
case Translation::LITERAL: {
int literal_index = iterator->Next();
return SlotRef(data->LiteralArray()->get(literal_index));
}
}
UNREACHABLE();
return SlotRef();
}
static void ComputeSlotMappingForArguments(JavaScriptFrame* frame,
int inlined_frame_index,
Vector<SlotRef>* args_slots) {
AssertNoAllocation no_gc;
int deopt_index = AstNode::kNoNumber;
DeoptimizationInputData* data =
static_cast<OptimizedFrame*>(frame)->GetDeoptimizationData(&deopt_index);
TranslationIterator it(data->TranslationByteArray(),
data->TranslationIndex(deopt_index)->value());
Translation::Opcode opcode = static_cast<Translation::Opcode>(it.Next());
ASSERT(opcode == Translation::BEGIN);
int frame_count = it.Next();
USE(frame_count);
ASSERT(frame_count > inlined_frame_index);
int frames_to_skip = inlined_frame_index;
while (true) {
opcode = static_cast<Translation::Opcode>(it.Next());
// Skip over operands to advance to the next opcode.
it.Skip(Translation::NumberOfOperandsFor(opcode));
if (opcode == Translation::FRAME) {
if (frames_to_skip == 0) {
// We reached the frame corresponding to the inlined function
// in question. Process the translation commands for the
// arguments.
//
// Skip the translation command for the receiver.
it.Skip(Translation::NumberOfOperandsFor(
static_cast<Translation::Opcode>(it.Next())));
// Compute slots for arguments.
for (int i = 0; i < args_slots->length(); ++i) {
(*args_slots)[i] = ComputeSlotForNextArgument(&it, data, frame);
}
return;
}
frames_to_skip--;
}
}
UNREACHABLE();
}
static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
JavaScriptFrame* frame,
Handle<JSFunction> inlined_function,
int inlined_frame_index) {
Factory* factory = Isolate::Current()->factory();
int args_count = inlined_function->shared()->formal_parameter_count();
ScopedVector<SlotRef> args_slots(args_count);
ComputeSlotMappingForArguments(frame, inlined_frame_index, &args_slots);
SlotRef::ComputeSlotMappingForArguments(frame,
inlined_frame_index,
&args_slots);
Handle<JSObject> arguments =
Factory::NewArgumentsObject(inlined_function, args_count);
Handle<FixedArray> array = Factory::NewFixedArray(args_count);
factory->NewArgumentsObject(inlined_function, args_count);
Handle<FixedArray> array = factory->NewFixedArray(args_count);
for (int i = 0; i < args_count; ++i) {
Handle<Object> value = args_slots[i].GetValue();
array->set(i, *value);
@ -750,15 +592,16 @@ static MaybeObject* ConstructArgumentsObjectForInlinedFunction(
MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
HandleScope scope;
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
bool found_it = false;
JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
if (!found_it) return Heap::undefined_value();
Handle<JSFunction> function(holder);
if (!found_it) return isolate->heap()->undefined_value();
Handle<JSFunction> function(holder, isolate);
// Find the top invocation of the function by traversing frames.
List<JSFunction*> functions(2);
for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
frame->GetFunctions(&functions);
for (int i = functions.length() - 1; i >= 0; i--) {
@ -776,9 +619,9 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
if (!frame->is_optimized()) {
// If there is an arguments variable in the stack, we return that.
Handle<SerializedScopeInfo> info(function->shared()->scope_info());
int index = info->StackSlotIndex(Heap::arguments_symbol());
int index = info->StackSlotIndex(isolate->heap()->arguments_symbol());
if (index >= 0) {
Handle<Object> arguments(frame->GetExpression(index));
Handle<Object> arguments(frame->GetExpression(index), isolate);
if (!arguments->IsArgumentsMarker()) return *arguments;
}
}
@ -792,15 +635,13 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
// Get the number of arguments and construct an arguments object
// mirror for the right frame.
const int length = frame->ComputeParametersCount();
Handle<JSObject> arguments = Factory::NewArgumentsObject(function,
length);
Handle<FixedArray> array = Factory::NewFixedArray(length);
Handle<JSObject> arguments = isolate->factory()->NewArgumentsObject(
function, length);
Handle<FixedArray> array = isolate->factory()->NewFixedArray(length);
// Copy the parameters to the arguments object.
ASSERT(array->length() == length);
for (int i = 0; i < length; i++) {
array->set(i, frame->GetParameter(i));
}
for (int i = 0; i < length; i++) array->set(i, frame->GetParameter(i));
arguments->set_elements(*array);
// Return the freshly allocated arguments object.
@ -810,7 +651,7 @@ MaybeObject* Accessors::FunctionGetArguments(Object* object, void*) {
}
// No frame corresponding to the given function found. Return null.
return Heap::null_value();
return isolate->heap()->null_value();
}
@ -826,16 +667,30 @@ const AccessorDescriptor Accessors::FunctionArguments = {
//
static MaybeObject* CheckNonStrictCallerOrThrow(
Isolate* isolate,
JSFunction* caller) {
DisableAssertNoAllocation enable_allocation;
if (caller->shared()->strict_mode()) {
return isolate->Throw(
*isolate->factory()->NewTypeError("strict_caller",
HandleVector<Object>(NULL, 0)));
}
return caller;
}
MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
HandleScope scope;
Isolate* isolate = Isolate::Current();
HandleScope scope(isolate);
AssertNoAllocation no_alloc;
bool found_it = false;
JSFunction* holder = FindInPrototypeChain<JSFunction>(object, &found_it);
if (!found_it) return Heap::undefined_value();
Handle<JSFunction> function(holder);
if (!found_it) return isolate->heap()->undefined_value();
Handle<JSFunction> function(holder, isolate);
List<JSFunction*> functions(2);
for (JavaScriptFrameIterator it; !it.done(); it.Advance()) {
for (JavaScriptFrameIterator it(isolate); !it.done(); it.Advance()) {
JavaScriptFrame* frame = it.frame();
frame->GetFunctions(&functions);
for (int i = functions.length() - 1; i >= 0; i--) {
@ -845,18 +700,18 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
// frames, e.g. frames for scripts not functions.
if (i > 0) {
ASSERT(!functions[i - 1]->shared()->is_toplevel());
return functions[i - 1];
return CheckNonStrictCallerOrThrow(isolate, functions[i - 1]);
} else {
for (it.Advance(); !it.done(); it.Advance()) {
frame = it.frame();
functions.Rewind(0);
frame->GetFunctions(&functions);
if (!functions.last()->shared()->is_toplevel()) {
return functions.last();
return CheckNonStrictCallerOrThrow(isolate, functions.last());
}
ASSERT(functions.length() == 1);
}
if (it.done()) return Heap::null_value();
if (it.done()) return isolate->heap()->null_value();
break;
}
}
@ -865,7 +720,7 @@ MaybeObject* Accessors::FunctionGetCaller(Object* object, void*) {
}
// No frame corresponding to the given function found. Return null.
return Heap::null_value();
return isolate->heap()->null_value();
}

2
deps/v8/src/accessors.h

@ -28,6 +28,8 @@
#ifndef V8_ACCESSORS_H_
#define V8_ACCESSORS_H_
#include "allocation.h"
namespace v8 {
namespace internal {

19
deps/v8/src/x64/codegen-x64-inl.h → deps/v8/src/allocation-inl.h

@ -25,22 +25,25 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ALLOCATION_INL_H_
#define V8_ALLOCATION_INL_H_
#ifndef V8_X64_CODEGEN_X64_INL_H_
#define V8_X64_CODEGEN_X64_INL_H_
#include "allocation.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
// Platform-specific inline functions.
void* PreallocatedStorage::New(size_t size) {
return Isolate::Current()->PreallocatedStorageNew(size);
}
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cc) { __ j(cc, &entry_label_); }
#undef __
void PreallocatedStorage::Delete(void* p) {
return Isolate::Current()->PreallocatedStorageDelete(p);
}
} } // namespace v8::internal
#endif // V8_X64_CODEGEN_X64_INL_H_
#endif // V8_ALLOCATION_INL_H_

82
deps/v8/src/allocation.cc

@ -25,8 +25,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#include "../include/v8stdint.h"
#include "globals.h"
#include "checks.h"
@ -37,7 +35,6 @@ namespace v8 {
namespace internal {
void* Malloced::New(size_t size) {
ASSERT(NativeAllocationChecker::allocation_allowed());
void* result = malloc(size);
if (result == NULL) {
v8::internal::FatalProcessOutOfMemory("Malloced operator new");
@ -103,85 +100,6 @@ char* StrNDup(const char* str, int n) {
}
int NativeAllocationChecker::allocation_disallowed_ = 0;
PreallocatedStorage PreallocatedStorage::in_use_list_(0);
PreallocatedStorage PreallocatedStorage::free_list_(0);
bool PreallocatedStorage::preallocated_ = false;
void PreallocatedStorage::Init(size_t size) {
ASSERT(free_list_.next_ == &free_list_);
ASSERT(free_list_.previous_ == &free_list_);
PreallocatedStorage* free_chunk =
reinterpret_cast<PreallocatedStorage*>(new char[size]);
free_list_.next_ = free_list_.previous_ = free_chunk;
free_chunk->next_ = free_chunk->previous_ = &free_list_;
free_chunk->size_ = size - sizeof(PreallocatedStorage);
preallocated_ = true;
}
void* PreallocatedStorage::New(size_t size) {
if (!preallocated_) {
return FreeStoreAllocationPolicy::New(size);
}
ASSERT(free_list_.next_ != &free_list_);
ASSERT(free_list_.previous_ != &free_list_);
size = (size + kPointerSize - 1) & ~(kPointerSize - 1);
// Search for exact fit.
for (PreallocatedStorage* storage = free_list_.next_;
storage != &free_list_;
storage = storage->next_) {
if (storage->size_ == size) {
storage->Unlink();
storage->LinkTo(&in_use_list_);
return reinterpret_cast<void*>(storage + 1);
}
}
// Search for first fit.
for (PreallocatedStorage* storage = free_list_.next_;
storage != &free_list_;
storage = storage->next_) {
if (storage->size_ >= size + sizeof(PreallocatedStorage)) {
storage->Unlink();
storage->LinkTo(&in_use_list_);
PreallocatedStorage* left_over =
reinterpret_cast<PreallocatedStorage*>(
reinterpret_cast<char*>(storage + 1) + size);
left_over->size_ = storage->size_ - size - sizeof(PreallocatedStorage);
ASSERT(size + left_over->size_ + sizeof(PreallocatedStorage) ==
storage->size_);
storage->size_ = size;
left_over->LinkTo(&free_list_);
return reinterpret_cast<void*>(storage + 1);
}
}
// Allocation failure.
ASSERT(false);
return NULL;
}
// We don't attempt to coalesce.
void PreallocatedStorage::Delete(void* p) {
if (p == NULL) {
return;
}
if (!preallocated_) {
FreeStoreAllocationPolicy::Delete(p);
return;
}
PreallocatedStorage* storage = reinterpret_cast<PreallocatedStorage*>(p) - 1;
ASSERT(storage->next_->previous_ == storage);
ASSERT(storage->previous_->next_ == storage);
storage->Unlink();
storage->LinkTo(&free_list_);
}
void PreallocatedStorage::LinkTo(PreallocatedStorage* other) {
next_ = other->next_;
other->next_->previous_ = this;

51
deps/v8/src/allocation.h

@ -39,38 +39,6 @@ namespace internal {
// processing.
void FatalProcessOutOfMemory(const char* message);
// A class that controls whether allocation is allowed. This is for
// the C++ heap only!
class NativeAllocationChecker {
public:
typedef enum { ALLOW, DISALLOW } NativeAllocationAllowed;
explicit inline NativeAllocationChecker(NativeAllocationAllowed allowed)
: allowed_(allowed) {
#ifdef DEBUG
if (allowed == DISALLOW) {
allocation_disallowed_++;
}
#endif
}
~NativeAllocationChecker() {
#ifdef DEBUG
if (allowed_ == DISALLOW) {
allocation_disallowed_--;
}
#endif
ASSERT(allocation_disallowed_ >= 0);
}
static inline bool allocation_allowed() {
return allocation_disallowed_ == 0;
}
private:
// This static counter ensures that NativeAllocationCheckers can be nested.
static int allocation_disallowed_;
// This flag applies to this particular instance.
NativeAllocationAllowed allowed_;
};
// Superclass for classes managed with new & delete.
class Malloced {
public:
@ -114,7 +82,6 @@ class AllStatic {
template <typename T>
static T* NewArray(int size) {
ASSERT(NativeAllocationChecker::allocation_allowed());
T* result = new T[size];
if (result == NULL) Malloced::FatalProcessOutOfMemory();
return result;
@ -146,27 +113,27 @@ class FreeStoreAllocationPolicy {
// Allocation policy for allocating in preallocated space.
// Used as an allocation policy for ScopeInfo when generating
// stack traces.
class PreallocatedStorage : public AllStatic {
class PreallocatedStorage {
public:
explicit PreallocatedStorage(size_t size);
size_t size() { return size_; }
static void* New(size_t size);
static void Delete(void* p);
// Preallocate a set number of bytes.
static void Init(size_t size);
// TODO(isolates): Get rid of these-- we'll have to change the allocator
// interface to include a pointer to an isolate to do this
// efficiently.
static inline void* New(size_t size);
static inline void Delete(void* p);
private:
size_t size_;
PreallocatedStorage* previous_;
PreallocatedStorage* next_;
static bool preallocated_;
static PreallocatedStorage in_use_list_;
static PreallocatedStorage free_list_;
void LinkTo(PreallocatedStorage* other);
void Unlink();
friend class Isolate;
DISALLOW_IMPLICIT_CONSTRUCTORS(PreallocatedStorage);
};
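For orientation, the intended call pattern for this policy is: prime the pool once with a fixed byte budget, then serve ScopeInfo allocations from it while building stack traces. Hypothetical call sites, not lines from this commit:
PreallocatedStorage::Init(32 * 1024);          // reserve a fixed budget up front
void* block = PreallocatedStorage::New(128);   // carved out of that budget
PreallocatedStorage::Delete(block);            // back onto the free list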

3485
deps/v8/src/api.cc

File diff suppressed because it is too large

111
deps/v8/src/api.h

@ -1,4 +1,4 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -53,8 +53,8 @@ class Consts {
class NeanderObject {
public:
explicit NeanderObject(int size);
inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
inline NeanderObject(v8::internal::Object* obj);
explicit inline NeanderObject(v8::internal::Handle<v8::internal::Object> obj);
explicit inline NeanderObject(v8::internal::Object* obj);
inline v8::internal::Object* get(int index);
inline void set(int index, v8::internal::Object* value);
inline v8::internal::Handle<v8::internal::JSObject> value() { return value_; }
@ -69,7 +69,7 @@ class NeanderObject {
class NeanderArray {
public:
NeanderArray();
inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
explicit inline NeanderArray(v8::internal::Handle<v8::internal::Object> obj);
inline v8::internal::Handle<v8::internal::JSObject> value() {
return obj_.value();
}
@ -115,14 +115,14 @@ void NeanderObject::set(int offset, v8::internal::Object* value) {
template <typename T> static inline T ToCData(v8::internal::Object* obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
return reinterpret_cast<T>(
reinterpret_cast<intptr_t>(v8::internal::Proxy::cast(obj)->proxy()));
reinterpret_cast<intptr_t>(v8::internal::Foreign::cast(obj)->address()));
}
template <typename T>
static inline v8::internal::Handle<v8::internal::Object> FromCData(T obj) {
STATIC_ASSERT(sizeof(T) == sizeof(v8::internal::Address));
return v8::internal::Factory::NewProxy(
return FACTORY->NewForeign(
reinterpret_cast<v8::internal::Address>(reinterpret_cast<intptr_t>(obj)));
}
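ToCData/FromCData simply round-trip a C entry point through an address-sized integer, now stored in a Foreign object rather than a Proxy. A self-contained illustration of that cast pattern with no V8 types (all names below are made up):
#include <cassert>
#include <stdint.h>
typedef uint8_t* Address;
template <typename T>
Address ToAddress(T fn) {                       // e.g. a C callback pointer
  return reinterpret_cast<Address>(reinterpret_cast<intptr_t>(fn));
}
template <typename T>
T FromAddress(Address addr) {
  return reinterpret_cast<T>(reinterpret_cast<intptr_t>(addr));
}
static int Answer() { return 42; }
int main() {
  typedef int (*Callback)();
  Address stored = ToAddress<Callback>(&Answer);   // what FromCData stores
  Callback cb = FromAddress<Callback>(stored);     // what ToCData recovers
  assert(cb() == 42);
  return 0;
}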
@ -157,7 +157,6 @@ class RegisteredExtension {
RegisteredExtension* next_auto_;
ExtensionTraversalState state_;
static RegisteredExtension* first_extension_;
static RegisteredExtension* first_auto_extension_;
};
@ -183,7 +182,7 @@ class Utils {
static inline Local<Array> ToLocal(
v8::internal::Handle<v8::internal::JSArray> obj);
static inline Local<External> ToLocal(
v8::internal::Handle<v8::internal::Proxy> obj);
v8::internal::Handle<v8::internal::Foreign> obj);
static inline Local<Message> MessageToLocal(
v8::internal::Handle<v8::internal::Object> obj);
static inline Local<StackTrace> StackTraceToLocal(
@ -237,7 +236,7 @@ class Utils {
OpenHandle(const v8::Signature* sig);
static inline v8::internal::Handle<v8::internal::TypeSwitchInfo>
OpenHandle(const v8::TypeSwitch* that);
static inline v8::internal::Handle<v8::internal::Proxy>
static inline v8::internal::Handle<v8::internal::Foreign>
OpenHandle(const v8::External* that);
};
@ -274,7 +273,7 @@ MAKE_TO_LOCAL(ToLocal, String, String)
MAKE_TO_LOCAL(ToLocal, JSRegExp, RegExp)
MAKE_TO_LOCAL(ToLocal, JSObject, Object)
MAKE_TO_LOCAL(ToLocal, JSArray, Array)
MAKE_TO_LOCAL(ToLocal, Proxy, External)
MAKE_TO_LOCAL(ToLocal, Foreign, External)
MAKE_TO_LOCAL(ToLocal, FunctionTemplateInfo, FunctionTemplate)
MAKE_TO_LOCAL(ToLocal, ObjectTemplateInfo, ObjectTemplate)
MAKE_TO_LOCAL(ToLocal, SignatureInfo, Signature)
@ -312,7 +311,7 @@ MAKE_OPEN_HANDLE(Script, Object)
MAKE_OPEN_HANDLE(Function, JSFunction)
MAKE_OPEN_HANDLE(Message, JSObject)
MAKE_OPEN_HANDLE(Context, Context)
MAKE_OPEN_HANDLE(External, Proxy)
MAKE_OPEN_HANDLE(External, Foreign)
MAKE_OPEN_HANDLE(StackTrace, JSArray)
MAKE_OPEN_HANDLE(StackFrame, JSObject)
@ -321,36 +320,101 @@ MAKE_OPEN_HANDLE(StackFrame, JSObject)
namespace internal {
// Tracks string usage to help make better decisions when
// externalizing strings.
//
// Implementation note: internally this class only tracks fresh
// strings and keeps a single use counter for them.
class StringTracker {
public:
// Records that the given string's characters were copied to some
// external buffer. If this happens often we should honor
// externalization requests for the string.
void RecordWrite(Handle<String> string) {
Address address = reinterpret_cast<Address>(*string);
Address top = isolate_->heap()->NewSpaceTop();
if (IsFreshString(address, top)) {
IncrementUseCount(top);
}
}
// Estimates freshness and use frequency of the given string based
// on how close it is to the new space top and the recorded usage
// history.
inline bool IsFreshUnusedString(Handle<String> string) {
Address address = reinterpret_cast<Address>(*string);
Address top = isolate_->heap()->NewSpaceTop();
return IsFreshString(address, top) && IsUseCountLow(top);
}
private:
StringTracker() : use_count_(0), last_top_(NULL), isolate_(NULL) { }
static inline bool IsFreshString(Address string, Address top) {
return top - kFreshnessLimit <= string && string <= top;
}
inline bool IsUseCountLow(Address top) {
if (last_top_ != top) return true;
return use_count_ < kUseLimit;
}
inline void IncrementUseCount(Address top) {
if (last_top_ != top) {
use_count_ = 0;
last_top_ = top;
}
++use_count_;
}
// Single use counter shared by all fresh strings.
int use_count_;
// Last new space top when the use count above was valid.
Address last_top_;
Isolate* isolate_;
// How close to the new space top a fresh string has to be.
static const int kFreshnessLimit = 1024;
// The number of uses required to consider a string useful.
static const int kUseLimit = 32;
friend class Isolate;
DISALLOW_COPY_AND_ASSIGN(StringTracker);
};
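The heuristic above reduces to two ingredients: a string is "fresh" if it sits within kFreshnessLimit bytes below the new-space top, and all fresh strings share one use counter that resets whenever the top moves. A standalone sketch of that logic (illustrative names, not V8 code):
#include <stddef.h>
#include <stdint.h>
typedef uint8_t* Address;
class FreshnessTracker {
 public:
  FreshnessTracker() : use_count_(0), last_top_(NULL) {}
  // A string is "fresh" if it lives within kFreshnessLimit bytes below the
  // current new-space allocation top.
  static bool IsFresh(Address string, Address top) {
    return top - kFreshnessLimit <= string && string <= top;
  }
  // One shared counter for all fresh strings; it resets whenever the top
  // moves, because old counts no longer describe the current strings.
  void RecordUse(Address top) {
    if (last_top_ != top) {
      use_count_ = 0;
      last_top_ = top;
    }
    ++use_count_;
  }
  bool UseCountLow(Address top) const {
    if (last_top_ != top) return true;
    return use_count_ < kUseLimit;
  }
 private:
  static const int kFreshnessLimit = 1024;   // same budget as StringTracker
  static const int kUseLimit = 32;
  int use_count_;
  Address last_top_;
};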
// This class is here in order to be able to declare it a friend of
// HandleScope. Moving these methods to be members of HandleScope would be
// neat in some ways, but it would expose external implementation details in
// neat in some ways, but it would expose internal implementation details in
// our public header file, which is undesirable.
//
// There is a singleton instance of this class to hold the per-thread data.
// For multithreaded V8 programs this data is copied in and out of storage
// An isolate has a single instance of this class to hold the current thread's
// data. In multithreaded V8 programs this data is copied in and out of storage
// so that the currently executing thread always has its own copy of this
// data.
class HandleScopeImplementer {
public:
HandleScopeImplementer()
: blocks_(0),
explicit HandleScopeImplementer(Isolate* isolate)
: isolate_(isolate),
blocks_(0),
entered_contexts_(0),
saved_contexts_(0),
spare_(NULL),
ignore_out_of_memory_(false),
call_depth_(0) { }
static HandleScopeImplementer* instance();
// Threading support for handle data.
static int ArchiveSpacePerThread();
static char* RestoreThread(char* from);
static char* ArchiveThread(char* to);
static void FreeThreadResources();
char* RestoreThread(char* from);
char* ArchiveThread(char* to);
void FreeThreadResources();
// Garbage collection support.
static void Iterate(v8::internal::ObjectVisitor* v);
void Iterate(v8::internal::ObjectVisitor* v);
static char* Iterate(v8::internal::ObjectVisitor* v, char* data);
@ -402,6 +466,7 @@ class HandleScopeImplementer {
ASSERT(call_depth_ == 0);
}
Isolate* isolate_;
List<internal::Object**> blocks_;
// Used as a stack to keep track of entered contexts.
List<Handle<Object> > entered_contexts_;

10
deps/v8/src/apinatives.js

@ -73,7 +73,15 @@ function InstantiateFunction(data, name) {
if (name) %FunctionSetName(fun, name);
cache[serialNumber] = fun;
var prototype = %GetTemplateField(data, kApiPrototypeTemplateOffset);
fun.prototype = prototype ? Instantiate(prototype) : {};
var attributes = %GetTemplateField(data, kApiPrototypeAttributesOffset);
if (attributes != NONE) {
%IgnoreAttributesAndSetProperty(
fun, "prototype",
prototype ? Instantiate(prototype) : {},
attributes);
} else {
fun.prototype = prototype ? Instantiate(prototype) : {};
}
%SetProperty(fun.prototype, "constructor", fun, DONT_ENUM);
var parent = %GetTemplateField(data, kApiParentTemplateOffset);
if (parent) {

7
deps/v8/src/apiutils.h

@ -31,11 +31,6 @@
namespace v8 {
class ImplementationUtilities {
public:
static v8::Handle<v8::Primitive> Undefined();
static v8::Handle<v8::Primitive> Null();
static v8::Handle<v8::Boolean> True();
static v8::Handle<v8::Boolean> False();
static int GetNameCount(ExtensionConfiguration* that) {
return that->name_count_;
}
@ -68,8 +63,6 @@ class ImplementationUtilities {
// to access the HandleScope data.
typedef v8::HandleScope::Data HandleScopeData;
static HandleScopeData* CurrentHandleScope();
#ifdef DEBUG
static void ZapHandleRange(internal::Object** begin, internal::Object** end);
#endif

29
deps/v8/src/arguments.h

@ -28,6 +28,8 @@
#ifndef V8_ARGUMENTS_H_
#define V8_ARGUMENTS_H_
#include "allocation.h"
namespace v8 {
namespace internal {
@ -61,11 +63,18 @@ class Arguments BASE_EMBEDDED {
return Handle<S>(reinterpret_cast<S**>(value));
}
int smi_at(int index) {
return Smi::cast((*this)[index])->value();
}
double number_at(int index) {
return (*this)[index]->Number();
}
// Get the total number of arguments including the receiver.
int length() const { return length_; }
Object** arguments() { return arguments_; }
private:
int length_;
Object** arguments_;
@ -77,15 +86,16 @@ class Arguments BASE_EMBEDDED {
// can.
class CustomArguments : public Relocatable {
public:
inline CustomArguments(Object* data,
inline CustomArguments(Isolate* isolate,
Object* data,
Object* self,
JSObject* holder) {
JSObject* holder) : Relocatable(isolate) {
values_[2] = self;
values_[1] = holder;
values_[0] = data;
}
inline CustomArguments() {
inline explicit CustomArguments(Isolate* isolate) : Relocatable(isolate) {
#ifdef DEBUG
for (size_t i = 0; i < ARRAY_SIZE(values_); i++) {
values_[i] = reinterpret_cast<Object*>(kZapValue);
@ -100,6 +110,17 @@ class CustomArguments : public Relocatable {
};
#define DECLARE_RUNTIME_FUNCTION(Type, Name) \
Type Name(Arguments args, Isolate* isolate)
#define RUNTIME_FUNCTION(Type, Name) \
Type Name(Arguments args, Isolate* isolate)
#define RUNTIME_ARGUMENTS(isolate, args) args, isolate
} } // namespace v8::internal
#endif // V8_ARGUMENTS_H_
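The macros above standardize the (Arguments, Isolate*) signature that every runtime entry point now takes. A hedged sketch of how one would be declared and defined with them; Runtime_ExampleAdd is a made-up name and the body assumes V8's internal Smi and MaybeObject types:
// Declaration, e.g. in a header:
DECLARE_RUNTIME_FUNCTION(MaybeObject*, Runtime_ExampleAdd);
// Definition; the macro expands to
//   MaybeObject* Runtime_ExampleAdd(Arguments args, Isolate* isolate) { ... }
RUNTIME_FUNCTION(MaybeObject*, Runtime_ExampleAdd) {
  int a = args.smi_at(0);   // smi_at() is one of the helpers added above
  int b = args.smi_at(1);
  return Smi::FromInt(a + b);
}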

19
deps/v8/src/arm/assembler-arm-inl.h

@ -203,11 +203,12 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
visitor->VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
// TODO(isolates): Get a cached isolate below.
} else if (((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
IsPatchedDebugBreakSlotSequence())) &&
Isolate::Current()->debug()->has_break_points()) {
visitor->VisitDebugTarget(this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
@ -217,23 +218,23 @@ void RelocInfo::Visit(ObjectVisitor* visitor) {
template<typename StaticVisitor>
void RelocInfo::Visit() {
void RelocInfo::Visit(Heap* heap) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
StaticVisitor::VisitPointer(target_object_address());
StaticVisitor::VisitPointer(heap, target_object_address());
} else if (RelocInfo::IsCodeTarget(mode)) {
StaticVisitor::VisitCodeTarget(this);
StaticVisitor::VisitCodeTarget(heap, this);
} else if (mode == RelocInfo::GLOBAL_PROPERTY_CELL) {
StaticVisitor::VisitGlobalPropertyCell(this);
StaticVisitor::VisitGlobalPropertyCell(heap, this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
StaticVisitor::VisitExternalReference(target_reference_address());
#ifdef ENABLE_DEBUGGER_SUPPORT
} else if (Debug::has_break_points() &&
} else if (heap->isolate()->debug()->has_break_points() &&
((RelocInfo::IsJSReturn(mode) &&
IsPatchedReturnSequence()) ||
(RelocInfo::IsDebugBreakSlot(mode) &&
IsPatchedDebugBreakSlotSequence()))) {
StaticVisitor::VisitDebugTarget(this);
StaticVisitor::VisitDebugTarget(heap, this);
#endif
} else if (mode == RelocInfo::RUNTIME_ENTRY) {
StaticVisitor::VisitRuntimeEntry(this);

692
deps/v8/src/arm/assembler-arm.cc

@ -32,7 +32,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
#include "v8.h"
@ -44,62 +44,80 @@
namespace v8 {
namespace internal {
// Safe default is no features.
#ifdef DEBUG
bool CpuFeatures::initialized_ = false;
#endif
unsigned CpuFeatures::supported_ = 0;
unsigned CpuFeatures::enabled_ = 0;
unsigned CpuFeatures::found_by_runtime_probing_ = 0;
#ifdef __arm__
// Get the CPU features enabled by the build. For cross compilation the
// preprocessor symbols CAN_USE_ARMV7_INSTRUCTIONS and CAN_USE_VFP_INSTRUCTIONS
// can be defined to enable ARMv7 and VFPv3 instructions when building the
// snapshot.
static uint64_t CpuFeaturesImpliedByCompiler() {
uint64_t answer = 0;
#ifdef CAN_USE_ARMV7_INSTRUCTIONS
answer |= 1u << ARMv7;
#endif // def CAN_USE_ARMV7_INSTRUCTIONS
#ifdef CAN_USE_VFP_INSTRUCTIONS
answer |= 1u << VFP3 | 1u << ARMv7;
#endif // def CAN_USE_VFP_INSTRUCTIONS
#ifdef __arm__
// If the compiler is allowed to use VFP then we can use VFP too in our code
// generation even when generating snapshots. This won't work for cross
// compilation.
// compilation. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
#if defined(__VFP_FP__) && !defined(__SOFTFP__)
answer |= 1u << VFP3;
answer |= 1u << VFP3 | 1u << ARMv7;
#endif // defined(__VFP_FP__) && !defined(__SOFTFP__)
#ifdef CAN_USE_VFP_INSTRUCTIONS
answer |= 1u << VFP3;
#endif // def CAN_USE_VFP_INSTRUCTIONS
#endif // def __arm__
return answer;
}
#endif // def __arm__
void CpuFeatures::Probe(bool portable) {
void CpuFeatures::Probe() {
ASSERT(!initialized_);
#ifdef DEBUG
initialized_ = true;
#endif
// Get the features implied by the OS and the compiler settings. This is the
// minimal set of features which is also allowed for generated code in the
// snapshot.
supported_ |= OS::CpuFeaturesImpliedByPlatform();
supported_ |= CpuFeaturesImpliedByCompiler();
if (Serializer::enabled()) {
// No probing for features if we might serialize (generate snapshot).
return;
}
#ifndef __arm__
// For the simulator=arm build, use VFP when FLAG_enable_vfp3 is enabled.
// For the simulator=arm build, use VFP when FLAG_enable_vfp3 is
// enabled. VFPv3 implies ARMv7, see ARM DDI 0406B, page A1-6.
if (FLAG_enable_vfp3) {
supported_ |= 1u << VFP3;
supported_ |= 1u << VFP3 | 1u << ARMv7;
}
// For the simulator=arm build, use ARMv7 when FLAG_enable_armv7 is enabled
if (FLAG_enable_armv7) {
supported_ |= 1u << ARMv7;
}
#else // def __arm__
if (portable && Serializer::enabled()) {
supported_ |= OS::CpuFeaturesImpliedByPlatform();
supported_ |= CpuFeaturesImpliedByCompiler();
return; // No features if we might serialize.
// Probe for additional features not already known to be available.
if (!IsSupported(VFP3) && OS::ArmCpuHasFeature(VFP3)) {
// This implementation also sets the VFP flags if runtime
// detection of VFP returns true. VFPv3 implies ARMv7, see ARM DDI
// 0406B, page A1-6.
supported_ |= 1u << VFP3 | 1u << ARMv7;
found_by_runtime_probing_ |= 1u << VFP3 | 1u << ARMv7;
}
if (OS::ArmCpuHasFeature(VFP3)) {
// This implementation also sets the VFP flags if
// runtime detection of VFP returns true.
supported_ |= 1u << VFP3;
found_by_runtime_probing_ |= 1u << VFP3;
}
if (OS::ArmCpuHasFeature(ARMv7)) {
if (!IsSupported(ARMv7) && OS::ArmCpuHasFeature(ARMv7)) {
supported_ |= 1u << ARMv7;
found_by_runtime_probing_ |= 1u << ARMv7;
}
if (!portable) found_by_runtime_probing_ = 0;
#endif
}
@ -148,7 +166,7 @@ Operand::Operand(Handle<Object> handle) {
rm_ = no_reg;
// Verify all Objects referred by code are NOT in new space.
Object* obj = *handle;
ASSERT(!Heap::InNewSpace(obj));
ASSERT(!HEAP->InNewSpace(obj));
if (obj->IsHeapObject()) {
imm32_ = reinterpret_cast<intptr_t>(handle.location());
rmode_ = RelocInfo::EMBEDDED_OBJECT;
@ -266,21 +284,20 @@ const Instr kLdrStrOffsetMask = 0x00000fff;
// Spare buffer.
static const int kMinimalBufferSize = 4*KB;
static byte* spare_buffer_ = NULL;
Assembler::Assembler(void* buffer, int buffer_size)
: positions_recorder_(this),
allow_peephole_optimization_(false) {
allow_peephole_optimization_ = FLAG_peephole_optimization;
Assembler::Assembler(Isolate* arg_isolate, void* buffer, int buffer_size)
: AssemblerBase(arg_isolate),
positions_recorder_(this),
emit_debug_code_(FLAG_debug_code) {
if (buffer == NULL) {
// Do our own buffer management.
if (buffer_size <= kMinimalBufferSize) {
buffer_size = kMinimalBufferSize;
if (spare_buffer_ != NULL) {
buffer = spare_buffer_;
spare_buffer_ = NULL;
if (isolate()->assembler_spare_buffer() != NULL) {
buffer = isolate()->assembler_spare_buffer();
isolate()->set_assembler_spare_buffer(NULL);
}
}
if (buffer == NULL) {
@ -303,20 +320,22 @@ Assembler::Assembler(void* buffer, int buffer_size)
ASSERT(buffer_ != NULL);
pc_ = buffer_;
reloc_info_writer.Reposition(buffer_ + buffer_size, pc_);
num_prinfo_ = 0;
num_pending_reloc_info_ = 0;
next_buffer_check_ = 0;
const_pool_blocked_nesting_ = 0;
no_const_pool_before_ = 0;
last_const_pool_end_ = 0;
first_const_pool_use_ = -1;
last_bound_pos_ = 0;
ast_id_for_reloc_info_ = kNoASTId;
}
Assembler::~Assembler() {
ASSERT(const_pool_blocked_nesting_ == 0);
if (own_buffer_) {
if (spare_buffer_ == NULL && buffer_size_ == kMinimalBufferSize) {
spare_buffer_ = buffer_;
if (isolate()->assembler_spare_buffer() == NULL &&
buffer_size_ == kMinimalBufferSize) {
isolate()->set_assembler_spare_buffer(buffer_);
} else {
DeleteArray(buffer_);
}
@ -327,7 +346,7 @@ Assembler::~Assembler() {
void Assembler::GetCode(CodeDesc* desc) {
// Emit constant pool if necessary.
CheckConstPool(true, false);
ASSERT(num_prinfo_ == 0);
ASSERT(num_pending_reloc_info_ == 0);
// Setup code descriptor.
desc->buffer = buffer_;
@ -767,11 +786,36 @@ bool Operand::must_use_constant_pool() const {
}
bool Operand::is_single_instruction() const {
bool Operand::is_single_instruction(Instr instr) const {
if (rm_.is_valid()) return true;
if (must_use_constant_pool()) return false;
uint32_t dummy1, dummy2;
return fits_shifter(imm32_, &dummy1, &dummy2, NULL);
if (must_use_constant_pool() ||
!fits_shifter(imm32_, &dummy1, &dummy2, &instr)) {
// The immediate operand cannot be encoded as a shifter operand, or use of
// the constant pool is required. For a mov instruction that does not set the
// condition code, alternative instruction sequences can be used.
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
if (must_use_constant_pool() ||
!CpuFeatures::IsSupported(ARMv7)) {
// mov instruction will be an ldr from constant pool (one instruction).
return true;
} else {
// mov instruction will be a mov or movw followed by movt (two
// instructions).
return false;
}
} else {
// If this is not a mov or mvn instruction there will always be an additional
// instruction - either a mov or an ldr. The mov might actually be two
// instructions (movw followed by movt), so including the actual instruction
// two or three instructions will be generated.
return false;
}
} else {
// No use of constant pool and the immediate operand can be encoded as a
// shifter operand.
return true;
}
}
@ -794,7 +838,8 @@ void Assembler::addrmod1(Instr instr,
CHECK(!rn.is(ip)); // rn should never be ip, or will be trashed
Condition cond = Instruction::ConditionField(instr);
if ((instr & ~kCondMask) == 13*B21) { // mov, S not set
if (x.must_use_constant_pool() || !CpuFeatures::IsSupported(ARMv7)) {
if (x.must_use_constant_pool() ||
!CpuFeatures::IsSupported(ARMv7)) {
RecordRelocInfo(x.rmode_, x.imm32_);
ldr(rd, MemOperand(pc, 0), cond);
} else {
@ -828,7 +873,7 @@ void Assembler::addrmod1(Instr instr,
emit(instr | rn.code()*B16 | rd.code()*B12);
if (rn.is(pc) || x.rm_.is(pc)) {
// Block constant pool emission for one instruction after reading pc.
BlockConstPoolBefore(pc_offset() + kInstrSize);
BlockConstPoolFor(1);
}
}
@ -952,7 +997,7 @@ int Assembler::branch_offset(Label* L, bool jump_elimination_allowed) {
// Block the emission of the constant pool, since the branch instruction must
// be emitted at the pc offset recorded by the label.
BlockConstPoolBefore(pc_offset() + kInstrSize);
BlockConstPoolFor(1);
return target_pos - (pc_offset() + kPcLoadDelta);
}
@ -1049,20 +1094,6 @@ void Assembler::rsb(Register dst, Register src1, const Operand& src2,
void Assembler::add(Register dst, Register src1, const Operand& src2,
SBit s, Condition cond) {
addrmod1(cond | ADD | s, src1, dst, src2);
// Eliminate pattern: push(r), pop()
// str(src, MemOperand(sp, 4, NegPreIndex), al);
// add(sp, sp, Operand(kPointerSize));
// Both instructions can be eliminated.
if (can_peephole_optimize(2) &&
// Pattern.
instr_at(pc_ - 1 * kInstrSize) == kPopInstruction &&
(instr_at(pc_ - 2 * kInstrSize) & ~kRdMask) == kPushRegPattern) {
pc_ -= 2 * kInstrSize;
if (FLAG_print_peephole_optimization) {
PrintF("%x push(reg)/pop() eliminated\n", pc_offset());
}
}
}
@ -1367,195 +1398,11 @@ void Assembler::ldr(Register dst, const MemOperand& src, Condition cond) {
positions_recorder()->WriteRecordedPositions();
}
addrmod2(cond | B26 | L, dst, src);
// Eliminate pattern: push(ry), pop(rx)
// str(ry, MemOperand(sp, 4, NegPreIndex), al)
// ldr(rx, MemOperand(sp, 4, PostIndex), al)
// Both instructions can be eliminated if ry = rx.
// If ry != rx, a register copy from ry to rx is inserted
// after eliminating the push and the pop instructions.
if (can_peephole_optimize(2)) {
Instr push_instr = instr_at(pc_ - 2 * kInstrSize);
Instr pop_instr = instr_at(pc_ - 1 * kInstrSize);
if (IsPush(push_instr) && IsPop(pop_instr)) {
if (Instruction::RdValue(pop_instr) != Instruction::RdValue(push_instr)) {
// For consecutive push and pop on different registers,
// we delete both the push & pop and insert a register move.
// push ry, pop rx --> mov rx, ry
Register reg_pushed, reg_popped;
reg_pushed = GetRd(push_instr);
reg_popped = GetRd(pop_instr);
pc_ -= 2 * kInstrSize;
// Insert a mov instruction, which is better than a pair of push & pop
mov(reg_popped, reg_pushed);
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop (diff reg) replaced by a reg move\n",
pc_offset());
}
} else {
// For consecutive push and pop on the same register,
// both the push and the pop can be deleted.
pc_ -= 2 * kInstrSize;
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop (same reg) eliminated\n", pc_offset());
}
}
}
}
if (can_peephole_optimize(2)) {
Instr str_instr = instr_at(pc_ - 2 * kInstrSize);
Instr ldr_instr = instr_at(pc_ - 1 * kInstrSize);
if ((IsStrRegFpOffset(str_instr) &&
IsLdrRegFpOffset(ldr_instr)) ||
(IsStrRegFpNegOffset(str_instr) &&
IsLdrRegFpNegOffset(ldr_instr))) {
if ((ldr_instr & kLdrStrInstrArgumentMask) ==
(str_instr & kLdrStrInstrArgumentMask)) {
// Pattern: Ldr/str same fp+offset, same register.
//
// The following:
// str rx, [fp, #-12]
// ldr rx, [fp, #-12]
//
// Becomes:
// str rx, [fp, #-12]
pc_ -= 1 * kInstrSize;
if (FLAG_print_peephole_optimization) {
PrintF("%x str/ldr (fp + same offset), same reg\n", pc_offset());
}
} else if ((ldr_instr & kLdrStrOffsetMask) ==
(str_instr & kLdrStrOffsetMask)) {
// Pattern: Ldr/str same fp+offset, different register.
//
// The following:
// str rx, [fp, #-12]
// ldr ry, [fp, #-12]
//
// Becomes:
// str rx, [fp, #-12]
// mov ry, rx
Register reg_stored, reg_loaded;
reg_stored = GetRd(str_instr);
reg_loaded = GetRd(ldr_instr);
pc_ -= 1 * kInstrSize;
// Insert a mov instruction, which is better than ldr.
mov(reg_loaded, reg_stored);
if (FLAG_print_peephole_optimization) {
PrintF("%x str/ldr (fp + same offset), diff reg \n", pc_offset());
}
}
}
}
if (can_peephole_optimize(3)) {
Instr mem_write_instr = instr_at(pc_ - 3 * kInstrSize);
Instr ldr_instr = instr_at(pc_ - 2 * kInstrSize);
Instr mem_read_instr = instr_at(pc_ - 1 * kInstrSize);
if (IsPush(mem_write_instr) &&
IsPop(mem_read_instr)) {
if ((IsLdrRegFpOffset(ldr_instr) ||
IsLdrRegFpNegOffset(ldr_instr))) {
if (Instruction::RdValue(mem_write_instr) ==
Instruction::RdValue(mem_read_instr)) {
// Pattern: push & pop from/to same register,
// with a fp+offset ldr in between
//
// The following:
// str rx, [sp, #-4]!
// ldr rz, [fp, #-24]
// ldr rx, [sp], #+4
//
// Becomes:
// if(rx == rz)
// delete all
// else
// ldr rz, [fp, #-24]
if (Instruction::RdValue(mem_write_instr) ==
Instruction::RdValue(ldr_instr)) {
pc_ -= 3 * kInstrSize;
} else {
pc_ -= 3 * kInstrSize;
// Reinsert back the ldr rz.
emit(ldr_instr);
}
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop -dead ldr fp+offset in middle\n", pc_offset());
}
} else {
// Pattern: push & pop from/to different registers
// with a fp+offset ldr in between
//
// The following:
// str rx, [sp, #-4]!
// ldr rz, [fp, #-24]
// ldr ry, [sp], #+4
//
// Becomes:
// if(ry == rz)
// mov ry, rx;
// else if(rx != rz)
// ldr rz, [fp, #-24]
// mov ry, rx
// else if((ry != rz) || (rx == rz)) becomes:
// mov ry, rx
// ldr rz, [fp, #-24]
Register reg_pushed, reg_popped;
if (Instruction::RdValue(mem_read_instr) ==
Instruction::RdValue(ldr_instr)) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
mov(reg_popped, reg_pushed);
} else if (Instruction::RdValue(mem_write_instr) !=
Instruction::RdValue(ldr_instr)) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
emit(ldr_instr);
mov(reg_popped, reg_pushed);
} else if ((Instruction::RdValue(mem_read_instr) !=
Instruction::RdValue(ldr_instr)) ||
(Instruction::RdValue(mem_write_instr) ==
Instruction::RdValue(ldr_instr))) {
reg_pushed = GetRd(mem_write_instr);
reg_popped = GetRd(mem_read_instr);
pc_ -= 3 * kInstrSize;
mov(reg_popped, reg_pushed);
emit(ldr_instr);
}
if (FLAG_print_peephole_optimization) {
PrintF("%x push/pop (ldr fp+off in middle)\n", pc_offset());
}
}
}
}
}
}
void Assembler::str(Register src, const MemOperand& dst, Condition cond) {
addrmod2(cond | B26, src, dst);
// Eliminate pattern: pop(), push(r)
// add sp, sp, #4 LeaveCC, al; str r, [sp, #-4], al
// -> str r, [sp, 0], al
if (can_peephole_optimize(2) &&
// Pattern.
instr_at(pc_ - 1 * kInstrSize) == (kPushRegPattern | src.code() * B12) &&
instr_at(pc_ - 2 * kInstrSize) == kPopInstruction) {
pc_ -= 2 * kInstrSize;
emit(al | B26 | 0 | Offset | sp.code() * B16 | src.code() * B12);
if (FLAG_print_peephole_optimization) {
PrintF("%x pop()/push(reg) eliminated\n", pc_offset());
}
}
}
@ -1646,15 +1493,17 @@ void Assembler::stm(BlockAddrMode am,
void Assembler::stop(const char* msg, Condition cond, int32_t code) {
#ifndef __arm__
ASSERT(code >= kDefaultStopCode);
// The Simulator will handle the stop instruction and get the message address.
// It expects to find the address just after the svc instruction.
BlockConstPoolFor(2);
if (code >= 0) {
svc(kStopCode + code, cond);
} else {
svc(kStopCode + kMaxStopCode, cond);
{
// The Simulator will handle the stop instruction and get the message
// address. It expects to find the address just after the svc instruction.
BlockConstPoolScope block_const_pool(this);
if (code >= 0) {
svc(kStopCode + code, cond);
} else {
svc(kStopCode + kMaxStopCode, cond);
}
emit(reinterpret_cast<Instr>(msg));
}
emit(reinterpret_cast<Instr>(msg));
#else // def __arm__
#ifdef CAN_USE_ARMV5_INSTRUCTIONS
if (cond != al) {
@ -1793,45 +1642,6 @@ void Assembler::ldc2(Coprocessor coproc,
}
void Assembler::stc(Coprocessor coproc,
CRegister crd,
const MemOperand& dst,
LFlag l,
Condition cond) {
addrmod5(cond | B27 | B26 | l | coproc*B8, crd, dst);
}
void Assembler::stc(Coprocessor coproc,
CRegister crd,
Register rn,
int option,
LFlag l,
Condition cond) {
// Unindexed addressing.
ASSERT(is_uint8(option));
emit(cond | B27 | B26 | U | l | rn.code()*B16 | crd.code()*B12 |
coproc*B8 | (option & 255));
}
void Assembler::stc2(Coprocessor coproc,
CRegister crd,
const MemOperand& dst,
LFlag l) { // v5 and above
stc(coproc, crd, dst, l, kSpecialCondition);
}
void Assembler::stc2(Coprocessor coproc,
CRegister crd,
Register rn,
int option,
LFlag l) { // v5 and above
stc(coproc, crd, rn, option, l, kSpecialCondition);
}
// Support for VFP.
void Assembler::vldr(const DwVfpRegister dst,
@ -2004,6 +1814,88 @@ void Assembler::vstr(const SwVfpRegister src,
}
void Assembler::vldm(BlockAddrMode am,
Register base,
DwVfpRegister first,
DwVfpRegister last,
Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count * 2)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
void Assembler::vstm(BlockAddrMode am,
Register base,
DwVfpRegister first,
DwVfpRegister last,
Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count * 2)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
0xB*B8 | count*2);
}
void Assembler::vldm(BlockAddrMode am,
Register base,
SwVfpRegister first,
SwVfpRegister last,
Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-626.
// cond(31-28) | 110(27-25)| PUDW1(24-20) | Rbase(19-16) |
// first(15-12) | 1010(11-8) | (count/2)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
emit(cond | B27 | B26 | am | d*B22 | B20 | base.code()*B16 | sd*B12 |
0xA*B8 | count);
}
void Assembler::vstm(BlockAddrMode am,
Register base,
SwVfpRegister first,
SwVfpRegister last,
Condition cond) {
// Instruction details available in ARM DDI 0406A, A8-784.
// cond(31-28) | 110(27-25)| PUDW0(24-20) | Rbase(19-16) |
// first(15-12) | 1011(11-8) | (count/2)
ASSERT(CpuFeatures::IsEnabled(VFP3));
ASSERT_LE(first.code(), last.code());
ASSERT(am == ia || am == ia_w || am == db_w);
ASSERT(!base.is(pc));
int sd, d;
first.split_code(&sd, &d);
int count = last.code() - first.code() + 1;
emit(cond | B27 | B26 | am | d*B22 | base.code()*B16 | sd*B12 |
0xA*B8 | count);
}
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
@ -2360,6 +2252,14 @@ void Assembler::vcvt_f32_f64(const SwVfpRegister dst,
}
void Assembler::vneg(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
emit(cond | 0xE*B24 | 0xB*B20 | B16 | dst.code()*B12 |
0x5*B9 | B8 | B6 | src.code());
}
void Assembler::vabs(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond) {
@ -2508,11 +2408,6 @@ bool Assembler::ImmediateFitsAddrMode1Instruction(int32_t imm32) {
}
void Assembler::BlockConstPoolFor(int instructions) {
BlockConstPoolBefore(pc_offset() + instructions * kInstrSize);
}
// Debugging.
void Assembler::RecordJSReturn() {
positions_recorder()->WriteRecordedPositions();
@ -2576,8 +2471,8 @@ void Assembler::GrowBuffer() {
// to relocate any emitted relocation entries.
// Relocate pending relocation entries.
for (int i = 0; i < num_prinfo_; i++) {
RelocInfo& rinfo = prinfo_[i];
for (int i = 0; i < num_pending_reloc_info_; i++) {
RelocInfo& rinfo = pending_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION);
if (rinfo.rmode() != RelocInfo::JS_RETURN) {
@ -2591,7 +2486,7 @@ void Assembler::db(uint8_t data) {
// No relocation info should be pending while using db. db is used
// to write pure data with no pointers and the constant pool should
// be emitted before using db.
ASSERT(num_prinfo_ == 0);
ASSERT(num_pending_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint8_t*>(pc_) = data;
pc_ += sizeof(uint8_t);
@ -2602,7 +2497,7 @@ void Assembler::dd(uint32_t data) {
// No relocation info should be pending while using dd. dd is used
// to write pure data with no pointers and the constant pool should
// be emitted before using dd.
ASSERT(num_prinfo_ == 0);
ASSERT(num_pending_reloc_info_ == 0);
CheckBuffer();
*reinterpret_cast<uint32_t*>(pc_) = data;
pc_ += sizeof(uint32_t);
@ -2619,11 +2514,14 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
|| RelocInfo::IsPosition(rmode));
// These modes do not need an entry in the constant pool.
} else {
ASSERT(num_prinfo_ < kMaxNumPRInfo);
prinfo_[num_prinfo_++] = rinfo;
ASSERT(num_pending_reloc_info_ < kMaxNumPendingRelocInfo);
if (num_pending_reloc_info_ == 0) {
first_const_pool_use_ = pc_offset();
}
pending_reloc_info_[num_pending_reloc_info_++] = rinfo;
// Make sure the constant pool is not emitted in place of the next
// instruction for which we just recorded relocation info.
BlockConstPoolBefore(pc_offset() + kInstrSize);
BlockConstPoolFor(1);
}
if (rinfo.rmode() != RelocInfo::NONE) {
// Don't record external references unless the heap will be serialized.
@ -2633,121 +2531,129 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
Serializer::TooLateToEnableNow();
}
#endif
if (!Serializer::enabled() && !FLAG_debug_code) {
if (!Serializer::enabled() && !emit_debug_code()) {
return;
}
}
ASSERT(buffer_space() >= kMaxRelocSize); // too late to grow buffer here
reloc_info_writer.Write(&rinfo);
if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
ASSERT(ast_id_for_reloc_info_ != kNoASTId);
RelocInfo reloc_info_with_ast_id(pc_, rmode, ast_id_for_reloc_info_);
ast_id_for_reloc_info_ = kNoASTId;
reloc_info_writer.Write(&reloc_info_with_ast_id);
} else {
reloc_info_writer.Write(&rinfo);
}
}
}
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Calculate the offset of the next check. It will be overwritten
// when a const pool is generated or when const pools are being
// blocked for a specific range.
next_buffer_check_ = pc_offset() + kCheckConstInterval;
// There is nothing to do if there are no pending relocation info entries.
if (num_prinfo_ == 0) return;
// We emit a constant pool at regular intervals of about kDistBetweenPools
// or when requested by parameter force_emit (e.g. after each function).
// We prefer not to emit a jump unless the max distance is reached or if we
// are running low on slots, which can happen if a lot of constants are being
// emitted (e.g. --debug-code and many static references).
int dist = pc_offset() - last_const_pool_end_;
if (!force_emit && dist < kMaxDistBetweenPools &&
(require_jump || dist < kDistBetweenPools) &&
// TODO(1236125): Cleanup the "magic" number below. We know that
// the code generation will test every kCheckConstIntervalInst.
// Thus we are safe as long as we generate less than 7 constant
// entries per instruction.
(num_prinfo_ < (kMaxNumPRInfo - (7 * kCheckConstIntervalInst)))) {
return;
void Assembler::BlockConstPoolFor(int instructions) {
int pc_limit = pc_offset() + instructions * kInstrSize;
if (no_const_pool_before_ < pc_limit) {
// If there are some pending entries, the constant pool cannot be blocked
// further than first_const_pool_use_ + kMaxDistToPool
ASSERT((num_pending_reloc_info_ == 0) ||
(pc_limit < (first_const_pool_use_ + kMaxDistToPool)));
no_const_pool_before_ = pc_limit;
}
// If we did not return by now, we need to emit the constant pool soon.
if (next_buffer_check_ < no_const_pool_before_) {
next_buffer_check_ = no_const_pool_before_;
}
}
// However, some small sequences of instructions must not be broken up by the
// insertion of a constant pool; such sequences are protected by setting
// either const_pool_blocked_nesting_ or no_const_pool_before_, which are
// both checked here. Also, recursive calls to CheckConstPool are blocked by
// no_const_pool_before_.
if (const_pool_blocked_nesting_ > 0 || pc_offset() < no_const_pool_before_) {
// Emission is currently blocked; make sure we try again as soon as
// possible.
if (const_pool_blocked_nesting_ > 0) {
next_buffer_check_ = pc_offset() + kInstrSize;
} else {
next_buffer_check_ = no_const_pool_before_;
}
void Assembler::CheckConstPool(bool force_emit, bool require_jump) {
// Some short sequences of instructions must not be broken up by constant pool
// emission; such sequences are protected by calls to BlockConstPoolFor and
// BlockConstPoolScope.
if (is_const_pool_blocked()) {
// Something is wrong if emission is forced and blocked at the same time.
ASSERT(!force_emit);
return;
}
int jump_instr = require_jump ? kInstrSize : 0;
// There is nothing to do if there are no pending constant pool entries.
if (num_pending_reloc_info_ == 0) {
// Calculate the offset of the next check.
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
return;
}
// We emit a constant pool when:
// * requested to do so by parameter force_emit (e.g. after each function).
// * the distance to the first instruction accessing the constant pool is
// kAvgDistToPool or more.
// * no jump is required and the distance to the first instruction accessing
// the constant pool is at least kMaxDistToPool / 2.
ASSERT(first_const_pool_use_ >= 0);
int dist = pc_offset() - first_const_pool_use_;
if (!force_emit && dist < kAvgDistToPool &&
(require_jump || (dist < (kMaxDistToPool / 2)))) {
return;
}
// Check that the code buffer is large enough before emitting the constant
// pool and relocation information (include the jump over the pool and the
// constant pool marker).
int max_needed_space =
jump_instr + kInstrSize + num_prinfo_*(kInstrSize + kMaxRelocSize);
while (buffer_space() <= (max_needed_space + kGap)) GrowBuffer();
// Block recursive calls to CheckConstPool.
BlockConstPoolBefore(pc_offset() + jump_instr + kInstrSize +
num_prinfo_*kInstrSize);
// Don't bother to check for the emit calls below.
next_buffer_check_ = no_const_pool_before_;
// Emit jump over constant pool if necessary.
Label after_pool;
if (require_jump) b(&after_pool);
RecordComment("[ Constant Pool");
// Put down constant pool marker "Undefined instruction" as specified by
// A3.1 Instruction set encoding.
emit(0x03000000 | num_prinfo_);
// Emit constant pool entries.
for (int i = 0; i < num_prinfo_; i++) {
RelocInfo& rinfo = prinfo_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION &&
rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
Instr instr = instr_at(rinfo.pc());
// Instruction to patch must be a ldr/str [pc, #offset].
// P and U set, B and W clear, Rn == pc, offset12 still 0.
ASSERT((instr & (7*B25 | P | U | B | W | 15*B16 | kOff12Mask)) ==
(2*B25 | P | U | pc.code()*B16));
int delta = pc_ - rinfo.pc() - 8;
ASSERT(delta >= -4); // instr could be ldr pc, [pc, #-4] followed by targ32
if (delta < 0) {
instr &= ~U;
delta = -delta;
// pool (include the jump over the pool and the constant pool marker and
// the gap to the relocation information).
int jump_instr = require_jump ? kInstrSize : 0;
int needed_space = jump_instr + kInstrSize +
num_pending_reloc_info_ * kInstrSize + kGap;
while (buffer_space() <= needed_space) GrowBuffer();
{
// Block recursive calls to CheckConstPool.
BlockConstPoolScope block_const_pool(this);
// Emit jump over constant pool if necessary.
Label after_pool;
if (require_jump) {
b(&after_pool);
}
ASSERT(is_uint12(delta));
instr_at_put(rinfo.pc(), instr + delta);
emit(rinfo.data());
}
num_prinfo_ = 0;
last_const_pool_end_ = pc_offset();
RecordComment("]");
RecordComment("[ Constant Pool");
// Put down constant pool marker "Undefined instruction" as specified by
// A5.6 (ARMv7) Instruction set encoding.
emit(kConstantPoolMarker | num_pending_reloc_info_);
// Emit constant pool entries.
for (int i = 0; i < num_pending_reloc_info_; i++) {
RelocInfo& rinfo = pending_reloc_info_[i];
ASSERT(rinfo.rmode() != RelocInfo::COMMENT &&
rinfo.rmode() != RelocInfo::POSITION &&
rinfo.rmode() != RelocInfo::STATEMENT_POSITION);
Instr instr = instr_at(rinfo.pc());
// Instruction to patch must be 'ldr rd, [pc, #offset]' with offset == 0.
ASSERT(IsLdrPcImmediateOffset(instr) &&
GetLdrRegisterImmediateOffset(instr) == 0);
int delta = pc_ - rinfo.pc() - kPcLoadDelta;
// 0 is the smallest delta:
// ldr rd, [pc, #0]
// constant pool marker
// data
ASSERT(is_uint12(delta));
instr_at_put(rinfo.pc(), SetLdrRegisterImmediateOffset(instr, delta));
emit(rinfo.data());
}
num_pending_reloc_info_ = 0;
first_const_pool_use_ = -1;
if (after_pool.is_linked()) {
bind(&after_pool);
RecordComment("]");
if (after_pool.is_linked()) {
bind(&after_pool);
}
}
// Since a constant pool was just emitted, move the check offset forward by
// the standard interval.
next_buffer_check_ = pc_offset() + kCheckConstInterval;
next_buffer_check_ = pc_offset() + kCheckPoolInterval;
}

253
deps/v8/src/arm/assembler-arm.h

@ -32,7 +32,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// A light-weight ARM Assembler
// Generates user mode instructions for the ARM architecture up to version 5
@ -72,6 +72,7 @@ namespace internal {
struct Register {
static const int kNumRegisters = 16;
static const int kNumAllocatableRegisters = 8;
static const int kSizeInBytes = 4;
static int ToAllocationIndex(Register reg) {
ASSERT(reg.code() < kNumAllocatableRegisters);
@ -166,13 +167,14 @@ struct SwVfpRegister {
// Double word VFP register.
struct DwVfpRegister {
// d0 has been excluded from allocation. This is following ia32
// where xmm0 is excluded. This should be revisited.
// Currently d0 is used as a scratch register.
// d1 has also been excluded from allocation to be used as a scratch
// register as well.
static const int kNumRegisters = 16;
static const int kNumAllocatableRegisters = 15;
// A few double registers are reserved: one as a scratch register and one to
// hold 0.0, that does not fit in the immediate field of vmov instructions.
// d14: 0.0
// d15: scratch register.
static const int kNumReservedRegisters = 2;
static const int kNumAllocatableRegisters = kNumRegisters -
kNumReservedRegisters;
static int ToAllocationIndex(DwVfpRegister reg) {
ASSERT(reg.code() != 0);
@ -187,6 +189,7 @@ struct DwVfpRegister {
static const char* AllocationIndexToString(int index) {
ASSERT(index >= 0 && index < kNumAllocatableRegisters);
const char* const names[] = {
"d0",
"d1",
"d2",
"d3",
@ -199,9 +202,7 @@ struct DwVfpRegister {
"d10",
"d11",
"d12",
"d13",
"d14",
"d15"
"d13"
};
return names[index];
}
@ -302,6 +303,11 @@ const DwVfpRegister d13 = { 13 };
const DwVfpRegister d14 = { 14 };
const DwVfpRegister d15 = { 15 };
// Aliases for double registers.
const DwVfpRegister kFirstCalleeSavedDoubleReg = d8;
const DwVfpRegister kLastCalleeSavedDoubleReg = d15;
const DwVfpRegister kDoubleRegZero = d14;
// Coprocessor register
struct CRegister {
@ -389,8 +395,11 @@ class Operand BASE_EMBEDDED {
INLINE(bool is_reg() const);
// Return true if this operand fits in one instruction so that no
// 2-instruction solution with a load into the ip register is necessary.
bool is_single_instruction() const;
// 2-instruction solution with a load into the ip register is necessary. If
// the instruction this operand is used for is a MOV or MVN instruction the
// actual instruction to use is required for this calculation. For other
// instructions instr is ignored.
bool is_single_instruction(Instr instr = 0) const;
bool must_use_constant_pool() const;
inline int32_t immediate() const {
@ -447,6 +456,7 @@ class MemOperand BASE_EMBEDDED {
Register rn() const { return rn_; }
Register rm() const { return rm_; }
AddrMode am() const { return am_; }
bool OffsetIsUint12Encodable() const {
return offset_ >= 0 ? is_uint12(offset_) : is_uint12(-offset_);
@ -469,43 +479,98 @@ class CpuFeatures : public AllStatic {
public:
// Detect features of the target CPU. Set safe defaults if the serializer
// is enabled (snapshots must be portable).
static void Probe(bool portable);
static void Probe();
// Check whether a feature is supported by the target CPU.
static bool IsSupported(CpuFeature f) {
ASSERT(initialized_);
if (f == VFP3 && !FLAG_enable_vfp3) return false;
return (supported_ & (1u << f)) != 0;
}
#ifdef DEBUG
// Check whether a feature is currently enabled.
static bool IsEnabled(CpuFeature f) {
return (enabled_ & (1u << f)) != 0;
ASSERT(initialized_);
Isolate* isolate = Isolate::UncheckedCurrent();
if (isolate == NULL) {
// When no isolate is available, work as if we're running in
// release mode.
return IsSupported(f);
}
unsigned enabled = static_cast<unsigned>(isolate->enabled_cpu_features());
return (enabled & (1u << f)) != 0;
}
#endif
// Enable a specified feature within a scope.
class Scope BASE_EMBEDDED {
#ifdef DEBUG
public:
explicit Scope(CpuFeature f) {
unsigned mask = 1u << f;
ASSERT(CpuFeatures::IsSupported(f));
ASSERT(!Serializer::enabled() ||
(found_by_runtime_probing_ & (1u << f)) == 0);
old_enabled_ = CpuFeatures::enabled_;
CpuFeatures::enabled_ |= 1u << f;
(CpuFeatures::found_by_runtime_probing_ & mask) == 0);
isolate_ = Isolate::UncheckedCurrent();
old_enabled_ = 0;
if (isolate_ != NULL) {
old_enabled_ = static_cast<unsigned>(isolate_->enabled_cpu_features());
isolate_->set_enabled_cpu_features(old_enabled_ | mask);
}
}
~Scope() { CpuFeatures::enabled_ = old_enabled_; }
~Scope() {
ASSERT_EQ(Isolate::UncheckedCurrent(), isolate_);
if (isolate_ != NULL) {
isolate_->set_enabled_cpu_features(old_enabled_);
}
}
private:
Isolate* isolate_;
unsigned old_enabled_;
#else
public:
explicit Scope(CpuFeature f) {}
#endif
};
class TryForceFeatureScope BASE_EMBEDDED {
public:
explicit TryForceFeatureScope(CpuFeature f)
: old_supported_(CpuFeatures::supported_) {
if (CanForce()) {
CpuFeatures::supported_ |= (1u << f);
}
}
~TryForceFeatureScope() {
if (CanForce()) {
CpuFeatures::supported_ = old_supported_;
}
}
private:
static bool CanForce() {
// It's only safe to temporarily force support of CPU features
// when there's only a single isolate, which is guaranteed when
// the serializer is enabled.
return Serializer::enabled();
}
const unsigned old_supported_;
};
private:
#ifdef DEBUG
static bool initialized_;
#endif
static unsigned supported_;
static unsigned enabled_;
static unsigned found_by_runtime_probing_;
DISALLOW_COPY_AND_ASSIGN(CpuFeatures);
};
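A typical call site for the reworked Scope, sketched for orientation (illustrative only; assumes a MacroAssembler* masm is in scope and that a fallback path exists when VFP3 is absent):
if (CpuFeatures::IsSupported(VFP3)) {
  CpuFeatures::Scope scope(VFP3);        // marks VFP3 enabled for IsEnabled()
  masm->vldr(d0, MemOperand(r0, 0));     // VFP instructions are legal here
  masm->vadd(d0, d0, kDoubleRegZero);
} else {
  // Emit a core-register fallback sequence instead.
}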
@ -533,7 +598,7 @@ extern const Instr kAndBicFlip;
class Assembler : public Malloced {
class Assembler : public AssemblerBase {
public:
// Create an assembler. Instructions and relocation information are emitted
// into a buffer, with the instructions starting from the beginning and the
@ -548,9 +613,12 @@ class Assembler : public Malloced {
// for code generation and assumes its size to be buffer_size. If the buffer
// is too small, a fatal error occurs. No deallocation of the buffer is done
// upon destruction of the assembler.
Assembler(void* buffer, int buffer_size);
Assembler(Isolate* isolate, void* buffer, int buffer_size);
~Assembler();
// Overrides the default provided by FLAG_debug_code.
void set_emit_debug_code(bool value) { emit_debug_code_ = value; }
// GetCode emits any pending (non-emitted) code and fills the descriptor
// desc. GetCode() is idempotent; it returns the same result if no other
// Assembler functions are invoked in between GetCode() calls.
@ -889,16 +957,6 @@ class Assembler : public Malloced {
void ldc2(Coprocessor coproc, CRegister crd, Register base, int option,
LFlag l = Short); // v5 and above
void stc(Coprocessor coproc, CRegister crd, const MemOperand& dst,
LFlag l = Short, Condition cond = al);
void stc(Coprocessor coproc, CRegister crd, Register base, int option,
LFlag l = Short, Condition cond = al);
void stc2(Coprocessor coproc, CRegister crd, const MemOperand& dst,
LFlag l = Short); // v5 and above
void stc2(Coprocessor coproc, CRegister crd, Register base, int option,
LFlag l = Short); // v5 and above
// Support for VFP.
// All these APIs support S0 to S31 and D0 to D15.
// Currently these APIs do not support extended D registers, i.e, D16 to D31.
@ -937,6 +995,30 @@ class Assembler : public Malloced {
const MemOperand& dst,
const Condition cond = al);
void vldm(BlockAddrMode am,
Register base,
DwVfpRegister first,
DwVfpRegister last,
Condition cond = al);
void vstm(BlockAddrMode am,
Register base,
DwVfpRegister first,
DwVfpRegister last,
Condition cond = al);
void vldm(BlockAddrMode am,
Register base,
SwVfpRegister first,
SwVfpRegister last,
Condition cond = al);
void vstm(BlockAddrMode am,
Register base,
SwVfpRegister first,
SwVfpRegister last,
Condition cond = al);
void vmov(const DwVfpRegister dst,
double imm,
const Condition cond = al);
@ -989,6 +1071,9 @@ class Assembler : public Malloced {
VFPConversionMode mode = kDefaultRoundToZero,
const Condition cond = al);
void vneg(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
void vabs(const DwVfpRegister dst,
const DwVfpRegister src,
const Condition cond = al);
@ -1079,10 +1164,6 @@ class Assembler : public Malloced {
DISALLOW_IMPLICIT_CONSTRUCTORS(BlockConstPoolScope);
};
// Postpone the generation of the constant pool for the specified number of
// instructions.
void BlockConstPoolFor(int instructions);
// Debugging
// Mark address of the ExitJSFrame code.
@ -1091,6 +1172,10 @@ class Assembler : public Malloced {
// Mark address of a debug break slot.
void RecordDebugBreakSlot();
// Record the AST id of the CallIC being compiled, so that it can be placed
// in the relocation information.
void RecordAstId(unsigned ast_id) { ast_id_for_reloc_info_ = ast_id; }
// Record a comment relocation entry that can be used by a disassembler.
// Use --code-comments to enable.
void RecordComment(const char* msg);
@ -1106,12 +1191,6 @@ class Assembler : public Malloced {
PositionsRecorder* positions_recorder() { return &positions_recorder_; }
bool can_peephole_optimize(int instructions) {
if (!allow_peephole_optimization_) return false;
if (last_bound_pos_ > pc_offset() - instructions * kInstrSize) return false;
return reloc_info_writer.last_pc() <= pc_ - instructions * kInstrSize;
}
// Read/patch instructions
static Instr instr_at(byte* pc) { return *reinterpret_cast<Instr*>(pc); }
static void instr_at_put(byte* pc, Instr instr) {
@ -1144,10 +1223,27 @@ class Assembler : public Malloced {
static int GetCmpImmediateRawImmediate(Instr instr);
static bool IsNop(Instr instr, int type = NON_MARKING_NOP);
// Check if is time to emit a constant pool for pending reloc info entries
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB thereby defining a maximum distance between the instruction
// and the accessed constant.
static const int kMaxDistToPool = 4*KB;
static const int kMaxNumPendingRelocInfo = kMaxDistToPool/kInstrSize;
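For concreteness (assuming ARM's 4-byte kInstrSize from constants-arm.h): kMaxDistToPool / kInstrSize = 4096 / 4 = 1024, i.e. in the worst case one pending constant pool entry per instruction.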
// Postpone the generation of the constant pool for the specified number of
// instructions.
void BlockConstPoolFor(int instructions);
// Check if it is time to emit a constant pool.
void CheckConstPool(bool force_emit, bool require_jump);
protected:
// Relocation for a type-recording IC has the AST id added to it. This
// member variable is a way to pass the information from the call site to
// the relocation info.
unsigned ast_id_for_reloc_info_;
bool emit_debug_code() const { return emit_debug_code_; }
int buffer_space() const { return reloc_info_writer.pos() - pc_; }
// Read/patch instructions
@ -1162,18 +1258,37 @@ class Assembler : public Malloced {
// Patch branch instruction at pos to branch to given branch target pos
void target_at_put(int pos, int target_pos);
// Block the emission of the constant pool before pc_offset
void BlockConstPoolBefore(int pc_offset) {
if (no_const_pool_before_ < pc_offset) no_const_pool_before_ = pc_offset;
}
// Prevent constant pool emission until EndBlockConstPool is called.
// Calls to this function can be nested but must be followed by an equal
// number of calls to EndBlockConstPool.
void StartBlockConstPool() {
const_pool_blocked_nesting_++;
if (const_pool_blocked_nesting_++ == 0) {
// Prevent constant pool checks happening by setting the next check to
// the biggest possible offset.
next_buffer_check_ = kMaxInt;
}
}
// Resume constant pool emission. Needs to be called as many times as
// StartBlockConstPool to have an effect.
void EndBlockConstPool() {
const_pool_blocked_nesting_--;
if (--const_pool_blocked_nesting_ == 0) {
// Check the constant pool hasn't been blocked for too long.
ASSERT((num_pending_reloc_info_ == 0) ||
(pc_offset() < (first_const_pool_use_ + kMaxDistToPool)));
// Two cases:
// * no_const_pool_before_ >= next_buffer_check_ and the emission is
// still blocked
// * no_const_pool_before_ < next_buffer_check_ and the next emit will
// trigger a check.
next_buffer_check_ = no_const_pool_before_;
}
}
bool is_const_pool_blocked() const {
return (const_pool_blocked_nesting_ > 0) ||
(pc_offset() < no_const_pool_before_);
}
bool is_const_pool_blocked() const { return const_pool_blocked_nesting_ > 0; }
private:
// Code buffer:
@ -1183,9 +1298,6 @@ class Assembler : public Malloced {
// True if the assembler owns the buffer, false if buffer is external.
bool own_buffer_;
// Buffer size and constant pool distance are checked together at regular
// intervals of kBufferCheckInterval emitted bytes
static const int kBufferCheckInterval = 1*KB/2;
int next_buffer_check_; // pc offset of next buffer check
// Code generation
@ -1210,40 +1322,41 @@ class Assembler : public Malloced {
// expensive. By default we only check again once a number of instructions
// has been generated. That also means that the sizing of the buffers is not
// an exact science, and that we rely on some slop to not overrun buffers.
static const int kCheckConstIntervalInst = 32;
static const int kCheckConstInterval = kCheckConstIntervalInst * kInstrSize;
static const int kCheckPoolIntervalInst = 32;
static const int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
// Pools are emitted after function return and in dead code at (more or less)
// regular intervals of kDistBetweenPools bytes
static const int kDistBetweenPools = 1*KB;
// Constants in pools are accessed via pc relative addressing, which can
// reach +/-4KB thereby defining a maximum distance between the instruction
// and the accessed constant. We satisfy this constraint by limiting the
// distance between pools.
static const int kMaxDistBetweenPools = 4*KB - 2*kBufferCheckInterval;
// Average distance between a constant pool and the first instruction
// accessing the constant pool. Longer distance should result in less I-cache
// pollution.
// In practice the distance will be smaller since constant pool emission is
// forced after function return and sometimes after unconditional branches.
static const int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;
// Emission of the constant pool may be blocked in some code sequences.
int const_pool_blocked_nesting_; // Block emission if this is not zero.
int no_const_pool_before_; // Block emission before this pc offset.
// Keep track of the last emitted pool to guarantee a maximal distance
int last_const_pool_end_; // pc offset following the last constant pool
// Keep track of the first instruction requiring a constant pool entry
// since the previous constant pool was emitted.
int first_const_pool_use_;
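The arithmetic behind these constants is simple: a pc-relative load reaches about 4KB, so the pool must be flushed before the first pending entry drifts out of range, and the check only needs to run every kCheckPoolInterval bytes. A sketch with illustrative values (kMaxDistToPool itself is defined elsewhere in this header; 4KB here is the reach quoted in the comment, not necessarily the real constant):

#include <cstdio>

// Illustrative values only; the real constants live in this header.
constexpr int kInstrSize = 4;
constexpr int kMaxDistToPool = 4 * 1024;            // pc-relative load reach
constexpr int kCheckPoolIntervalInst = 32;
constexpr int kCheckPoolInterval = kCheckPoolIntervalInst * kInstrSize;
constexpr int kAvgDistToPool = kMaxDistToPool - kCheckPoolInterval;

// Simplified version of the decision CheckConstPool has to make: emit the
// pool before the first pending entry can no longer reach it.
bool MustEmitPool(int pc_offset, int first_const_pool_use) {
  return pc_offset - first_const_pool_use >= kAvgDistToPool;
}

int main() {
  std::printf("check every %d bytes, emit within %d bytes of first use\n",
              kCheckPoolInterval, kAvgDistToPool);
  return MustEmitPool(5000, 100) ? 0 : 1;  // 4900 >= 3968, so the pool is due
}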
// Relocation info generation
// Each relocation is encoded as a variable size value
static const int kMaxRelocSize = RelocInfoWriter::kMaxSize;
RelocInfoWriter reloc_info_writer;
// Relocation info records are also used during code generation as temporary
// containers for constants and code target addresses until they are emitted
// to the constant pool. These pending relocation info records are temporarily
// stored in a separate buffer until a constant pool is emitted.
// If every instruction in a long sequence is accessing the pool, we need one
// pending relocation entry per instruction.
static const int kMaxNumPRInfo = kMaxDistBetweenPools/kInstrSize;
RelocInfo prinfo_[kMaxNumPRInfo]; // the buffer of pending relocation info
int num_prinfo_; // number of pending reloc info entries in the buffer
// the buffer of pending relocation info
RelocInfo pending_reloc_info_[kMaxNumPendingRelocInfo];
// number of pending reloc info entries in the buffer
int num_pending_reloc_info_;
// The bound position, before this we cannot do instruction elimination.
int last_bound_pos_;
@ -1275,7 +1388,7 @@ class Assembler : public Malloced {
friend class BlockConstPoolScope;
PositionsRecorder positions_recorder_;
bool allow_peephole_optimization_;
bool emit_debug_code_;
friend class PositionsRecorder;
friend class EnsureSpace;
};

170
deps/v8/src/arm/builtins-arm.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen-inl.h"
#include "codegen.h"
#include "debug.h"
#include "deoptimizer.h"
#include "full-codegen.h"
@ -68,7 +68,7 @@ void Builtins::Generate_Adaptor(MacroAssembler* masm,
// JumpToExternalReference expects r0 to contain the number of arguments
// including the receiver and the extra arguments.
__ add(r0, r0, Operand(num_extra_args + 1));
__ JumpToExternalReference(ExternalReference(id));
__ JumpToExternalReference(ExternalReference(id, masm->isolate()));
}
@ -310,6 +310,7 @@ static void AllocateJSArray(MacroAssembler* masm,
// construct call and normal call.
static void ArrayNativeCode(MacroAssembler* masm,
Label* call_generic_code) {
Counters* counters = masm->isolate()->counters();
Label argc_one_or_more, argc_two_or_more;
// Check for array construction with zero arguments or one.
@ -325,7 +326,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
r5,
JSArray::kPreallocatedArrayElements,
call_generic_code);
__ IncrementCounter(&Counters::array_function_native, 1, r3, r4);
__ IncrementCounter(counters->array_function_native(), 1, r3, r4);
// Setup return value, remove receiver from stack and return.
__ mov(r0, r2);
__ add(sp, sp, Operand(kPointerSize));
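The repeated counters->...() and ExternalReference(id, masm->isolate()) edits in this file are all the same multi-isolate refactor: what used to be a process-wide static (&Counters::array_function_native) is now owned by the Isolate the code is generated for. A hedged sketch of the pattern with placeholder types (not the real V8 classes):

#include <cstdio>

// Placeholder types; the real Isolate/Counters live in src/isolate.h.
struct Counters { int array_function_native = 0; };
struct Isolate  { Counters counters_; Counters* counters() { return &counters_; } };

// Old style: a process-wide static such as &Counters::array_function_native.
// New style: every access goes through the isolate owning the generated code.
void IncrementArrayCounter(Isolate* isolate) {
  isolate->counters()->array_function_native++;  // stands in for IncrementCounter
}

int main() {
  Isolate a, b;
  IncrementArrayCounter(&a);
  std::printf("a=%d b=%d\n", a.counters()->array_function_native,
              b.counters()->array_function_native);  // no longer shared state
}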
@ -361,7 +362,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
r7,
true,
call_generic_code);
__ IncrementCounter(&Counters::array_function_native, 1, r2, r4);
__ IncrementCounter(counters->array_function_native(), 1, r2, r4);
// Setup return value, remove receiver and argument from stack and return.
__ mov(r0, r3);
__ add(sp, sp, Operand(2 * kPointerSize));
@ -385,7 +386,7 @@ static void ArrayNativeCode(MacroAssembler* masm,
r7,
false,
call_generic_code);
__ IncrementCounter(&Counters::array_function_native, 1, r2, r6);
__ IncrementCounter(counters->array_function_native(), 1, r2, r6);
// Fill arguments as array elements. Copy from the top of the stack (last
// element) to the array backing store filling it backwards. Note:
@ -428,7 +429,7 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
GenerateLoadArrayFunction(masm, r1);
if (FLAG_debug_code) {
// Initial map for the builtin Array function shoud be a map.
// Initial map for the builtin Array functions should be maps.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ tst(r2, Operand(kSmiTagMask));
__ Assert(ne, "Unexpected initial map for Array function");
@ -442,8 +443,9 @@ void Builtins::Generate_ArrayCode(MacroAssembler* masm) {
// Jump to the generic array code if the specialized code cannot handle
// the construction.
__ bind(&generic_array_code);
Code* code = Builtins::builtin(Builtins::ArrayCodeGeneric);
Handle<Code> array_code(code);
Handle<Code> array_code =
masm->isolate()->builtins()->ArrayCodeGeneric();
__ Jump(array_code, RelocInfo::CODE_TARGET);
}
@ -458,11 +460,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
Label generic_constructor;
if (FLAG_debug_code) {
// The array construct code is only set for the builtin Array function which
// always have a map.
GenerateLoadArrayFunction(masm, r2);
__ cmp(r1, r2);
__ Assert(eq, "Unexpected Array function");
// The array construct code is only set for the builtin and internal
// Array functions which always have a map.
// Initial map for the builtin Array function should be a map.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ tst(r2, Operand(kSmiTagMask));
@ -477,8 +476,8 @@ void Builtins::Generate_ArrayConstructCode(MacroAssembler* masm) {
// Jump to the generic construct code in case the specialized code cannot
// handle the construction.
__ bind(&generic_constructor);
Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
Handle<Code> generic_construct_stub(code);
Handle<Code> generic_construct_stub =
masm->isolate()->builtins()->JSConstructStubGeneric();
__ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
}
@ -491,7 +490,8 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// -- sp[(argc - n - 1) * 4] : arg[n] (zero based)
// -- sp[argc * 4] : receiver
// -----------------------------------
__ IncrementCounter(&Counters::string_ctor_calls, 1, r2, r3);
Counters* counters = masm->isolate()->counters();
__ IncrementCounter(counters->string_ctor_calls(), 1, r2, r3);
Register function = r1;
if (FLAG_debug_code) {
@ -521,7 +521,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
r5, // Scratch.
false, // Is it a Smi?
&not_cached);
__ IncrementCounter(&Counters::string_ctor_cached_number, 1, r3, r4);
__ IncrementCounter(counters->string_ctor_cached_number(), 1, r3, r4);
__ bind(&argument_is_string);
// ----------- S t a t e -------------
@ -575,16 +575,16 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
__ tst(r3, Operand(kIsNotStringMask));
__ b(ne, &convert_argument);
__ mov(argument, r0);
__ IncrementCounter(&Counters::string_ctor_conversions, 1, r3, r4);
__ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
__ b(&argument_is_string);
// Invoke the conversion builtin and put the result into r2.
__ bind(&convert_argument);
__ push(function); // Preserve the function.
__ IncrementCounter(&Counters::string_ctor_conversions, 1, r3, r4);
__ IncrementCounter(counters->string_ctor_conversions(), 1, r3, r4);
__ EnterInternalFrame();
__ push(r0);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_JS);
__ InvokeBuiltin(Builtins::TO_STRING, CALL_FUNCTION);
__ LeaveInternalFrame();
__ pop(function);
__ mov(argument, r0);
@ -600,7 +600,7 @@ void Builtins::Generate_StringConstructCode(MacroAssembler* masm) {
// At this point the argument is already a string. Call runtime to
// create a string wrapper.
__ bind(&gc_required);
__ IncrementCounter(&Counters::string_ctor_gc_required, 1, r3, r4);
__ IncrementCounter(counters->string_ctor_gc_required(), 1, r3, r4);
__ EnterInternalFrame();
__ push(argument);
__ CallRuntime(Runtime::kNewStringWrapper, 1);
@ -619,8 +619,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
Label non_function_call;
// Check that the function is not a smi.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &non_function_call);
__ JumpIfSmi(r1, &non_function_call);
// Check that the function is a JSFunction.
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &non_function_call);
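JumpIfSmi is just the old tst/b-eq pair packaged as one macro; both forms test the smi tag bit. For reference, a host-side sketch of the 32-bit tagging scheme this relies on (assuming the usual kSmiTag = 0, kSmiTagSize = 1 layout used by the surrounding code):

#include <cassert>
#include <cstdint>

// 32-bit V8 smi layout assumed here: value in the upper bits, tag bit 0 clear.
constexpr intptr_t kSmiTagSize = 1;
constexpr intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
constexpr intptr_t kSmiTag = 0;

inline bool IsSmi(intptr_t word)     { return (word & kSmiTagMask) == kSmiTag; }
inline intptr_t ToSmi(int32_t v)     { return static_cast<intptr_t>(v) << kSmiTagSize; }
inline int32_t SmiValue(intptr_t w)  { return static_cast<int32_t>(w >> kSmiTagSize); }

int main() {
  intptr_t s = ToSmi(42);
  assert(IsSmi(s) && SmiValue(s) == 42);  // what "tst rX, #kSmiTagMask; b eq" tests
}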
@ -636,7 +635,8 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
__ SetCallKind(r5, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
}
@ -647,6 +647,8 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Should never count constructions for api objects.
ASSERT(!is_api_function || !count_constructions);
Isolate* isolate = masm->isolate();
// Enter a construct frame.
__ EnterConstructFrame();
@ -662,7 +664,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address();
ExternalReference::debug_step_in_fp_address(isolate);
__ mov(r2, Operand(debug_step_in_fp));
__ ldr(r2, MemOperand(r2));
__ tst(r2, r2);
@ -672,8 +674,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// Load the initial map and verify that it is in fact a map.
// r1: constructor function
__ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset));
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &rt_call);
__ JumpIfSmi(r2, &rt_call);
__ CompareObjectType(r2, r3, r4, MAP_TYPE);
__ b(ne, &rt_call);
@ -908,14 +909,15 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// r1: constructor function
if (is_api_function) {
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
Handle<Code> code = Handle<Code>(
Builtins::builtin(Builtins::HandleApiCallConstruct));
Handle<Code> code =
masm->isolate()->builtins()->HandleApiCallConstruct();
ParameterCount expected(0);
__ InvokeCode(code, expected, expected,
RelocInfo::CODE_TARGET, CALL_FUNCTION);
RelocInfo::CODE_TARGET, CALL_FUNCTION, CALL_AS_METHOD);
} else {
ParameterCount actual(r0);
__ InvokeFunction(r1, actual, CALL_FUNCTION);
__ InvokeFunction(r1, actual, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
}
// Pop the function from the stack.
@ -942,12 +944,11 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
// sp[0]: receiver (newly allocated object)
// sp[1]: constructor function
// sp[2]: number of arguments (smi-tagged)
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &use_receiver);
__ JumpIfSmi(r0, &use_receiver);
// If the type of the result (stored in its map) is less than
// FIRST_JS_OBJECT_TYPE, it is not an object in the ECMA sense.
__ CompareObjectType(r0, r3, r3, FIRST_JS_OBJECT_TYPE);
// FIRST_SPEC_OBJECT_TYPE, it is not an object in the ECMA sense.
__ CompareObjectType(r0, r3, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &exit);
// Throw away the result of the constructor invocation and use the
@ -966,7 +967,7 @@ static void Generate_JSConstructStubHelper(MacroAssembler* masm,
__ LeaveConstructFrame();
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize));
__ IncrementCounter(&Counters::constructed_objects, 1, r1, r2);
__ IncrementCounter(isolate->counters()->constructed_objects(), 1, r1, r2);
__ Jump(lr);
}
@ -1006,7 +1007,8 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
// Set up the roots register.
ExternalReference roots_address = ExternalReference::roots_address();
ExternalReference roots_address =
ExternalReference::roots_address(masm->isolate());
__ mov(r10, Operand(roots_address));
// Push the function and the receiver onto the stack.
@ -1042,11 +1044,12 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));
if (is_construct) {
__ Call(Handle<Code>(Builtins::builtin(Builtins::JSConstructCall)),
__ Call(masm->isolate()->builtins()->JSConstructCall(),
RelocInfo::CODE_TARGET);
} else {
ParameterCount actual(r0);
__ InvokeFunction(r1, actual, CALL_FUNCTION);
__ InvokeFunction(r1, actual, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
}
// Exit the JS frame and remove the parameters (except function), and return.
@ -1074,12 +1077,17 @@ void Builtins::Generate_LazyCompile(MacroAssembler* masm) {
// Preserve the function.
__ push(r1);
// Push call kind information.
__ push(r5);
// Push the function on the stack as the argument to the runtime function.
__ push(r1);
__ CallRuntime(Runtime::kLazyCompile, 1);
// Calculate the entry point.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore call kind information.
__ pop(r5);
// Restore saved function.
__ pop(r1);
@ -1097,12 +1105,17 @@ void Builtins::Generate_LazyRecompile(MacroAssembler* masm) {
// Preserve the function.
__ push(r1);
// Push call kind information.
__ push(r5);
// Push the function on the stack as the argument to the runtime function.
__ push(r1);
__ CallRuntime(Runtime::kLazyRecompile, 1);
// Calculate the entry point.
__ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag));
// Restore call kind information.
__ pop(r5);
// Restore saved function.
__ pop(r1);
@ -1170,9 +1183,11 @@ void Builtins::Generate_NotifyOSR(MacroAssembler* masm) {
void Builtins::Generate_OnStackReplacement(MacroAssembler* masm) {
// Probe the CPU to set the supported features, because this builtin
// may be called before the initialization performs CPU setup.
CpuFeatures::Probe(false);
CpuFeatures::TryForceFeatureScope scope(VFP3);
if (!CpuFeatures::IsSupported(VFP3)) {
__ Abort("Unreachable code: Cannot optimize without VFP3 support.");
return;
}
// Lookup the function in the JavaScript frame and push it as an
// argument to the on-stack replacement function.
@ -1218,8 +1233,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// r0: actual number of arguments
Label non_function;
__ ldr(r1, MemOperand(sp, r0, LSL, kPointerSizeLog2));
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &non_function);
__ JumpIfSmi(r1, &non_function);
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &non_function);
@ -1233,31 +1247,33 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Do not transform the receiver for strict mode functions.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
__ ldr(r3, FieldMemOperand(r2, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r3, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
kSmiTagSize)));
__ b(ne, &shift_arguments);
// Do not transform the receiver for native (Compilerhints already in r3).
__ tst(r3, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &shift_arguments);
// Compute the receiver in non-strict mode.
__ add(r2, sp, Operand(r0, LSL, kPointerSizeLog2));
__ ldr(r2, MemOperand(r2, -kPointerSize));
// r0: actual number of arguments
// r1: function
// r2: first argument
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &convert_to_object);
__ JumpIfSmi(r2, &convert_to_object);
__ LoadRoot(r3, Heap::kNullValueRootIndex);
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ cmp(r2, r3);
__ b(eq, &use_global_receiver);
__ LoadRoot(r3, Heap::kUndefinedValueRootIndex);
__ LoadRoot(r3, Heap::kNullValueRootIndex);
__ cmp(r2, r3);
__ b(eq, &use_global_receiver);
__ CompareObjectType(r2, r3, r3, FIRST_JS_OBJECT_TYPE);
__ b(lt, &convert_to_object);
__ cmp(r3, Operand(LAST_JS_OBJECT_TYPE));
__ b(le, &shift_arguments);
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ CompareObjectType(r2, r3, r3, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &shift_arguments);
__ bind(&convert_to_object);
__ EnterInternalFrame(); // In order to preserve argument count.
@ -1265,7 +1281,7 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
__ push(r0);
__ push(r2);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ mov(r2, r0);
__ pop(r0);
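The sequence above is the ES5 non-strict receiver fix-up: null or undefined becomes the global receiver, anything at or above FIRST_SPEC_OBJECT_TYPE is used as-is, and everything else is boxed through Builtins::TO_OBJECT. A tiny host-side model of that decision (the enum names are illustrative, not V8 types):

#include <cassert>

// Hypothetical model of the receiver fix-up performed above for non-strict,
// non-native callees; names here are illustrative only.
enum class Kind { kSmi, kNullOrUndefined, kString, kNumber, kBoolean, kSpecObject };
enum class Receiver { kUseGlobalReceiver, kConvertToObject, kUseAsIs };

Receiver FixUpReceiver(Kind k) {
  if (k == Kind::kNullOrUndefined) return Receiver::kUseGlobalReceiver;
  if (k == Kind::kSpecObject) return Receiver::kUseAsIs;
  return Receiver::kConvertToObject;  // smis and other primitives hit TO_OBJECT
}

int main() {
  assert(FixUpReceiver(Kind::kNullOrUndefined) == Receiver::kUseGlobalReceiver);
  assert(FixUpReceiver(Kind::kSmi) == Receiver::kConvertToObject);
  assert(FixUpReceiver(Kind::kSpecObject) == Receiver::kUseAsIs);
}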
@ -1335,8 +1351,9 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
// Expected number of arguments is 0 for CALL_NON_FUNCTION.
__ mov(r2, Operand(0, RelocInfo::NONE));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
__ SetCallKind(r5, CALL_AS_METHOD);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET);
__ bind(&function);
}
@ -1350,12 +1367,15 @@ void Builtins::Generate_FunctionCall(MacroAssembler* masm) {
FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset));
__ mov(r2, Operand(r2, ASR, kSmiTagSize));
__ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
__ SetCallKind(r5, CALL_AS_METHOD);
__ cmp(r2, r0); // Check formal and actual parameter counts.
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET, ne);
__ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(),
RelocInfo::CODE_TARGET,
ne);
ParameterCount expected(0);
__ InvokeCode(r3, expected, expected, JUMP_FUNCTION);
__ InvokeCode(r3, expected, expected, JUMP_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
}
@ -1372,7 +1392,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(r0);
__ ldr(r0, MemOperand(fp, kArgsOffset)); // get the args array
__ push(r0);
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS);
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_FUNCTION);
// Check the stack for overflow. We are not trying to catch
// interruptions (e.g. debug break and preemption) here, so the "real stack
@ -1390,7 +1410,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ push(r1);
__ push(r0);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_JS);
__ InvokeBuiltin(Builtins::APPLY_OVERFLOW, CALL_FUNCTION);
// End of stack check.
// Push current limit and index.
@ -1410,14 +1430,17 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ ldr(r0, MemOperand(fp, kRecvOffset));
// Do not transform the receiver for strict mode functions.
__ ldr(r1, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r1, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
__ ldr(r2, FieldMemOperand(r1, SharedFunctionInfo::kCompilerHintsOffset));
__ tst(r2, Operand(1 << (SharedFunctionInfo::kStrictModeFunction +
kSmiTagSize)));
__ b(ne, &push_receiver);
// Do not transform the receiver for native (compiler hints already in r2).
__ tst(r2, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize)));
__ b(ne, &push_receiver);
// Compute the receiver in non-strict mode.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &call_to_object);
__ JumpIfSmi(r0, &call_to_object);
__ LoadRoot(r1, Heap::kNullValueRootIndex);
__ cmp(r0, r1);
__ b(eq, &use_global_receiver);
@ -1427,16 +1450,15 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Check if the receiver is already a JavaScript object.
// r0: receiver
__ CompareObjectType(r0, r1, r1, FIRST_JS_OBJECT_TYPE);
__ b(lt, &call_to_object);
__ cmp(r1, Operand(LAST_JS_OBJECT_TYPE));
__ b(le, &push_receiver);
STATIC_ASSERT(LAST_SPEC_OBJECT_TYPE == LAST_TYPE);
__ CompareObjectType(r0, r1, r1, FIRST_SPEC_OBJECT_TYPE);
__ b(ge, &push_receiver);
// Convert the receiver to a regular object.
// r0: receiver
__ bind(&call_to_object);
__ push(r0);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
__ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION);
__ b(&push_receiver);
// Use the current global receiver object as the receiver.
@ -1486,7 +1508,8 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
ParameterCount actual(r0);
__ mov(r0, Operand(r0, ASR, kSmiTagSize));
__ ldr(r1, MemOperand(fp, kFunctionOffset));
__ InvokeFunction(r1, actual, CALL_FUNCTION);
__ InvokeFunction(r1, actual, CALL_FUNCTION,
NullCallWrapper(), CALL_AS_METHOD);
// Tear down the internal frame and remove function, receiver and args.
__ LeaveInternalFrame();
@ -1523,6 +1546,7 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// -- r1 : function (passed through to callee)
// -- r2 : expected number of arguments
// -- r3 : code entry to call
// -- r5 : call kind information
// -----------------------------------
Label invoke, dont_adapt_arguments;

3702
deps/v8/src/arm/code-stubs-arm.cc

File diff suppressed because it is too large

615
deps/v8/src/arm/code-stubs-arm.h

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -38,202 +38,128 @@ namespace internal {
// TranscendentalCache runtime function.
class TranscendentalCacheStub: public CodeStub {
public:
explicit TranscendentalCacheStub(TranscendentalCache::Type type)
: type_(type) {}
enum ArgumentType {
TAGGED = 0 << TranscendentalCache::kTranscendentalTypeBits,
UNTAGGED = 1 << TranscendentalCache::kTranscendentalTypeBits
};
TranscendentalCacheStub(TranscendentalCache::Type type,
ArgumentType argument_type)
: type_(type), argument_type_(argument_type) { }
void Generate(MacroAssembler* masm);
private:
TranscendentalCache::Type type_;
ArgumentType argument_type_;
void GenerateCallCFunction(MacroAssembler* masm, Register scratch);
Major MajorKey() { return TranscendentalCache; }
int MinorKey() { return type_; }
int MinorKey() { return type_ | argument_type_; }
Runtime::FunctionId RuntimeFunction();
};
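The minor key simply ORs the transcendental type with the TAGGED/UNTAGGED flag, which works because the flag is shifted past kTranscendentalTypeBits. A stand-alone illustration (the bit width and enumerator names here are assumptions, not the real constants):

#include <cassert>

// Assumed width for illustration; the real value is
// TranscendentalCache::kTranscendentalTypeBits.
constexpr int kTranscendentalTypeBits = 3;

enum Type { SIN = 0, COS = 1, TAN = 2, LOG = 3 };  // low bits
enum ArgumentType {                                 // one flag bit above them
  TAGGED = 0 << kTranscendentalTypeBits,
  UNTAGGED = 1 << kTranscendentalTypeBits
};

int MinorKey(Type type, ArgumentType arg) { return type | arg; }

int main() {
  int key = MinorKey(COS, UNTAGGED);
  assert((key & ((1 << kTranscendentalTypeBits) - 1)) == COS);  // type recoverable
  assert((key >> kTranscendentalTypeBits) == 1);                // flag recoverable
}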
class ToBooleanStub: public CodeStub {
public:
explicit ToBooleanStub(Register tos) : tos_(tos) { }
void Generate(MacroAssembler* masm);
private:
Register tos_;
Major MajorKey() { return ToBoolean; }
int MinorKey() { return tos_.code(); }
};
class GenericBinaryOpStub : public CodeStub {
class UnaryOpStub: public CodeStub {
public:
static const int kUnknownIntValue = -1;
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode,
Register lhs,
Register rhs,
int constant_rhs = kUnknownIntValue)
UnaryOpStub(Token::Value op, UnaryOverwriteMode mode)
: op_(op),
mode_(mode),
lhs_(lhs),
rhs_(rhs),
constant_rhs_(constant_rhs),
specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op, constant_rhs)),
runtime_operands_type_(BinaryOpIC::UNINIT_OR_SMI),
name_(NULL) { }
operand_type_(UnaryOpIC::UNINITIALIZED),
name_(NULL) {
}
GenericBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info)
UnaryOpStub(
int key,
UnaryOpIC::TypeInfo operand_type)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
lhs_(LhsRegister(RegisterBits::decode(key))),
rhs_(RhsRegister(RegisterBits::decode(key))),
constant_rhs_(KnownBitsForMinorKey(KnownIntBits::decode(key))),
specialized_on_rhs_(RhsIsOneWeWantToOptimizeFor(op_, constant_rhs_)),
runtime_operands_type_(type_info),
name_(NULL) { }
operand_type_(operand_type),
name_(NULL) {
}
private:
Token::Value op_;
OverwriteMode mode_;
Register lhs_;
Register rhs_;
int constant_rhs_;
bool specialized_on_rhs_;
BinaryOpIC::TypeInfo runtime_operands_type_;
char* name_;
static const int kMaxKnownRhs = 0x40000000;
static const int kKnownRhsKeyBits = 6;
UnaryOverwriteMode mode_;
// Minor key encoding in 17 bits.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 6> {};
class TypeInfoBits: public BitField<int, 8, 3> {};
class RegisterBits: public BitField<bool, 11, 1> {};
class KnownIntBits: public BitField<int, 12, kKnownRhsKeyBits> {};
// Operand type information determined at runtime.
UnaryOpIC::TypeInfo operand_type_;
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() {
ASSERT((lhs_.is(r0) && rhs_.is(r1)) ||
(lhs_.is(r1) && rhs_.is(r0)));
// Encode the parameters in a unique 18 bit value.
return OpBits::encode(op_)
| ModeBits::encode(mode_)
| KnownIntBits::encode(MinorKeyForKnownInt())
| TypeInfoBits::encode(runtime_operands_type_)
| RegisterBits::encode(lhs_.is(r0));
}
char* name_;
void Generate(MacroAssembler* masm);
void HandleNonSmiBitwiseOp(MacroAssembler* masm,
Register lhs,
Register rhs);
void HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
Register lhs,
Register rhs,
const Builtins::JavaScript& builtin);
void GenerateTypeTransition(MacroAssembler* masm);
const char* GetName();
static bool RhsIsOneWeWantToOptimizeFor(Token::Value op, int constant_rhs) {
if (constant_rhs == kUnknownIntValue) return false;
if (op == Token::DIV) return constant_rhs >= 2 && constant_rhs <= 3;
if (op == Token::MOD) {
if (constant_rhs <= 1) return false;
if (constant_rhs <= 10) return true;
if (constant_rhs <= kMaxKnownRhs && IsPowerOf2(constant_rhs)) return true;
return false;
}
return false;
#ifdef DEBUG
void Print() {
PrintF("UnaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
UnaryOpIC::GetName(operand_type_));
}
#endif
int MinorKeyForKnownInt() {
if (!specialized_on_rhs_) return 0;
if (constant_rhs_ <= 10) return constant_rhs_ + 1;
ASSERT(IsPowerOf2(constant_rhs_));
int key = 12;
int d = constant_rhs_;
while ((d & 1) == 0) {
key++;
d >>= 1;
}
ASSERT(key >= 0 && key < (1 << kKnownRhsKeyBits));
return key;
}
class ModeBits: public BitField<UnaryOverwriteMode, 0, 1> {};
class OpBits: public BitField<Token::Value, 1, 7> {};
class OperandTypeInfoBits: public BitField<UnaryOpIC::TypeInfo, 8, 3> {};
int KnownBitsForMinorKey(int key) {
if (!key) return 0;
if (key <= 11) return key - 1;
int d = 1;
while (key != 12) {
key--;
d <<= 1;
}
return d;
Major MajorKey() { return UnaryOp; }
int MinorKey() {
return ModeBits::encode(mode_)
| OpBits::encode(op_)
| OperandTypeInfoBits::encode(operand_type_);
}
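ModeBits/OpBits/OperandTypeInfoBits are instances of V8's BitField<T, shift, size> helper, which packs several small fields into one integer stub key. A self-contained sketch of the idea (a simplified re-implementation for illustration, not the real utils.h template):

#include <cassert>

// Simplified stand-in for v8::internal::BitField.
template <class T, int shift, int size>
struct BitField {
  static constexpr unsigned kMask = ((1u << size) - 1) << shift;
  static unsigned encode(T value) { return static_cast<unsigned>(value) << shift; }
  static T decode(unsigned key) { return static_cast<T>((key & kMask) >> shift); }
};

enum Mode { OVERWRITE = 0, NO_OVERWRITE = 1 };
enum Op   { SUB = 5, BIT_NOT = 6 };  // illustrative token values

using ModeBits = BitField<Mode, 0, 1>;
using OpBits   = BitField<Op, 1, 7>;

int main() {
  unsigned key = ModeBits::encode(NO_OVERWRITE) | OpBits::encode(BIT_NOT);
  assert(ModeBits::decode(key) == NO_OVERWRITE);  // fields round-trip independently
  assert(OpBits::decode(key) == BIT_NOT);
}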
Register LhsRegister(bool lhs_is_r0) {
return lhs_is_r0 ? r0 : r1;
}
// Note: A lot of the helper functions below will vanish when we use virtual
// functions instead of switch statements more often.
void Generate(MacroAssembler* masm);
Register RhsRegister(bool lhs_is_r0) {
return lhs_is_r0 ? r1 : r0;
}
void GenerateTypeTransition(MacroAssembler* masm);
bool HasSmiSmiFastPath() {
return op_ != Token::DIV;
}
void GenerateSmiStub(MacroAssembler* masm);
void GenerateSmiStubSub(MacroAssembler* masm);
void GenerateSmiStubBitNot(MacroAssembler* masm);
void GenerateSmiCodeSub(MacroAssembler* masm, Label* non_smi, Label* slow);
void GenerateSmiCodeBitNot(MacroAssembler* masm, Label* slow);
bool ShouldGenerateSmiCode() {
return ((op_ != Token::DIV && op_ != Token::MOD) || specialized_on_rhs_) &&
runtime_operands_type_ != BinaryOpIC::HEAP_NUMBERS &&
runtime_operands_type_ != BinaryOpIC::STRINGS;
}
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateHeapNumberStubSub(MacroAssembler* masm);
void GenerateHeapNumberStubBitNot(MacroAssembler* masm);
void GenerateHeapNumberCodeSub(MacroAssembler* masm, Label* slow);
void GenerateHeapNumberCodeBitNot(MacroAssembler* masm, Label* slow);
bool ShouldGenerateFPCode() {
return runtime_operands_type_ != BinaryOpIC::STRINGS;
}
void GenerateGenericStub(MacroAssembler* masm);
void GenerateGenericStubSub(MacroAssembler* masm);
void GenerateGenericStubBitNot(MacroAssembler* masm);
void GenerateGenericCodeFallback(MacroAssembler* masm);
virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual int GetCodeKind() { return Code::UNARY_OP_IC; }
virtual InlineCacheState GetICState() {
return BinaryOpIC::ToState(runtime_operands_type_);
return UnaryOpIC::ToState(operand_type_);
}
const char* GetName();
virtual void FinishCode(Code* code) {
code->set_binary_op_type(runtime_operands_type_);
code->set_unary_op_type(operand_type_);
}
#ifdef DEBUG
void Print() {
if (!specialized_on_rhs_) {
PrintF("GenericBinaryOpStub (%s)\n", Token::String(op_));
} else {
PrintF("GenericBinaryOpStub (%s by %d)\n",
Token::String(op_),
constant_rhs_);
}
}
#endif
};
class TypeRecordingBinaryOpStub: public CodeStub {
class BinaryOpStub: public CodeStub {
public:
TypeRecordingBinaryOpStub(Token::Value op, OverwriteMode mode)
BinaryOpStub(Token::Value op, OverwriteMode mode)
: op_(op),
mode_(mode),
operands_type_(TRBinaryOpIC::UNINITIALIZED),
result_type_(TRBinaryOpIC::UNINITIALIZED),
operands_type_(BinaryOpIC::UNINITIALIZED),
result_type_(BinaryOpIC::UNINITIALIZED),
name_(NULL) {
use_vfp3_ = CpuFeatures::IsSupported(VFP3);
ASSERT(OpBits::is_valid(Token::NUM_TOKENS));
}
TypeRecordingBinaryOpStub(
BinaryOpStub(
int key,
TRBinaryOpIC::TypeInfo operands_type,
TRBinaryOpIC::TypeInfo result_type = TRBinaryOpIC::UNINITIALIZED)
BinaryOpIC::TypeInfo operands_type,
BinaryOpIC::TypeInfo result_type = BinaryOpIC::UNINITIALIZED)
: op_(OpBits::decode(key)),
mode_(ModeBits::decode(key)),
use_vfp3_(VFP3Bits::decode(key)),
@ -252,8 +178,8 @@ class TypeRecordingBinaryOpStub: public CodeStub {
bool use_vfp3_;
// Operand type information determined at runtime.
TRBinaryOpIC::TypeInfo operands_type_;
TRBinaryOpIC::TypeInfo result_type_;
BinaryOpIC::TypeInfo operands_type_;
BinaryOpIC::TypeInfo result_type_;
char* name_;
@ -261,12 +187,12 @@ class TypeRecordingBinaryOpStub: public CodeStub {
#ifdef DEBUG
void Print() {
PrintF("TypeRecordingBinaryOpStub %d (op %s), "
PrintF("BinaryOpStub %d (op %s), "
"(mode %d, runtime_type_info %s)\n",
MinorKey(),
Token::String(op_),
static_cast<int>(mode_),
TRBinaryOpIC::GetName(operands_type_));
BinaryOpIC::GetName(operands_type_));
}
#endif
@ -274,10 +200,10 @@ class TypeRecordingBinaryOpStub: public CodeStub {
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 7> {};
class VFP3Bits: public BitField<bool, 9, 1> {};
class OperandTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 10, 3> {};
class ResultTypeInfoBits: public BitField<TRBinaryOpIC::TypeInfo, 13, 3> {};
class OperandTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 10, 3> {};
class ResultTypeInfoBits: public BitField<BinaryOpIC::TypeInfo, 13, 3> {};
Major MajorKey() { return TypeRecordingBinaryOp; }
Major MajorKey() { return BinaryOp; }
int MinorKey() {
return OpBits::encode(op_)
| ModeBits::encode(mode_)
@ -294,6 +220,7 @@ class TypeRecordingBinaryOpStub: public CodeStub {
Label* not_numbers,
Label* gc_required);
void GenerateSmiCode(MacroAssembler* masm,
Label* use_runtime,
Label* gc_required,
SmiCodeGenerateHeapNumberResults heapnumber_results);
void GenerateLoadArguments(MacroAssembler* masm);
@ -304,6 +231,7 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateHeapNumberStub(MacroAssembler* masm);
void GenerateOddballStub(MacroAssembler* masm);
void GenerateStringStub(MacroAssembler* masm);
void GenerateBothStringStub(MacroAssembler* masm);
void GenerateGenericStub(MacroAssembler* masm);
void GenerateAddStrings(MacroAssembler* masm);
void GenerateCallRuntime(MacroAssembler* masm);
@ -318,15 +246,15 @@ class TypeRecordingBinaryOpStub: public CodeStub {
void GenerateTypeTransition(MacroAssembler* masm);
void GenerateTypeTransitionWithSavedArgs(MacroAssembler* masm);
virtual int GetCodeKind() { return Code::TYPE_RECORDING_BINARY_OP_IC; }
virtual int GetCodeKind() { return Code::BINARY_OP_IC; }
virtual InlineCacheState GetICState() {
return TRBinaryOpIC::ToState(operands_type_);
return BinaryOpIC::ToState(operands_type_);
}
virtual void FinishCode(Code* code) {
code->set_type_recording_binary_op_type(operands_type_);
code->set_type_recording_binary_op_result_type(result_type_);
code->set_binary_op_type(operands_type_);
code->set_binary_op_result_type(result_type_);
}
friend class CodeGenerator;
@ -386,8 +314,7 @@ class StringCompareStub: public CodeStub {
public:
StringCompareStub() { }
// Compare two flat ASCII strings and returns result in r0.
// Does not use the stack.
// Compares two flat ASCII strings and returns result in r0.
static void GenerateCompareFlatAsciiStrings(MacroAssembler* masm,
Register left,
Register right,
@ -396,107 +323,27 @@ class StringCompareStub: public CodeStub {
Register scratch3,
Register scratch4);
private:
Major MajorKey() { return StringCompare; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
// This stub can do a fast mod operation without using fp.
// It is tail called from the GenericBinaryOpStub and it always
// returns an answer. It never causes GC so it doesn't need a real frame.
//
// The inputs are always positive Smis. This is never called
// where the denominator is a power of 2. We handle that separately.
//
// If we consider the denominator as an odd number multiplied by a power of 2,
// then:
// * The exponent (power of 2) is in the shift_distance register.
// * The odd number is in the odd_number register. It is always in the range
// of 3 to 25.
// * The bits from the numerator that are to be copied to the answer (there are
// shift_distance of them) are in the mask_bits register.
// * The other bits of the numerator have been shifted down and are in the lhs
// register.
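The splitting described above rests on a small identity: if the denominator is d = odd << k and the numerator is split into its high bits and its low k bits, then n % d = ((n >> k) % odd) << k | (n & (2^k - 1)). A quick host-side check of that identity over the stub's stated operand ranges:

#include <cassert>
#include <cstdint>

// Verifies the decomposition used by IntegerModStub: for a denominator
// d = odd << k, "n % d" follows from the high bits of n reduced modulo odd.
int main() {
  for (uint32_t n = 0; n < 100000; ++n) {
    for (int k = 1; k <= 4; ++k) {
      for (uint32_t odd = 3; odd <= 25; odd += 2) {
        uint32_t d = odd << k;
        uint32_t low = n & ((1u << k) - 1);  // mask_bits
        uint32_t high = n >> k;              // lhs after shifting down
        assert(((high % odd) << k | low) == n % d);
      }
    }
  }
}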
class IntegerModStub : public CodeStub {
public:
IntegerModStub(Register result,
Register shift_distance,
Register odd_number,
Register mask_bits,
Register lhs,
Register scratch)
: result_(result),
shift_distance_(shift_distance),
odd_number_(odd_number),
mask_bits_(mask_bits),
lhs_(lhs),
scratch_(scratch) {
// We don't code these in the minor key, so they should always be the same.
// We don't really want to fix that since this stub is rather large and we
// don't want many copies of it.
ASSERT(shift_distance_.is(r9));
ASSERT(odd_number_.is(r4));
ASSERT(mask_bits_.is(r3));
ASSERT(scratch_.is(r5));
}
// Compares two flat ASCII strings for equality and returns result
// in r0.
static void GenerateFlatAsciiStringEquals(MacroAssembler* masm,
Register left,
Register right,
Register scratch1,
Register scratch2,
Register scratch3);
private:
Register result_;
Register shift_distance_;
Register odd_number_;
Register mask_bits_;
Register lhs_;
Register scratch_;
// Minor key encoding in 16 bits.
class ResultRegisterBits: public BitField<int, 0, 4> {};
class LhsRegisterBits: public BitField<int, 4, 4> {};
Major MajorKey() { return IntegerMod; }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return ResultRegisterBits::encode(result_.code())
| LhsRegisterBits::encode(lhs_.code());
}
void Generate(MacroAssembler* masm);
const char* GetName() { return "IntegerModStub"; }
// Utility functions.
void DigitSum(MacroAssembler* masm,
Register lhs,
int mask,
int shift,
Label* entry);
void DigitSum(MacroAssembler* masm,
Register lhs,
Register scratch,
int mask,
int shift1,
int shift2,
Label* entry);
void ModGetInRangeBySubtraction(MacroAssembler* masm,
Register lhs,
int shift,
int rhs);
void ModReduce(MacroAssembler* masm,
Register lhs,
int max,
int denominator);
void ModAnswer(MacroAssembler* masm,
Register result,
Register shift_distance,
Register mask_bits,
Register sum_of_digits);
#ifdef DEBUG
void Print() { PrintF("IntegerModStub\n"); }
#endif
virtual Major MajorKey() { return StringCompare; }
virtual int MinorKey() { return 0; }
virtual void Generate(MacroAssembler* masm);
static void GenerateAsciiCharsCompareLoop(MacroAssembler* masm,
Register left,
Register right,
Register length,
Register scratch1,
Register scratch2,
Label* chars_not_equal);
};
@ -580,6 +427,9 @@ class RegExpCEntryStub: public CodeStub {
private:
Major MajorKey() { return RegExpCEntry; }
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
const char* GetName() { return "RegExpCEntryStub"; }
};
@ -599,59 +449,210 @@ class DirectCEntryStub: public CodeStub {
private:
Major MajorKey() { return DirectCEntry; }
int MinorKey() { return 0; }
bool NeedsImmovableCode() { return true; }
const char* GetName() { return "DirectCEntryStub"; }
};
// Generate code to load an element from a pixel array. The receiver is assumed
// to not be a smi and to have elements, the caller must guarantee this
// precondition. If key is not a smi, then the generated code branches to
// key_not_smi. Callers can specify NULL for key_not_smi to signal that a smi
// check has already been performed on key so that the smi check is not
// generated. If key is not a valid index within the bounds of the pixel array,
// the generated code jumps to out_of_range. receiver, key and elements are
// unchanged throughout the generated code sequence.
void GenerateFastPixelArrayLoad(MacroAssembler* masm,
Register receiver,
Register key,
Register elements_map,
Register elements,
class FloatingPointHelper : public AllStatic {
public:
enum Destination {
kVFPRegisters,
kCoreRegisters
};
// Loads smis from r0 and r1 (right and left in binary operations) into
// floating point registers. Depending on the destination the values end up
// either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination
// is floating point registers, VFP3 must be supported. If core registers are
// requested when VFP3 is supported, d6 and d7 will be scratched.
static void LoadSmis(MacroAssembler* masm,
Destination destination,
Register scratch1,
Register scratch2);
// Loads objects from r0 and r1 (right and left in binary operations) into
// floating point registers. Depending on the destination the values end up
// either in d7 and d6 or in r2/r3 and r0/r1 respectively. If the destination
// is floating point registers, VFP3 must be supported. If core registers are
// requested when VFP3 is supported, d6 and d7 will still be scratched. If
// either r0 or r1 is not a number (neither a smi nor a heap number object) the
// not_number label is jumped to with r0 and r1 intact.
static void LoadOperands(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register heap_number_map,
Register scratch1,
Register scratch2,
Label* not_number);
// Convert the smi or heap number in object to an int32 using the rules
// for ToInt32 as described in ECMAScript 9.5.: the value is truncated
// and brought into the range -2^31 .. +2^31 - 1.
static void ConvertNumberToInt32(MacroAssembler* masm,
Register object,
Register dst,
Register heap_number_map,
Register scratch1,
Register scratch2,
Register scratch3,
DwVfpRegister double_scratch,
Label* not_int32);
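The truncate-and-wrap rule referenced here is ECMAScript's ToInt32: truncate toward zero, take the result modulo 2^32, and reinterpret it as a signed 32-bit value. A host-side reference implementation, useful for checking what the generated code must produce:

#include <cassert>
#include <cmath>
#include <cstdint>

// ECMAScript ToInt32 (ES5 9.5) on a double.
int32_t ToInt32(double value) {
  if (!std::isfinite(value) || value == 0) return 0;       // NaN, +/-Inf, +/-0 -> 0
  double truncated = std::trunc(value);
  double wrapped = std::fmod(truncated, 4294967296.0);     // into (-2^32, 2^32)
  if (wrapped < 0) wrapped += 4294967296.0;                 // into [0, 2^32)
  if (wrapped >= 2147483648.0) wrapped -= 4294967296.0;     // into [-2^31, 2^31)
  return static_cast<int32_t>(wrapped);
}

int main() {
  assert(ToInt32(3.7) == 3);
  assert(ToInt32(-3.7) == -3);
  assert(ToInt32(4294967296.0 + 5.0) == 5);      // wraps modulo 2^32
  assert(ToInt32(2147483648.0) == INT32_MIN);    // 2^31 wraps to the negative end
}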
// Converts the integer (untagged smi) in |int_scratch| to a double, storing
// the result either in |double_dst| or |dst2:dst1|, depending on
// |destination|.
// Warning: The value in |int_scratch| will be changed in the process!
static void ConvertIntToDouble(MacroAssembler* masm,
Register int_scratch,
Destination destination,
DwVfpRegister double_dst,
Register dst1,
Register dst2,
Register scratch2,
SwVfpRegister single_scratch);
// Load the number from object into double_dst in the double format.
// Control will jump to not_int32 if the value cannot be exactly represented
// by a 32-bit integer.
// Floating point values in the 32-bit integer range that are not exact
// integers won't be loaded.
static void LoadNumberAsInt32Double(MacroAssembler* masm,
Register object,
Destination destination,
DwVfpRegister double_dst,
Register dst1,
Register dst2,
Register heap_number_map,
Register scratch1,
Register scratch2,
SwVfpRegister single_scratch,
Label* not_int32);
// Loads the number from object into dst as a 32-bit integer.
// Control will jump to not_int32 if the object cannot be exactly represented
// by a 32-bit integer.
// Floating point values in the 32-bit integer range that are not exact
// integers won't be converted.
// scratch3 is not used when VFP3 is supported.
static void LoadNumberAsInt32(MacroAssembler* masm,
Register object,
Register dst,
Register heap_number_map,
Register scratch1,
Register scratch2,
Register result,
Label* not_pixel_array,
Label* key_not_smi,
Label* out_of_range);
// Generate code to store an element into a pixel array, clamping values between
// [0..255]. The receiver is assumed to not be a smi and to have elements, the
// caller must guarantee this precondition. If key is not a smi, then the
// generated code branches to key_not_smi. Callers can specify NULL for
// key_not_smi to signal that a smi check has already been performed on key so
// that the smi check is not generated. If value is not a smi, the generated
// code will branch to value_not_smi. If the receiver doesn't have pixel array
// elements, the generated code will branch to not_pixel_array, unless
// not_pixel_array is NULL, in which case the caller must ensure that the
// receiver has pixel array elements. If key is not a valid index within the
// bounds of the pixel array, the generated code jumps to out_of_range. If
// load_elements_from_receiver is true, then the elements of receiver is loaded
// into elements, otherwise elements is assumed to already be the receiver's
// elements. If load_elements_map_from_elements is true, elements_map is loaded
// from elements, otherwise it is assumed to already contain the element map.
void GenerateFastPixelArrayStore(MacroAssembler* masm,
Register receiver,
Register key,
Register value,
Register elements,
Register elements_map,
Register scratch1,
Register scratch2,
bool load_elements_from_receiver,
bool load_elements_map_from_elements,
Label* key_not_smi,
Label* value_not_smi,
Label* not_pixel_array,
Label* out_of_range);
Register scratch3,
DwVfpRegister double_scratch,
Label* not_int32);
// Generate non VFP3 code to check if a double can be exactly represented by a
// 32-bit integer. This does not check for 0 or -0, which need
// to be checked for separately.
// Control jumps to not_int32 if the value is not a 32-bit integer, and falls
// through otherwise.
// src1 and src2 will be clobbered.
//
// Expected input:
// - src1: higher (exponent) part of the double value.
// - src2: lower (mantissa) part of the double value.
// Output status:
// - dst: 32 higher bits of the mantissa. (mantissa[51:20])
// - src2: contains 1.
// - other registers are clobbered.
static void DoubleIs32BitInteger(MacroAssembler* masm,
Register src1,
Register src2,
Register dst,
Register scratch,
Label* not_int32);
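The non-VFP3 path has to answer "is this double an exact 32-bit integer?" from the raw exponent and mantissa words alone. A host-side sketch of that test (ignoring 0/-0, which the comment above says are checked separately, and conservatively rejecting the -2^31 boundary):

#include <cassert>
#include <cstdint>
#include <cstring>

// Sketch only: true if a non-zero finite double is an exact integer whose
// magnitude fits in 31 bits.  0/-0 and the -2^31 corner are left out.
bool DoubleIs32BitInteger(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  int exponent = static_cast<int>((bits >> 52) & 0x7FF) - 1023;
  if (exponent < 0 || exponent > 30) return false;        // |value| < 1 or >= 2^31
  uint64_t fraction_mask = (uint64_t{1} << (52 - exponent)) - 1;
  return (bits & fraction_mask) == 0;                      // no fractional bits set
}

int main() {
  assert(DoubleIs32BitInteger(1.0));
  assert(DoubleIs32BitInteger(1073741824.0));   // 2^30
  assert(DoubleIs32BitInteger(-5.0));
  assert(!DoubleIs32BitInteger(0.5));
  assert(!DoubleIs32BitInteger(3.25));
  assert(!DoubleIs32BitInteger(4294967296.0));  // 2^32, out of range
}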
// Generates code to call a C function to do a double operation using core
// registers. (Used when VFP3 is not supported.)
// This code never falls through, but returns with a heap number containing
// the result in r0.
// Register heapnumber_result must be a heap number in which the
// result of the operation will be stored.
// Requires the following layout on entry:
// r0: Left value (least significant part of mantissa).
// r1: Left value (sign, exponent, top of mantissa).
// r2: Right value (least significant part of mantissa).
// r3: Right value (sign, exponent, top of mantissa).
static void CallCCodeForDoubleOperation(MacroAssembler* masm,
Token::Value op,
Register heap_number_result,
Register scratch);
private:
static void LoadNumber(MacroAssembler* masm,
FloatingPointHelper::Destination destination,
Register object,
DwVfpRegister dst,
Register dst1,
Register dst2,
Register heap_number_map,
Register scratch1,
Register scratch2,
Label* not_number);
};
class StringDictionaryLookupStub: public CodeStub {
public:
enum LookupMode { POSITIVE_LOOKUP, NEGATIVE_LOOKUP };
explicit StringDictionaryLookupStub(LookupMode mode) : mode_(mode) { }
void Generate(MacroAssembler* masm);
MUST_USE_RESULT static MaybeObject* GenerateNegativeLookup(
MacroAssembler* masm,
Label* miss,
Label* done,
Register receiver,
Register properties,
String* name,
Register scratch0);
static void GeneratePositiveLookup(MacroAssembler* masm,
Label* miss,
Label* done,
Register elements,
Register name,
Register r0,
Register r1);
private:
static const int kInlinedProbes = 4;
static const int kTotalProbes = 20;
static const int kCapacityOffset =
StringDictionary::kHeaderSize +
StringDictionary::kCapacityIndex * kPointerSize;
static const int kElementsStartOffset =
StringDictionary::kHeaderSize +
StringDictionary::kElementsStartIndex * kPointerSize;
#ifdef DEBUG
void Print() {
PrintF("StringDictionaryLookupStub\n");
}
#endif
Major MajorKey() { return StringDictionaryNegativeLookup; }
int MinorKey() {
return LookupModeBits::encode(mode_);
}
class LookupModeBits: public BitField<LookupMode, 0, 1> {};
LookupMode mode_;
};
} } // namespace v8::internal

48
deps/v8/src/arm/codegen-arm-inl.h

@ -1,48 +0,0 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM_CODEGEN_ARM_INL_H_
#define V8_ARM_CODEGEN_ARM_INL_H_
#include "virtual-frame-arm.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm_)
// Platform-specific inline functions.
void DeferredCode::Jump() { __ jmp(&entry_label_); }
void DeferredCode::Branch(Condition cond) { __ b(cond, &entry_label_); }
#undef __
} } // namespace v8::internal
#endif // V8_ARM_CODEGEN_ARM_INL_H_

7360
deps/v8/src/arm/codegen-arm.cc

File diff suppressed because it is too large

512
deps/v8/src/arm/codegen-arm.h

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -37,162 +37,8 @@ namespace internal {
// Forward declarations
class CompilationInfo;
class DeferredCode;
class JumpTarget;
class RegisterAllocator;
class RegisterFile;
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
enum GenerateInlineSmi { DONT_GENERATE_INLINE_SMI, GENERATE_INLINE_SMI };
enum WriteBarrierCharacter { UNLIKELY_SMI, LIKELY_SMI, NEVER_NEWSPACE };
// -------------------------------------------------------------------------
// Reference support
// A reference is a C++ stack-allocated object that puts a
// reference on the virtual frame. The reference may be consumed
// by GetValue, TakeValue, SetValue, and Codegen::UnloadReference.
// When the lifetime (scope) of a valid reference ends, it must have
// been consumed, and be in state UNLOADED.
class Reference BASE_EMBEDDED {
public:
// The values of the types is important, see size().
enum Type { UNLOADED = -2, ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
Reference(CodeGenerator* cgen,
Expression* expression,
bool persist_after_get = false);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
ASSERT_EQ(ILLEGAL, type_);
type_ = value;
}
void set_unloaded() {
ASSERT_NE(ILLEGAL, type_);
ASSERT_NE(UNLOADED, type_);
type_ = UNLOADED;
}
// The size the reference takes up on the stack.
int size() const {
return (type_ < SLOT) ? 0 : type_;
}
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
bool is_unloaded() const { return type_ == UNLOADED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is consumed by the call unless the
// reference is for a compound assignment.
// If the reference is not consumed, it is left in place under its value.
void GetValue();
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
// on the expression stack. The value is stored in the location specified
// by the reference, and is left on top of the stack, after the reference
// is popped from beneath it (unloaded).
void SetValue(InitState init_state, WriteBarrierCharacter wb);
// This is in preparation for something that uses the reference on the stack.
// If we need this reference afterwards get then dup it now. Otherwise mark
// it as used.
inline void DupIfPersist();
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
// Keep the reference on the stack after get, so it can be used by set later.
bool persist_after_get_;
};
// -------------------------------------------------------------------------
// Code generation state
// The state is passed down the AST by the code generator (and back up, in
// the form of the state of the label pair). It is threaded through the
// call stack. Constructing a state implicitly pushes it on the owning code
// generator's stack of states, and destroying one implicitly pops it.
class CodeGenState BASE_EMBEDDED {
public:
// Create an initial code generator state. Destroying the initial state
// leaves the code generator with a NULL state.
explicit CodeGenState(CodeGenerator* owner);
// Destroy a code generator state and restore the owning code generator's
// previous state.
virtual ~CodeGenState();
virtual JumpTarget* true_target() const { return NULL; }
virtual JumpTarget* false_target() const { return NULL; }
protected:
inline CodeGenerator* owner() { return owner_; }
inline CodeGenState* previous() const { return previous_; }
private:
CodeGenerator* owner_;
CodeGenState* previous_;
};
class ConditionCodeGenState : public CodeGenState {
public:
// Create a code generator state based on a code generator's current
// state. The new state has its own pair of branch labels.
ConditionCodeGenState(CodeGenerator* owner,
JumpTarget* true_target,
JumpTarget* false_target);
virtual JumpTarget* true_target() const { return true_target_; }
virtual JumpTarget* false_target() const { return false_target_; }
private:
JumpTarget* true_target_;
JumpTarget* false_target_;
};
class TypeInfoCodeGenState : public CodeGenState {
public:
TypeInfoCodeGenState(CodeGenerator* owner,
Slot* slot_number,
TypeInfo info);
~TypeInfoCodeGenState();
virtual JumpTarget* true_target() const { return previous()->true_target(); }
virtual JumpTarget* false_target() const {
return previous()->false_target();
}
private:
Slot* slot_;
TypeInfo old_type_info_;
};
// -------------------------------------------------------------------------
// Arguments allocation mode
enum ArgumentsAllocationMode {
NO_ARGUMENTS_ALLOCATION,
EAGER_ARGUMENTS_ALLOCATION,
LAZY_ARGUMENTS_ALLOCATION
};
// -------------------------------------------------------------------------
// CodeGenerator
@ -225,367 +71,17 @@ class CodeGenerator: public AstVisitor {
int pos,
bool right_here = false);
// Accessors
MacroAssembler* masm() { return masm_; }
VirtualFrame* frame() const { return frame_; }
inline Handle<Script> script();
bool has_valid_frame() const { return frame_ != NULL; }
// Set the virtual frame to be new_frame, with non-frame register
// reference counts given by non_frame_registers. The non-frame
// register reference counts of the old frame are returned in
// non_frame_registers.
void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
void DeleteFrame();
RegisterAllocator* allocator() const { return allocator_; }
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
TypeInfo type_info(Slot* slot) {
int index = NumberOfSlot(slot);
if (index == kInvalidSlotNumber) return TypeInfo::Unknown();
return (*type_info_)[index];
}
TypeInfo set_type_info(Slot* slot, TypeInfo info) {
int index = NumberOfSlot(slot);
ASSERT(index >= kInvalidSlotNumber);
if (index != kInvalidSlotNumber) {
TypeInfo previous_value = (*type_info_)[index];
(*type_info_)[index] = info;
return previous_value;
}
return TypeInfo::Unknown();
}
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
// Constants related to patching of inlined load/store.
static int GetInlinedKeyedLoadInstructionsAfterPatch() {
return FLAG_debug_code ? 32 : 13;
}
static const int kInlinedKeyedStoreInstructionsAfterPatch = 5;
static const int kInlinedKeyedStoreInstructionsAfterPatch = 8;
static int GetInlinedNamedStoreInstructionsAfterPatch() {
ASSERT(inlined_write_barrier_size_ != -1);
return inlined_write_barrier_size_ + 4;
ASSERT(Isolate::Current()->inlined_write_barrier_size() != -1);
return Isolate::Current()->inlined_write_barrier_size() + 4;
}
private:
// Type of a member function that generates inline code for a native function.
typedef void (CodeGenerator::*InlineFunctionGenerator)
(ZoneList<Expression*>*);
static const InlineFunctionGenerator kInlineFunctionGenerators[];
// Construction/Destruction
explicit CodeGenerator(MacroAssembler* masm);
// Accessors
inline bool is_eval();
inline Scope* scope();
inline StrictModeFlag strict_mode_flag();
// Generating deferred code.
void ProcessDeferred();
static const int kInvalidSlotNumber = -1;
int NumberOfSlot(Slot* slot);
// State
bool has_cc() const { return cc_reg_ != al; }
JumpTarget* true_target() const { return state_->true_target(); }
JumpTarget* false_target() const { return state_->false_target(); }
// Track loop nesting level.
int loop_nesting() const { return loop_nesting_; }
void IncrementLoopNesting() { loop_nesting_++; }
void DecrementLoopNesting() { loop_nesting_--; }
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
virtual void VisitSlot(Slot* node);
#define DEF_VISIT(type) \
virtual void Visit##type(type* node);
AST_NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Main code generation function
void Generate(CompilationInfo* info);
// Generate the return sequence code. Should be called no more than
// once per compiled function, immediately after binding the return
// target (which can not be done more than once). The return value should
// be in r0.
void GenerateReturnSequence();
// Returns the arguments allocation mode.
ArgumentsAllocationMode ArgumentsMode();
// Store the arguments object and allocate it if necessary.
void StoreArgumentsObject(bool initial);
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
MemOperand SlotOperand(Slot* slot, Register tmp);
MemOperand ContextSlotOperandCheckExtensions(Slot* slot,
Register tmp,
Register tmp2,
JumpTarget* slow);
// Expressions
void LoadCondition(Expression* x,
JumpTarget* true_target,
JumpTarget* false_target,
bool force_cc);
void Load(Expression* expr);
void LoadGlobal();
void LoadGlobalReceiver(Register scratch);
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
void LoadFromSlotCheckForArguments(Slot* slot, TypeofState state);
// Store the value on top of the stack to a slot.
void StoreToSlot(Slot* slot, InitState init_state);
// Support for compiling assignment expressions.
void EmitSlotAssignment(Assignment* node);
void EmitNamedPropertyAssignment(Assignment* node);
void EmitKeyedPropertyAssignment(Assignment* node);
// Load a named property, returning it in r0. The receiver is passed on the
// stack, and remains there.
void EmitNamedLoad(Handle<String> name, bool is_contextual);
// Store to a named property. If the store is contextual, value is passed on
// the frame and consumed. Otherwise, receiver and value are passed on the
// frame and consumed. The result is returned in r0.
void EmitNamedStore(Handle<String> name, bool is_contextual);
// Load a keyed property, leaving it in r0. The receiver and key are
// passed on the stack, and remain there.
void EmitKeyedLoad();
// Store a keyed property. Key and receiver are on the stack and the value is
// in r0. Result is returned in r0.
void EmitKeyedStore(StaticType* key_type, WriteBarrierCharacter wb_info);
void LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow);
// Support for loading from local/global variables and arguments
// whose location is known unless they are shadowed by
// eval-introduced bindings. Generates no code for unsupported slot
// types and therefore expects to fall through to the slow jump target.
void EmitDynamicLoadFromSlotFastCase(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow,
JumpTarget* done);
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
// non-existing properties of the global object, so we must make it
// look like an explicit property access, instead of an access
// through the context chain.
void LoadTypeofExpression(Expression* x);
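// (Illustrative note added for clarity, not in the original source: e.g.
// `typeof some_undeclared_global` must evaluate to "undefined" rather than
// throw a ReferenceError, which is why the operand is loaded as a property
// access on the global object instead of via a context chain lookup that
// would throw.)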
void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
// Generate code that computes a shortcutting logical operation.
void GenerateLogicalBooleanOperation(BinaryOperation* node);
void GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode,
GenerateInlineSmi inline_smi,
int known_rhs =
GenericBinaryOpStub::kUnknownIntValue);
void Comparison(Condition cc,
Expression* left,
Expression* right,
bool strict = false);
void SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed,
OverwriteMode mode);
void CallWithArguments(ZoneList<Expression*>* arguments,
CallFunctionFlags flags,
int position);
// An optimized implementation of expressions of the form
// x.apply(y, arguments). We call x the applicand and y the receiver.
// The optimization avoids allocating an arguments object if possible.
void CallApplyLazy(Expression* applicand,
Expression* receiver,
VariableProxy* arguments,
int position);
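// (Illustrative note, not in the original source: for a call such as
// `f.apply(obj, arguments)` inside a function body, the generated code can
// hand the caller's actual parameters to f directly instead of first
// materializing an arguments object, provided `arguments` is only used in
// this pattern.)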
// Control flow
void Branch(bool if_true, JumpTarget* target);
void CheckStack();
bool CheckForInlineRuntimeCall(CallRuntime* node);
static Handle<Code> ComputeLazyCompile(int argc);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
// Declare global variables and functions in the given array of
// name/value pairs.
void DeclareGlobals(Handle<FixedArray> pairs);
// Instantiate the function based on the shared function info.
void InstantiateFunction(Handle<SharedFunctionInfo> function_info,
bool pretenure);
// Support for type checks.
void GenerateIsSmi(ZoneList<Expression*>* args);
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
void GenerateIsRegExp(ZoneList<Expression*>* args);
void GenerateIsObject(ZoneList<Expression*>* args);
void GenerateIsSpecObject(ZoneList<Expression*>* args);
void GenerateIsFunction(ZoneList<Expression*>* args);
void GenerateIsUndetectableObject(ZoneList<Expression*>* args);
void GenerateIsStringWrapperSafeForDefaultValueOf(
ZoneList<Expression*>* args);
// Support for construct call checks.
void GenerateIsConstructCall(ZoneList<Expression*>* args);
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArguments(ZoneList<Expression*>* args);
// Support for accessing the class and value fields of an object.
void GenerateClassOf(ZoneList<Expression*>* args);
void GenerateValueOf(ZoneList<Expression*>* args);
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
void GenerateStringCharCodeAt(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
void GenerateStringCharFromCode(ZoneList<Expression*>* args);
// Fast support for string.charAt(n) and string[n].
void GenerateStringCharAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
void GenerateLog(ZoneList<Expression*>* args);
// Fast support for Math.random().
void GenerateRandomHeapNumber(ZoneList<Expression*>* args);
// Fast support for StringAdd.
void GenerateStringAdd(ZoneList<Expression*>* args);
// Fast support for SubString.
void GenerateSubString(ZoneList<Expression*>* args);
// Fast support for StringCompare.
void GenerateStringCompare(ZoneList<Expression*>* args);
// Support for direct calls from JavaScript to native RegExp code.
void GenerateRegExpExec(ZoneList<Expression*>* args);
void GenerateRegExpConstructResult(ZoneList<Expression*>* args);
// Support for fast native caches.
void GenerateGetFromCache(ZoneList<Expression*>* args);
// Fast support for number to string.
void GenerateNumberToString(ZoneList<Expression*>* args);
// Fast swapping of elements.
void GenerateSwapElements(ZoneList<Expression*>* args);
// Fast call for custom callbacks.
void GenerateCallFunction(ZoneList<Expression*>* args);
// Fast call to math functions.
void GenerateMathPow(ZoneList<Expression*>* args);
void GenerateMathSin(ZoneList<Expression*>* args);
void GenerateMathCos(ZoneList<Expression*>* args);
void GenerateMathSqrt(ZoneList<Expression*>* args);
void GenerateMathLog(ZoneList<Expression*>* args);
void GenerateIsRegExpEquivalent(ZoneList<Expression*>* args);
void GenerateHasCachedArrayIndex(ZoneList<Expression*>* args);
void GenerateGetCachedArrayIndex(ZoneList<Expression*>* args);
void GenerateFastAsciiArrayJoin(ZoneList<Expression*>* args);
// Simple condition analysis.
enum ConditionAnalysis {
ALWAYS_TRUE,
ALWAYS_FALSE,
DONT_KNOW
};
ConditionAnalysis AnalyzeCondition(Expression* cond);
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Statement* node);
void CodeForDoWhileConditionPosition(DoWhileStatement* stmt);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
// True if the registers are valid for entry to a block.
bool HasValidEntryRegisters();
#endif
List<DeferredCode*> deferred_;
// Assembler
MacroAssembler* masm_; // to generate code
CompilationInfo* info_;
// Code generation state
VirtualFrame* frame_;
RegisterAllocator* allocator_;
Condition cc_reg_;
CodeGenState* state_;
int loop_nesting_;
Vector<TypeInfo>* type_info_;
// Jump targets
BreakTarget function_return_;
// True if the function return is shadowed (i.e., jumping to the target
// function_return_ does not jump to the true function return, but rather
// to some unlinking code).
bool function_return_is_shadowed_;
// Size of inlined write barriers generated by EmitNamedStore.
static int inlined_write_barrier_size_;
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
friend class FastCodeGenerator;
friend class FullCodeGenerator;
friend class FullCodeGenSyntaxChecker;
friend class LCodeGen;
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};

24
deps/v8/src/arm/constants-arm.h

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -28,12 +28,9 @@
#ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_ARM_CONSTANTS_ARM_H_
// The simulator emulates the EABI so we define the USE_ARM_EABI macro if we
// are not running on real ARM hardware. One reason for this is that the
// old ABI uses fp registers in the calling convention and the simulator does
// not simulate fp registers or coroutine instructions.
#if defined(__ARM_EABI__) || !defined(__arm__)
# define USE_ARM_EABI 1
// ARM EABI is required.
#if defined(__arm__) && !defined(__ARM_EABI__)
#error ARM EABI support is required.
#endif
// This means that interwork-compatible jump instructions are generated. We
@ -89,6 +86,11 @@
namespace v8 {
namespace internal {
// Constant pool marker.
static const int kConstantPoolMarkerMask = 0xffe00000;
static const int kConstantPoolMarker = 0x0c000000;
static const int kConstantPoolLengthMask = 0x001ffff;
// Number of registers in normal ARM mode.
static const int kNumRegisters = 16;
@ -341,7 +343,9 @@ enum BlockAddrMode {
da_x = (0|0|0) << 21, // Decrement after.
ia_x = (0|4|0) << 21, // Increment after.
db_x = (8|0|0) << 21, // Decrement before.
ib_x = (8|4|0) << 21 // Increment before.
ib_x = (8|4|0) << 21, // Increment before.
kBlockAddrModeMask = (8|4|1) << 21
};
@ -388,9 +392,11 @@ enum VFPConversionMode {
// This mask does not include the "inexact" or "input denormal" cumulative
// exceptions flags, because we usually don't want to check for it.
static const uint32_t kVFPExceptionMask = 0xf;
static const uint32_t kVFPInvalidOpExceptionBit = 1 << 0;
static const uint32_t kVFPOverflowExceptionBit = 1 << 2;
static const uint32_t kVFPUnderflowExceptionBit = 1 << 3;
static const uint32_t kVFPInexactExceptionBit = 1 << 4;
static const uint32_t kVFPFlushToZeroMask = 1 << 24;
static const uint32_t kVFPInvalidExceptionBit = 1;
static const uint32_t kVFPNConditionFlagBit = 1 << 31;
static const uint32_t kVFPZConditionFlagBit = 1 << 30;
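As a side note on how these cumulative-exception constants are meant to be used, here is a minimal standalone sketch (not part of this diff; the fpscr value is a made-up constant rather than an actual register read):

#include <cstdint>
#include <cstdio>

int main() {
  // Same bit layout as the kVFP* constants above.
  const uint32_t kVFPExceptionMask = 0xf;
  const uint32_t kVFPOverflowExceptionBit = 1 << 2;
  uint32_t fpscr = kVFPOverflowExceptionBit;  // pretend an overflow was raised
  bool any_checked = (fpscr & kVFPExceptionMask) != 0;
  std::printf("any_checked=%d overflow=%d\n",
              any_checked ? 1 : 0,
              (fpscr & kVFPOverflowExceptionBit) != 0 ? 1 : 0);
  return 0;
}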

93
deps/v8/src/arm/cpu-arm.cc

@ -42,10 +42,12 @@ namespace v8 {
namespace internal {
void CPU::Setup() {
CpuFeatures::Probe(true);
if (!CpuFeatures::IsSupported(VFP3) || Serializer::enabled()) {
V8::DisableCrankshaft();
}
CpuFeatures::Probe();
}
bool CPU::SupportsCrankshaft() {
return CpuFeatures::IsSupported(VFP3);
}
@ -61,7 +63,7 @@ void CPU::FlushICache(void* start, size_t size) {
// that the Icache was flushed.
// None of this code ends up in the snapshot so there are no issues
// around whether or not to generate the code when building snapshots.
Simulator::FlushICache(start, size);
Simulator::FlushICache(Isolate::Current()->simulator_i_cache(), start, size);
#else
// Ideally, we would call
// syscall(__ARM_NR_cacheflush, start,
@ -73,62 +75,33 @@ void CPU::FlushICache(void* start, size_t size) {
register uint32_t end asm("a2") =
reinterpret_cast<uint32_t>(start) + size;
register uint32_t flg asm("a3") = 0;
#ifdef __ARM_EABI__
#if defined (__arm__) && !defined(__thumb__)
// __arm__ may be defined in thumb mode.
register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
asm volatile(
"svc 0x0"
: "=r" (beg)
: "0" (beg), "r" (end), "r" (flg), "r" (scno));
#else
// r7 is reserved by the EABI in thumb mode.
asm volatile(
"@ Enter ARM Mode \n\t"
"adr r3, 1f \n\t"
"bx r3 \n\t"
".ALIGN 4 \n\t"
".ARM \n"
"1: push {r7} \n\t"
"mov r7, %4 \n\t"
"svc 0x0 \n\t"
"pop {r7} \n\t"
"@ Enter THUMB Mode\n\t"
"adr r3, 2f+1 \n\t"
"bx r3 \n\t"
".THUMB \n"
"2: \n\t"
: "=r" (beg)
: "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
: "r3");
#endif
#if defined (__arm__) && !defined(__thumb__)
// __arm__ may be defined in thumb mode.
register uint32_t scno asm("r7") = __ARM_NR_cacheflush;
asm volatile(
"svc 0x0"
: "=r" (beg)
: "0" (beg), "r" (end), "r" (flg), "r" (scno));
#else
#if defined (__arm__) && !defined(__thumb__)
// __arm__ may be defined in thumb mode.
asm volatile(
"svc %1"
: "=r" (beg)
: "i" (__ARM_NR_cacheflush), "0" (beg), "r" (end), "r" (flg));
#else
// Do not use the value of __ARM_NR_cacheflush in the inline assembly
// below, because the thumb mode value would be used, which would be
// wrong, since we switch to ARM mode before executing the svc instruction
asm volatile(
"@ Enter ARM Mode \n\t"
"adr r3, 1f \n\t"
"bx r3 \n\t"
".ALIGN 4 \n\t"
".ARM \n"
"1: svc 0x9f0002 \n"
"@ Enter THUMB Mode\n\t"
"adr r3, 2f+1 \n\t"
"bx r3 \n\t"
".THUMB \n"
"2: \n\t"
: "=r" (beg)
: "0" (beg), "r" (end), "r" (flg)
: "r3");
#endif
// r7 is reserved by the EABI in thumb mode.
asm volatile(
"@ Enter ARM Mode \n\t"
"adr r3, 1f \n\t"
"bx r3 \n\t"
".ALIGN 4 \n\t"
".ARM \n"
"1: push {r7} \n\t"
"mov r7, %4 \n\t"
"svc 0x0 \n\t"
"pop {r7} \n\t"
"@ Enter THUMB Mode\n\t"
"adr r3, 2f+1 \n\t"
"bx r3 \n\t"
".THUMB \n"
"2: \n\t"
: "=r" (beg)
: "0" (beg), "r" (end), "r" (flg), "r" (__ARM_NR_cacheflush)
: "r3");
#endif
#endif
}
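For readers porting this outside of V8: the hand-written svc sequences above can usually be replaced by the compiler builtin, sketched below under the assumption of a GCC/Clang toolchain (FlushICacheSketch is an illustrative name, not a V8 API):

#include <cstddef>

void FlushICacheSketch(void* start, std::size_t size) {
  char* begin = reinterpret_cast<char*>(start);
  // Emits the appropriate cache maintenance for the target, including the
  // cacheflush syscall on ARM Linux.
  __builtin___clear_cache(begin, begin + size);
}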

14
deps/v8/src/arm/debug-arm.cc

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -29,7 +29,7 @@
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen-inl.h"
#include "codegen.h"
#include "debug.h"
namespace v8 {
@ -65,7 +65,7 @@ void BreakLocationIterator::SetDebugBreakAtReturn() {
patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
#endif
patcher.Emit(Debug::debug_break_return()->entry());
patcher.Emit(Isolate::Current()->debug()->debug_break_return()->entry());
patcher.masm()->bkpt(0);
}
@ -115,7 +115,7 @@ void BreakLocationIterator::SetDebugBreakAtSlot() {
patcher.masm()->mov(v8::internal::lr, v8::internal::pc);
patcher.masm()->ldr(v8::internal::pc, MemOperand(v8::internal::pc, -4));
#endif
patcher.Emit(Debug::debug_break_slot()->entry());
patcher.Emit(Isolate::Current()->debug()->debug_break_slot()->entry());
}
@ -159,7 +159,7 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
__ RecordComment("// Calling from debug break to runtime - come in - over");
#endif
__ mov(r0, Operand(0, RelocInfo::NONE)); // no arguments
__ mov(r1, Operand(ExternalReference::debug_break()));
__ mov(r1, Operand(ExternalReference::debug_break(masm->isolate())));
CEntryStub ceb(1);
__ CallStub(&ceb);
@ -185,7 +185,9 @@ static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
// Now that the break point has been handled, resume normal execution by
// jumping to the target address intended by the caller and that was
// overwritten by the address of DebugBreakXXX.
__ mov(ip, Operand(ExternalReference(Debug_Address::AfterBreakTarget())));
ExternalReference after_break_target =
ExternalReference(Debug_Address::AfterBreakTarget(), masm->isolate());
__ mov(ip, Operand(after_break_target));
__ ldr(ip, MemOperand(ip));
__ Jump(ip);
}

80
deps/v8/src/arm/deoptimizer-arm.cc

@ -51,6 +51,7 @@ void Deoptimizer::EnsureRelocSpaceForLazyDeoptimization(Handle<Code> code) {
void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
HandleScope scope;
AssertNoAllocation no_allocation;
if (!function->IsOptimized()) return;
@ -74,8 +75,6 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
int deoptimization_index = safepoint_entry.deoptimization_index();
int gap_code_size = safepoint_entry.gap_code_size();
// Check that we did not shoot past next safepoint.
// TODO(srdjan): How do we guarantee that safepoint code does not
// overlap other safepoint patching code?
CHECK(pc_offset >= last_pc_offset);
#ifdef DEBUG
// Destroy the code which is not supposed to be run again.
@ -112,8 +111,9 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
// Add the deoptimizing code to the list.
DeoptimizingCodeListNode* node = new DeoptimizingCodeListNode(code);
node->set_next(deoptimizing_code_list_);
deoptimizing_code_list_ = node;
DeoptimizerData* data = code->GetIsolate()->deoptimizer_data();
node->set_next(data->deoptimizing_code_list_);
data->deoptimizing_code_list_ = node;
// Set the code for the function to non-optimized version.
function->ReplaceCode(function->shared()->code());
@ -122,6 +122,11 @@ void Deoptimizer::DeoptimizeFunction(JSFunction* function) {
PrintF("[forced deoptimization: ");
function->PrintName();
PrintF(" / %x]\n", reinterpret_cast<uint32_t>(function));
#ifdef DEBUG
if (FLAG_print_code) {
code->PrintLn();
}
#endif
}
}
@ -283,14 +288,33 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
// There are no translation commands for the caller's pc and fp, the
// context, and the function. Set them up explicitly.
for (int i = 0; ok && i < 4; i++) {
for (int i = StandardFrameConstants::kCallerPCOffset;
ok && i >= StandardFrameConstants::kMarkerOffset;
i -= kPointerSize) {
uint32_t input_value = input_->GetFrameSlot(input_offset);
if (FLAG_trace_osr) {
PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part)\n",
const char* name = "UNKNOWN";
switch (i) {
case StandardFrameConstants::kCallerPCOffset:
name = "caller's pc";
break;
case StandardFrameConstants::kCallerFPOffset:
name = "fp";
break;
case StandardFrameConstants::kContextOffset:
name = "context";
break;
case StandardFrameConstants::kMarkerOffset:
name = "function";
break;
}
PrintF(" [sp + %d] <- 0x%08x ; [sp + %d] (fixed part - %s)\n",
output_offset,
input_value,
input_offset);
input_offset,
name);
}
output_[0]->SetFrameSlot(output_offset, input_->GetFrameSlot(input_offset));
input_offset -= kPointerSize;
output_offset -= kPointerSize;
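// (Editorial note, not part of the diff: assuming the usual 32-bit
// StandardFrameConstants layout, the four fixed slots walked here are the
// caller's pc at fp+4, the caller's fp at fp+0, the context at fp-4 and the
// function marker at fp-8, visited in kPointerSize steps from
// kCallerPCOffset down to kMarkerOffset.)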
@ -316,7 +340,7 @@ void Deoptimizer::DoComputeOsrOutputFrame() {
optimized_code_->entry() + pc_offset);
output_[0]->SetPc(pc);
}
Code* continuation = Builtins::builtin(Builtins::NotifyOSR);
Code* continuation = isolate_->builtins()->builtin(Builtins::kNotifyOSR);
output_[0]->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
@ -490,11 +514,13 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
FullCodeGenerator::StateField::decode(pc_and_state);
output_frame->SetState(Smi::FromInt(state));
// Set the continuation for the topmost frame.
if (is_topmost) {
Builtins* builtins = isolate_->builtins();
Code* continuation = (bailout_type_ == EAGER)
? Builtins::builtin(Builtins::NotifyDeoptimized)
: Builtins::builtin(Builtins::NotifyLazyDeoptimized);
? builtins->builtin(Builtins::kNotifyDeoptimized)
: builtins->builtin(Builtins::kNotifyLazyDeoptimized);
output_frame->SetContinuation(
reinterpret_cast<uint32_t>(continuation->entry()));
}
@ -510,6 +536,9 @@ void Deoptimizer::DoComputeFrame(TranslationIterator* iterator,
// easily ported.
void Deoptimizer::EntryGenerator::Generate() {
GeneratePrologue();
Isolate* isolate = masm()->isolate();
CpuFeatures::Scope scope(VFP3);
// Save all general purpose registers before messing with them.
const int kNumberOfRegisters = Register::kNumRegisters;
@ -520,13 +549,21 @@ void Deoptimizer::EntryGenerator::Generate() {
const int kDoubleRegsSize =
kDoubleSize * DwVfpRegister::kNumAllocatableRegisters;
// Save all general purpose registers before messing with them.
__ sub(sp, sp, Operand(kDoubleRegsSize));
for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; ++i) {
DwVfpRegister vfp_reg = DwVfpRegister::FromAllocationIndex(i);
int offset = i * kDoubleSize;
__ vstr(vfp_reg, sp, offset);
// Save all VFP registers before messing with them.
DwVfpRegister first = DwVfpRegister::FromAllocationIndex(0);
DwVfpRegister last =
DwVfpRegister::FromAllocationIndex(
DwVfpRegister::kNumAllocatableRegisters - 1);
ASSERT(last.code() > first.code());
ASSERT((last.code() - first.code()) ==
(DwVfpRegister::kNumAllocatableRegisters - 1));
#ifdef DEBUG
for (int i = 0; i <= (DwVfpRegister::kNumAllocatableRegisters - 1); i++) {
ASSERT((DwVfpRegister::FromAllocationIndex(i).code() <= last.code()) &&
(DwVfpRegister::FromAllocationIndex(i).code() >= first.code()));
}
#endif
__ vstm(db_w, sp, first, last);
// Push all 16 registers (needed to populate FrameDescription::registers_).
__ stm(db_w, sp, restored_regs | sp.bit() | lr.bit() | pc.bit());
@ -557,14 +594,16 @@ void Deoptimizer::EntryGenerator::Generate() {
// Allocate a new deoptimizer object.
// Pass four arguments in r0 to r3 and fifth argument on stack.
__ PrepareCallCFunction(5, r5);
__ PrepareCallCFunction(6, r5);
__ ldr(r0, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset));
__ mov(r1, Operand(type())); // bailout type,
// r2: bailout id already loaded.
// r3: code address or 0 already loaded.
__ str(r4, MemOperand(sp, 0 * kPointerSize)); // Fp-to-sp delta.
__ mov(r5, Operand(ExternalReference::isolate_address()));
__ str(r5, MemOperand(sp, 1 * kPointerSize)); // Isolate.
// Call Deoptimizer::New().
__ CallCFunction(ExternalReference::new_deoptimizer_function(), 5);
__ CallCFunction(ExternalReference::new_deoptimizer_function(isolate), 6);
// Preserve "deoptimizer" object in register r0 and get the input
// frame descriptor pointer to r1 (deoptimizer->input_);
@ -618,7 +657,8 @@ void Deoptimizer::EntryGenerator::Generate() {
// r0: deoptimizer object; r1: scratch.
__ PrepareCallCFunction(1, r1);
// Call Deoptimizer::ComputeOutputFrames().
__ CallCFunction(ExternalReference::compute_output_frames_function(), 1);
__ CallCFunction(
ExternalReference::compute_output_frames_function(isolate), 1);
__ pop(r0); // Restore deoptimizer object (class Deoptimizer).
// Replace the current (input) frame with the output frames.
@ -668,7 +708,7 @@ void Deoptimizer::EntryGenerator::Generate() {
__ pop(ip); // remove lr
// Set up the roots register.
ExternalReference roots_address = ExternalReference::roots_address();
ExternalReference roots_address = ExternalReference::roots_address(isolate);
__ mov(r10, Operand(roots_address));
__ pop(ip); // remove pc

145
deps/v8/src/arm/disasm-arm.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -89,6 +89,9 @@ class Decoder {
// Returns the length of the disassembled machine instruction in bytes.
int InstructionDecode(byte* instruction);
static bool IsConstantPoolAt(byte* instr_ptr);
static int ConstantPoolSizeAt(byte* instr_ptr);
private:
// Bottleneck functions to print into the out_buffer.
void PrintChar(const char ch);
@ -368,25 +371,34 @@ int Decoder::FormatRegister(Instruction* instr, const char* format) {
int Decoder::FormatVFPRegister(Instruction* instr, const char* format) {
ASSERT((format[0] == 'S') || (format[0] == 'D'));
VFPRegPrecision precision =
format[0] == 'D' ? kDoublePrecision : kSinglePrecision;
int retval = 2;
int reg = -1;
if (format[1] == 'n') {
int reg = instr->VnValue();
if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->NValue()));
if (format[0] == 'D') PrintDRegister(reg);
return 2;
reg = instr->VFPNRegValue(precision);
} else if (format[1] == 'm') {
int reg = instr->VmValue();
if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->MValue()));
if (format[0] == 'D') PrintDRegister(reg);
return 2;
reg = instr->VFPMRegValue(precision);
} else if (format[1] == 'd') {
int reg = instr->VdValue();
if (format[0] == 'S') PrintSRegister(((reg << 1) | instr->DValue()));
if (format[0] == 'D') PrintDRegister(reg);
return 2;
reg = instr->VFPDRegValue(precision);
if (format[2] == '+') {
int immed8 = instr->Immed8Value();
if (format[0] == 'S') reg += immed8 - 1;
if (format[0] == 'D') reg += (immed8 / 2 - 1);
}
if (format[2] == '+') retval = 3;
} else {
UNREACHABLE();
}
UNREACHABLE();
return -1;
if (precision == kSinglePrecision) {
PrintSRegister(reg);
} else {
PrintDRegister(reg);
}
return retval;
}
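The precision-aware helpers used here fold away the old inline bit twiddling; a minimal sketch of the single-precision case (illustrative helper, not a Decoder method):

int SingleVFPRegIndex(int four_bit_field, int extra_bit) {
  // An S register index is the 4-bit field with the extra N/M/D bit appended
  // in the least significant position, i.e. (reg << 1) | bit.
  return (four_bit_field << 1) | extra_bit;
}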
@ -490,13 +502,16 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
ASSERT(STRING_STARTS_WITH(format, "memop"));
if (instr->HasL()) {
Print("ldr");
} else if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0)) {
if (instr->Bits(7, 4) == 0xf) {
Print("strd");
} else {
Print("ldrd");
}
} else {
if ((instr->Bits(27, 25) == 0) && (instr->Bit(20) == 0) &&
(instr->Bits(7, 6) == 3) && (instr->Bit(4) == 1)) {
if (instr->Bit(5) == 1) {
Print("strd");
} else {
Print("ldrd");
}
return 5;
}
Print("str");
}
return 5;
@ -899,6 +914,7 @@ void Decoder::DecodeType2(Instruction* instr) {
case da_x: {
if (instr->HasW()) {
Unknown(instr); // not used in V8
return;
}
Format(instr, "'memop'cond'b 'rd, ['rn], #-'off12");
break;
@ -906,6 +922,7 @@ void Decoder::DecodeType2(Instruction* instr) {
case ia_x: {
if (instr->HasW()) {
Unknown(instr); // not used in V8
return;
}
Format(instr, "'memop'cond'b 'rd, ['rn], #+'off12");
break;
@ -992,11 +1009,15 @@ void Decoder::DecodeType3(Instruction* instr) {
void Decoder::DecodeType4(Instruction* instr) {
ASSERT(instr->Bit(22) == 0); // Privileged mode currently not supported.
if (instr->HasL()) {
Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
if (instr->Bit(22) != 0) {
// Privileged mode currently not supported.
Unknown(instr);
} else {
Format(instr, "stm'cond'pu 'rn'w, 'rlist");
if (instr->HasL()) {
Format(instr, "ldm'cond'pu 'rn'w, 'rlist");
} else {
Format(instr, "stm'cond'pu 'rn'w, 'rlist");
}
}
}
@ -1042,6 +1063,8 @@ int Decoder::DecodeType7(Instruction* instr) {
// vmov: Rt = Sn
// vcvt: Dd = Sm
// vcvt: Sd = Dm
// Dd = vabs(Dm)
// Dd = vneg(Dm)
// Dd = vadd(Dn, Dm)
// Dd = vsub(Dn, Dm)
// Dd = vmul(Dn, Dm)
@ -1066,7 +1089,10 @@ void Decoder::DecodeTypeVFP(Instruction* instr) {
}
} else if ((instr->Opc2Value() == 0x0) && (instr->Opc3Value() == 0x3)) {
// vabs
Format(instr, "vabs'cond 'Dd, 'Dm");
Format(instr, "vabs.f64'cond 'Dd, 'Dm");
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
// vneg
Format(instr, "vneg.f64'cond 'Dd, 'Dm");
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
@ -1259,9 +1285,22 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
Format(instr, "vstr'cond 'Sd, ['rn + 4*'imm08@00]");
}
break;
case 0x4:
case 0x5:
case 0x6:
case 0x7:
case 0x9:
case 0xB: {
bool to_vfp_register = (instr->VLValue() == 0x1);
if (to_vfp_register) {
Format(instr, "vldm'cond'pu 'rn'w, {'Sd-'Sd+}");
} else {
Format(instr, "vstm'cond'pu 'rn'w, {'Sd-'Sd+}");
}
break;
}
default:
Unknown(instr); // Not used by V8.
break;
}
} else if (instr->CoprocessorValue() == 0xB) {
switch (instr->OpcodeValue()) {
@ -1289,12 +1328,38 @@ void Decoder::DecodeType6CoprocessorIns(Instruction* instr) {
Format(instr, "vstr'cond 'Dd, ['rn + 4*'imm08@00]");
}
break;
case 0x4:
case 0x5:
case 0x9: {
bool to_vfp_register = (instr->VLValue() == 0x1);
if (to_vfp_register) {
Format(instr, "vldm'cond'pu 'rn'w, {'Dd-'Dd+}");
} else {
Format(instr, "vstm'cond'pu 'rn'w, {'Dd-'Dd+}");
}
break;
}
default:
Unknown(instr); // Not used by V8.
break;
}
} else {
UNIMPLEMENTED(); // Not used by V8.
Unknown(instr); // Not used by V8.
}
}
bool Decoder::IsConstantPoolAt(byte* instr_ptr) {
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
return (instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker;
}
int Decoder::ConstantPoolSizeAt(byte* instr_ptr) {
if (IsConstantPoolAt(instr_ptr)) {
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
return instruction_bits & kConstantPoolLengthMask;
} else {
return -1;
}
}
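A small self-contained sketch of the marker check these two helpers implement (not part of the diff; the local constants mirror kConstantPoolMarkerMask, kConstantPoolMarker and kConstantPoolLengthMask from constants-arm.h):

#include <cstdint>
#include <cstdio>

int main() {
  const uint32_t kMarkerMask = 0xffe00000;
  const uint32_t kMarker = 0x0c000000;
  const uint32_t kLengthMask = 0x001ffff;
  uint32_t word = kMarker | 42;  // a marker announcing a 42-entry pool
  bool is_pool = (word & kMarkerMask) == kMarker;
  int length = static_cast<int>(word & kLengthMask);
  std::printf("is_pool=%d length=%d\n", is_pool ? 1 : 0, length);
  return 0;
}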
@ -1307,7 +1372,15 @@ int Decoder::InstructionDecode(byte* instr_ptr) {
"%08x ",
instr->InstructionBits());
if (instr->ConditionField() == kSpecialCondition) {
UNIMPLEMENTED();
Unknown(instr);
return Instruction::kInstrSize;
}
int instruction_bits = *(reinterpret_cast<int*>(instr_ptr));
if ((instruction_bits & kConstantPoolMarkerMask) == kConstantPoolMarker) {
out_buffer_pos_ += OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"constant pool begin (length %d)",
instruction_bits &
kConstantPoolLengthMask);
return Instruction::kInstrSize;
}
switch (instr->TypeValue()) {
@ -1359,9 +1432,8 @@ namespace disasm {
const char* NameConverter::NameOfAddress(byte* addr) const {
static v8::internal::EmbeddedVector<char, 32> tmp_buffer;
v8::internal::OS::SNPrintF(tmp_buffer, "%p", addr);
return tmp_buffer.start();
v8::internal::OS::SNPrintF(tmp_buffer_, "%p", addr);
return tmp_buffer_.start();
}
@ -1411,12 +1483,7 @@ int Disassembler::InstructionDecode(v8::internal::Vector<char> buffer,
int Disassembler::ConstantPoolSizeAt(byte* instruction) {
int instruction_bits = *(reinterpret_cast<int*>(instruction));
if ((instruction_bits & 0xfff00000) == 0x03000000) {
return instruction_bits & 0x0000ffff;
} else {
return -1;
}
return v8::internal::Decoder::ConstantPoolSizeAt(instruction);
}

5
deps/v8/src/arm/frames-arm.h

@ -72,6 +72,9 @@ static const RegList kCalleeSaved =
static const int kNumCalleeSaved = 7 + kR9Available;
// Double registers d8 to d15 are callee-saved.
static const int kNumDoubleCalleeSaved = 8;
// Number of registers for which space is reserved in safepoints. Must be a
// multiple of 8.
@ -136,7 +139,7 @@ class JavaScriptFrameConstants : public AllStatic {
public:
// FP-relative.
static const int kLocal0Offset = StandardFrameConstants::kExpressionsOffset;
static const int kSavedRegistersOffset = +2 * kPointerSize;
static const int kLastParameterOffset = +2 * kPointerSize;
static const int kFunctionOffset = StandardFrameConstants::kMarkerOffset;
// Caller SP-relative.

1069
deps/v8/src/arm/full-codegen-arm.cc

File diff suppressed because it is too large

685
deps/v8/src/arm/ic-arm.cc

File diff suppressed because it is too large

174
deps/v8/src/arm/jump-target-arm.cc

@ -1,174 +0,0 @@
// Copyright 2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen-inl.h"
#include "jump-target-inl.h"
#include "register-allocator-inl.h"
#include "virtual-frame-inl.h"
namespace v8 {
namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
#define __ ACCESS_MASM(cgen()->masm())
void JumpTarget::DoJump() {
ASSERT(cgen()->has_valid_frame());
// Live non-frame registers are not allowed at unconditional jumps
// because we have no way of invalidating the corresponding results
// which are still live in the C++ code.
ASSERT(cgen()->HasValidEntryRegisters());
if (entry_frame_set_) {
if (entry_label_.is_bound()) {
// If we already bound and generated code at the destination then it
// is too late to ask for less optimistic type assumptions.
ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
}
// There is already a frame expectation at the target.

cgen()->frame()->MergeTo(&entry_frame_);
cgen()->DeleteFrame();
} else {
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
// Zap the fall-through frame since the jump was unconditional.
RegisterFile empty;
cgen()->SetFrame(NULL, &empty);
}
if (entry_label_.is_bound()) {
// You can't jump backwards to an already bound label unless you admitted
// up front that this was a bidirectional jump target. Bidirectional jump
// targets will zap their type info when bound in case some later virtual
// frame with less precise type info branches to them.
ASSERT(direction_ != FORWARD_ONLY);
}
__ jmp(&entry_label_);
}
void JumpTarget::DoBranch(Condition cond, Hint ignored) {
ASSERT(cgen()->has_valid_frame());
if (entry_frame_set_) {
if (entry_label_.is_bound()) {
// If we already bound and generated code at the destination then it
// is too late to ask for less optimistic type assumptions.
ASSERT(entry_frame_.IsCompatibleWith(cgen()->frame()));
}
// We have an expected frame to merge to on the backward edge.
cgen()->frame()->MergeTo(&entry_frame_, cond);
} else {
// Clone the current frame to use as the expected one at the target.
set_entry_frame(cgen()->frame());
}
if (entry_label_.is_bound()) {
// You can't branch backwards to an already bound label unless you admitted
// up front that this was a bidirectional jump target. Bidirectional jump
// targets will zap their type info when bound in case some later virtual
// frame with less precise type info branches to them.
ASSERT(direction_ != FORWARD_ONLY);
}
__ b(cond, &entry_label_);
if (cond == al) {
cgen()->DeleteFrame();
}
}
void JumpTarget::Call() {
// Call is used to push the address of the catch block on the stack as
// a return address when compiling try/catch and try/finally. We
// fully spill the frame before making the call. The expected frame
// at the label (which should be the only one) is the spilled current
// frame plus an in-memory return address. The "fall-through" frame
// at the return site is the spilled current frame.
ASSERT(cgen()->has_valid_frame());
// There are no non-frame references across the call.
ASSERT(cgen()->HasValidEntryRegisters());
ASSERT(!is_linked());
// Calls are always 'forward' so we use a copy of the current frame (plus
// one for a return address) as the expected frame.
ASSERT(!entry_frame_set_);
VirtualFrame target_frame = *cgen()->frame();
target_frame.Adjust(1);
set_entry_frame(&target_frame);
__ bl(&entry_label_);
}
void JumpTarget::DoBind() {
ASSERT(!is_bound());
// Live non-frame registers are not allowed at the start of a basic
// block.
ASSERT(!cgen()->has_valid_frame() || cgen()->HasValidEntryRegisters());
if (cgen()->has_valid_frame()) {
if (direction_ != FORWARD_ONLY) cgen()->frame()->ForgetTypeInfo();
// If there is a current frame we can use it on the fall through.
if (!entry_frame_set_) {
entry_frame_ = *cgen()->frame();
entry_frame_set_ = true;
} else {
cgen()->frame()->MergeTo(&entry_frame_);
// On fall through we may have to merge both ways.
if (direction_ != FORWARD_ONLY) {
// This will not need to adjust the virtual frame entries that are
// register allocated since that was done above and they now match.
// But it does need to adjust the entry_frame_ of this jump target
// to make it potentially less optimistic. Later code can branch back
// to this jump target and we need to assert that that code does not
// have weaker assumptions about types.
entry_frame_.MergeTo(cgen()->frame());
}
}
} else {
// If there is no current frame we must have an entry frame which we can
// copy.
ASSERT(entry_frame_set_);
RegisterFile empty;
cgen()->SetFrame(new VirtualFrame(&entry_frame_), &empty);
}
__ bind(&entry_label_);
}
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

648
deps/v8/src/arm/lithium-arm.cc

File diff suppressed because it is too large

570
deps/v8/src/arm/lithium-arm.h

@ -32,6 +32,7 @@
#include "lithium-allocator.h"
#include "lithium.h"
#include "safepoint-table.h"
#include "utils.h"
namespace v8 {
namespace internal {
@ -69,18 +70,23 @@ class LCodeGen;
V(CallStub) \
V(CheckFunction) \
V(CheckInstanceType) \
V(CheckNonSmi) \
V(CheckMap) \
V(CheckPrototypeMaps) \
V(CheckSmi) \
V(ClampDToUint8) \
V(ClampIToUint8) \
V(ClampTToUint8) \
V(ClassOfTest) \
V(ClassOfTestAndBranch) \
V(CmpConstantEq) \
V(CmpConstantEqAndBranch) \
V(CmpID) \
V(CmpIDAndBranch) \
V(CmpJSObjectEq) \
V(CmpJSObjectEqAndBranch) \
V(CmpObjectEq) \
V(CmpObjectEqAndBranch) \
V(CmpMapAndBranch) \
V(CmpT) \
V(CmpTAndBranch) \
V(ConstantD) \
V(ConstantI) \
V(ConstantT) \
@ -89,9 +95,10 @@ class LCodeGen;
V(Deoptimize) \
V(DivI) \
V(DoubleToI) \
V(ElementsKind) \
V(ExternalArrayLength) \
V(FixedArrayLength) \
V(FunctionLiteral) \
V(Gap) \
V(GetCachedArrayIndex) \
V(GlobalObject) \
V(GlobalReceiver) \
@ -100,29 +107,37 @@ class LCodeGen;
V(HasCachedArrayIndexAndBranch) \
V(HasInstanceType) \
V(HasInstanceTypeAndBranch) \
V(In) \
V(InstanceOf) \
V(InstanceOfAndBranch) \
V(InstanceOfKnownGlobal) \
V(InstructionGap) \
V(Integer32ToDouble) \
V(InvokeFunction) \
V(IsConstructCall) \
V(IsConstructCallAndBranch) \
V(IsNull) \
V(IsNullAndBranch) \
V(IsObject) \
V(IsObjectAndBranch) \
V(IsSmi) \
V(IsSmiAndBranch) \
V(IsUndetectable) \
V(IsUndetectableAndBranch) \
V(JSArrayLength) \
V(Label) \
V(LazyBailout) \
V(LoadContextSlot) \
V(LoadElements) \
V(LoadExternalArrayPointer) \
V(LoadFunctionPrototype) \
V(LoadGlobal) \
V(LoadGlobalCell) \
V(LoadGlobalGeneric) \
V(LoadKeyedFastElement) \
V(LoadKeyedGeneric) \
V(LoadKeyedSpecializedArrayElement) \
V(LoadNamedField) \
V(LoadNamedFieldPolymorphic) \
V(LoadNamedGeneric) \
V(LoadPixelArrayElement) \
V(LoadPixelArrayExternalPointer) \
V(ModI) \
V(MulI) \
V(NumberTagD) \
@ -132,7 +147,6 @@ class LCodeGen;
V(OsrEntry) \
V(OuterContext) \
V(Parameter) \
V(PixelArrayLength) \
V(Power) \
V(PushArgument) \
V(RegExpLiteral) \
@ -142,40 +156,40 @@ class LCodeGen;
V(SmiUntag) \
V(StackCheck) \
V(StoreContextSlot) \
V(StoreGlobal) \
V(StoreGlobalCell) \
V(StoreGlobalGeneric) \
V(StoreKeyedFastElement) \
V(StoreKeyedGeneric) \
V(StoreKeyedSpecializedArrayElement) \
V(StoreNamedField) \
V(StoreNamedGeneric) \
V(StringAdd) \
V(StringCharCodeAt) \
V(StringCharFromCode) \
V(StringLength) \
V(SubI) \
V(TaggedToI) \
V(ThisFunction) \
V(Throw) \
V(ToFastProperties) \
V(Typeof) \
V(TypeofIs) \
V(TypeofIsAndBranch) \
V(IsConstructCall) \
V(IsConstructCallAndBranch) \
V(UnaryMathOperation) \
V(UnknownOSRValue) \
V(ValueOf)
#define DECLARE_INSTRUCTION(type) \
virtual bool Is##type() const { return true; } \
static L##type* cast(LInstruction* instr) { \
ASSERT(instr->Is##type()); \
return reinterpret_cast<L##type*>(instr); \
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
virtual Opcode opcode() const { return LInstruction::k##type; } \
virtual void CompileToNative(LCodeGen* generator); \
virtual const char* Mnemonic() const { return mnemonic; } \
static L##type* cast(LInstruction* instr) { \
ASSERT(instr->Is##type()); \
return reinterpret_cast<L##type*>(instr); \
}
#define DECLARE_CONCRETE_INSTRUCTION(type, mnemonic) \
virtual void CompileToNative(LCodeGen* generator); \
virtual const char* Mnemonic() const { return mnemonic; } \
DECLARE_INSTRUCTION(type)
#define DECLARE_HYDROGEN_ACCESSOR(type) \
H##type* hydrogen() const { \
return H##type::cast(hydrogen_value()); \
@ -197,10 +211,25 @@ class LInstruction: public ZoneObject {
virtual void PrintDataTo(StringStream* stream) = 0;
virtual void PrintOutputOperandTo(StringStream* stream) = 0;
// Declare virtual type testers.
#define DECLARE_DO(type) virtual bool Is##type() const { return false; }
LITHIUM_ALL_INSTRUCTION_LIST(DECLARE_DO)
#undef DECLARE_DO
enum Opcode {
// Declare a unique enum value for each instruction.
#define DECLARE_OPCODE(type) k##type,
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_OPCODE)
kNumberOfInstructions
#undef DECLARE_OPCODE
};
virtual Opcode opcode() const = 0;
// Declare non-virtual type testers for all leaf IR classes.
#define DECLARE_PREDICATE(type) \
bool Is##type() const { return opcode() == k##type; }
LITHIUM_CONCRETE_INSTRUCTION_LIST(DECLARE_PREDICATE)
#undef DECLARE_PREDICATE
// Declare virtual predicates for instructions that don't have
// an opcode.
virtual bool IsGap() const { return false; }
virtual bool IsControl() const { return false; }
virtual void SetBranchTargets(int true_block_id, int false_block_id) { }
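The macro change above replaces one virtual Is##type() per concrete class with a single virtual opcode() plus non-virtual predicates. A compilable toy version of the pattern, with DemoInstruction standing in for LInstruction (all names here are made up for illustration):

#include <cassert>

#define DEMO_INSTRUCTION_LIST(V) V(Goto) V(Label)

class DemoInstruction {
 public:
  enum Opcode {
#define DECLARE_OPCODE(type) k##type,
    DEMO_INSTRUCTION_LIST(DECLARE_OPCODE)
#undef DECLARE_OPCODE
    kNumberOfInstructions
  };
  virtual ~DemoInstruction() {}
  virtual Opcode opcode() const = 0;
  // Non-virtual type testers generated from the instruction list.
#define DECLARE_PREDICATE(type) \
  bool Is##type() const { return opcode() == k##type; }
  DEMO_INSTRUCTION_LIST(DECLARE_PREDICATE)
#undef DECLARE_PREDICATE
};

class DemoGoto : public DemoInstruction {
 public:
  virtual Opcode opcode() const { return kGoto; }
};

int main() {
  DemoGoto g;
  assert(g.IsGoto() && !g.IsLabel());
  return 0;
}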
@ -258,37 +287,6 @@ class LInstruction: public ZoneObject {
};
template<typename ElementType, int NumElements>
class OperandContainer {
public:
OperandContainer() {
for (int i = 0; i < NumElements; i++) elems_[i] = NULL;
}
int length() { return NumElements; }
ElementType& operator[](int i) {
ASSERT(i < length());
return elems_[i];
}
void PrintOperandsTo(StringStream* stream);
private:
ElementType elems_[NumElements];
};
template<typename ElementType>
class OperandContainer<ElementType, 0> {
public:
int length() { return 0; }
void PrintOperandsTo(StringStream* stream) { }
ElementType& operator[](int i) {
UNREACHABLE();
static ElementType t = 0;
return t;
}
};
// R = number of result operands (0 or 1).
// I = number of input operands.
// T = number of temporary operands.
@ -311,9 +309,9 @@ class LTemplateInstruction: public LInstruction {
virtual void PrintOutputOperandTo(StringStream* stream);
protected:
OperandContainer<LOperand*, R> results_;
OperandContainer<LOperand*, I> inputs_;
OperandContainer<LOperand*, T> temps_;
EmbeddedContainer<LOperand*, R> results_;
EmbeddedContainer<LOperand*, I> inputs_;
EmbeddedContainer<LOperand*, T> temps_;
};
@ -327,8 +325,13 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
parallel_moves_[AFTER] = NULL;
}
DECLARE_CONCRETE_INSTRUCTION(Gap, "gap")
virtual void PrintDataTo(StringStream* stream) const;
// Can't use the DECLARE-macro here because of sub-classes.
virtual bool IsGap() const { return true; }
virtual void PrintDataTo(StringStream* stream);
static LGap* cast(LInstruction* instr) {
ASSERT(instr->IsGap());
return reinterpret_cast<LGap*>(instr);
}
bool IsRedundant() const;
@ -358,21 +361,26 @@ class LGap: public LTemplateInstruction<0, 0, 0> {
};
class LInstructionGap: public LGap {
public:
explicit LInstructionGap(HBasicBlock* block) : LGap(block) { }
DECLARE_CONCRETE_INSTRUCTION(InstructionGap, "gap")
};
class LGoto: public LTemplateInstruction<0, 0, 0> {
public:
LGoto(int block_id, bool include_stack_check = false)
: block_id_(block_id), include_stack_check_(include_stack_check) { }
explicit LGoto(int block_id) : block_id_(block_id) { }
DECLARE_CONCRETE_INSTRUCTION(Goto, "goto")
virtual void PrintDataTo(StringStream* stream);
virtual bool IsControl() const { return true; }
int block_id() const { return block_id_; }
bool include_stack_check() const { return include_stack_check_; }
private:
int block_id_;
bool include_stack_check_;
};
@ -446,7 +454,6 @@ class LUnknownOSRValue: public LTemplateInstruction<1, 0, 0> {
template<int I, int T>
class LControlInstruction: public LTemplateInstruction<0, I, T> {
public:
DECLARE_INSTRUCTION(ControlInstruction)
virtual bool IsControl() const { return true; }
int true_block_id() const { return true_block_id_; }
@ -519,11 +526,29 @@ class LArgumentsElements: public LTemplateInstruction<1, 0, 0> {
};
class LModI: public LTemplateInstruction<1, 2, 0> {
class LModI: public LTemplateInstruction<1, 2, 3> {
public:
LModI(LOperand* left, LOperand* right) {
// Used when the right hand is a constant power of 2.
LModI(LOperand* left,
LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = NULL;
temps_[1] = NULL;
temps_[2] = NULL;
}
// Used for the standard case.
LModI(LOperand* left,
LOperand* right,
LOperand* temp1,
LOperand* temp2,
LOperand* temp3) {
inputs_[0] = left;
inputs_[1] = right;
temps_[0] = temp1;
temps_[1] = temp2;
temps_[2] = temp3;
}
DECLARE_CONCRETE_INSTRUCTION(ModI, "mod-i")
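On the power-of-two special case above: when the dividend is known non-negative and the divisor is a power of two, the modulus reduces to a bitwise mask, which is why that LModI form gets by without temp registers. A minimal sketch (not V8 code):

#include <cassert>

int ModPowerOfTwo(int dividend, int divisor) {
  // Assumes dividend >= 0 and divisor is a power of two.
  return dividend & (divisor - 1);
}

int main() {
  assert(ModPowerOfTwo(29, 8) == 29 % 8);
  return 0;
}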
@ -607,26 +632,49 @@ class LUnaryMathOperation: public LTemplateInstruction<1, 1, 1> {
};
class LCmpJSObjectEq: public LTemplateInstruction<1, 2, 0> {
class LCmpObjectEq: public LTemplateInstruction<1, 2, 0> {
public:
LCmpJSObjectEq(LOperand* left, LOperand* right) {
LCmpObjectEq(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEq, "cmp-jsobject-eq")
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEq, "cmp-object-eq")
};
class LCmpJSObjectEqAndBranch: public LControlInstruction<2, 0> {
class LCmpObjectEqAndBranch: public LControlInstruction<2, 0> {
public:
LCmpJSObjectEqAndBranch(LOperand* left, LOperand* right) {
LCmpObjectEqAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(CmpJSObjectEqAndBranch,
"cmp-jsobject-eq-and-branch")
DECLARE_CONCRETE_INSTRUCTION(CmpObjectEqAndBranch,
"cmp-object-eq-and-branch")
};
class LCmpConstantEq: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCmpConstantEq(LOperand* left) {
inputs_[0] = left;
}
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEq, "cmp-constant-eq")
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
};
class LCmpConstantEqAndBranch: public LControlInstruction<1, 0> {
public:
explicit LCmpConstantEqAndBranch(LOperand* left) {
inputs_[0] = left;
}
DECLARE_CONCRETE_INSTRUCTION(CmpConstantEqAndBranch,
"cmp-constant-eq-and-branch")
DECLARE_HYDROGEN_ACCESSOR(CompareConstantEq)
};
@ -657,7 +705,7 @@ class LIsNullAndBranch: public LControlInstruction<1, 0> {
};
class LIsObject: public LTemplateInstruction<1, 1, 1> {
class LIsObject: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsObject(LOperand* value) {
inputs_[0] = value;
@ -667,7 +715,7 @@ class LIsObject: public LTemplateInstruction<1, 1, 1> {
};
class LIsObjectAndBranch: public LControlInstruction<1, 2> {
class LIsObjectAndBranch: public LControlInstruction<1, 1> {
public:
LIsObjectAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
@ -703,6 +751,31 @@ class LIsSmiAndBranch: public LControlInstruction<1, 0> {
};
class LIsUndetectable: public LTemplateInstruction<1, 1, 0> {
public:
explicit LIsUndetectable(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(IsUndetectable, "is-undetectable")
DECLARE_HYDROGEN_ACCESSOR(IsUndetectable)
};
class LIsUndetectableAndBranch: public LControlInstruction<1, 1> {
public:
explicit LIsUndetectableAndBranch(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(IsUndetectableAndBranch,
"is-undetectable-and-branch")
virtual void PrintDataTo(StringStream* stream);
};
class LHasInstanceType: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasInstanceType(LOperand* value) {
@ -728,25 +801,25 @@ class LHasInstanceTypeAndBranch: public LControlInstruction<1, 0> {
};
class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LHasCachedArrayIndex(LOperand* value) {
explicit LGetCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
};
class LGetCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
class LHasCachedArrayIndex: public LTemplateInstruction<1, 1, 0> {
public:
explicit LGetCachedArrayIndex(LOperand* value) {
explicit LHasCachedArrayIndex(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(GetCachedArrayIndex, "get-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(GetCachedArrayIndex)
DECLARE_CONCRETE_INSTRUCTION(HasCachedArrayIndex, "has-cached-array-index")
DECLARE_HYDROGEN_ACCESSOR(HasCachedArrayIndex)
};
@ -804,20 +877,6 @@ class LCmpT: public LTemplateInstruction<1, 2, 0> {
};
class LCmpTAndBranch: public LControlInstruction<2, 0> {
public:
LCmpTAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(CmpTAndBranch, "cmp-t-and-branch")
DECLARE_HYDROGEN_ACCESSOR(Compare)
Token::Value op() const { return hydrogen()->token(); }
};
class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
public:
LInstanceOf(LOperand* left, LOperand* right) {
@ -829,17 +888,6 @@ class LInstanceOf: public LTemplateInstruction<1, 2, 0> {
};
class LInstanceOfAndBranch: public LControlInstruction<2, 0> {
public:
LInstanceOfAndBranch(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(InstanceOfAndBranch, "instance-of-and-branch")
};
class LInstanceOfKnownGlobal: public LTemplateInstruction<1, 1, 1> {
public:
LInstanceOfKnownGlobal(LOperand* value, LOperand* temp) {
@ -991,14 +1039,14 @@ class LJSArrayLength: public LTemplateInstruction<1, 1, 0> {
};
class LPixelArrayLength: public LTemplateInstruction<1, 1, 0> {
class LExternalArrayLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LPixelArrayLength(LOperand* value) {
explicit LExternalArrayLength(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(PixelArrayLength, "pixel-array-length")
DECLARE_HYDROGEN_ACCESSOR(PixelArrayLength)
DECLARE_CONCRETE_INSTRUCTION(ExternalArrayLength, "external-array-length")
DECLARE_HYDROGEN_ACCESSOR(ExternalArrayLength)
};
@ -1013,6 +1061,17 @@ class LFixedArrayLength: public LTemplateInstruction<1, 1, 0> {
};
class LElementsKind: public LTemplateInstruction<1, 1, 0> {
public:
explicit LElementsKind(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(ElementsKind, "elements-kind")
DECLARE_HYDROGEN_ACCESSOR(ElementsKind)
};
class LValueOf: public LTemplateInstruction<1, 1, 1> {
public:
LValueOf(LOperand* value, LOperand* temp) {
@ -1079,6 +1138,7 @@ class LArithmeticD: public LTemplateInstruction<1, 2, 0> {
Token::Value op() const { return op_; }
virtual Opcode opcode() const { return LInstruction::kArithmeticD; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
@ -1095,6 +1155,7 @@ class LArithmeticT: public LTemplateInstruction<1, 2, 0> {
inputs_[1] = right;
}
virtual Opcode opcode() const { return LInstruction::kArithmeticT; }
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const;
@ -1126,6 +1187,19 @@ class LLoadNamedField: public LTemplateInstruction<1, 1, 0> {
};
class LLoadNamedFieldPolymorphic: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedFieldPolymorphic(LOperand* object) {
inputs_[0] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadNamedField, "load-named-field-polymorphic")
DECLARE_HYDROGEN_ACCESSOR(LoadNamedFieldPolymorphic)
LOperand* object() { return inputs_[0]; }
};
class LLoadNamedGeneric: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadNamedGeneric(LOperand* object) {
@ -1163,14 +1237,14 @@ class LLoadElements: public LTemplateInstruction<1, 1, 0> {
};
class LLoadPixelArrayExternalPointer: public LTemplateInstruction<1, 1, 0> {
class LLoadExternalArrayPointer: public LTemplateInstruction<1, 1, 0> {
public:
explicit LLoadPixelArrayExternalPointer(LOperand* object) {
explicit LLoadExternalArrayPointer(LOperand* object) {
inputs_[0] = object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayExternalPointer,
"load-pixel-array-external-pointer")
DECLARE_CONCRETE_INSTRUCTION(LoadExternalArrayPointer,
"load-external-array-pointer")
};
@ -1189,19 +1263,23 @@ class LLoadKeyedFastElement: public LTemplateInstruction<1, 2, 0> {
};
class LLoadPixelArrayElement: public LTemplateInstruction<1, 2, 0> {
class LLoadKeyedSpecializedArrayElement: public LTemplateInstruction<1, 2, 0> {
public:
LLoadPixelArrayElement(LOperand* external_pointer, LOperand* key) {
LLoadKeyedSpecializedArrayElement(LOperand* external_pointer,
LOperand* key) {
inputs_[0] = external_pointer;
inputs_[1] = key;
}
DECLARE_CONCRETE_INSTRUCTION(LoadPixelArrayElement,
"load-pixel-array-element")
DECLARE_HYDROGEN_ACCESSOR(LoadPixelArrayElement)
DECLARE_CONCRETE_INSTRUCTION(LoadKeyedSpecializedArrayElement,
"load-keyed-specialized-array-element")
DECLARE_HYDROGEN_ACCESSOR(LoadKeyedSpecializedArrayElement)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
JSObject::ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
};
@ -1219,22 +1297,55 @@ class LLoadKeyedGeneric: public LTemplateInstruction<1, 2, 0> {
};
class LLoadGlobal: public LTemplateInstruction<1, 0, 0> {
class LLoadGlobalCell: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(LoadGlobal, "load-global")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobal)
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalCell, "load-global-cell")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalCell)
};
class LStoreGlobal: public LTemplateInstruction<0, 1, 1> {
class LLoadGlobalGeneric: public LTemplateInstruction<1, 1, 0> {
public:
LStoreGlobal(LOperand* value, LOperand* temp) {
explicit LLoadGlobalGeneric(LOperand* global_object) {
inputs_[0] = global_object;
}
DECLARE_CONCRETE_INSTRUCTION(LoadGlobalGeneric, "load-global-generic")
DECLARE_HYDROGEN_ACCESSOR(LoadGlobalGeneric)
LOperand* global_object() { return inputs_[0]; }
Handle<Object> name() const { return hydrogen()->name(); }
bool for_typeof() const { return hydrogen()->for_typeof(); }
};
class LStoreGlobalCell: public LTemplateInstruction<0, 1, 1> {
public:
LStoreGlobalCell(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobal, "store-global")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobal)
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalCell, "store-global-cell")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalCell)
};
class LStoreGlobalGeneric: public LTemplateInstruction<0, 2, 0> {
public:
explicit LStoreGlobalGeneric(LOperand* global_object,
LOperand* value) {
inputs_[0] = global_object;
inputs_[1] = value;
}
DECLARE_CONCRETE_INSTRUCTION(StoreGlobalGeneric, "store-global-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreGlobalGeneric)
LOperand* global_object() { return InputAt(0); }
Handle<Object> name() const { return hydrogen()->name(); }
LOperand* value() { return InputAt(1); }
bool strict_mode() { return hydrogen()->strict_mode(); }
};
@ -1283,6 +1394,11 @@ class LPushArgument: public LTemplateInstruction<0, 1, 0> {
};
class LThisFunction: public LTemplateInstruction<1, 0, 0> {
DECLARE_CONCRETE_INSTRUCTION(ThisFunction, "this-function")
};
class LContext: public LTemplateInstruction<1, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(Context, "context")
@ -1337,6 +1453,23 @@ class LCallConstantFunction: public LTemplateInstruction<1, 0, 0> {
};
class LInvokeFunction: public LTemplateInstruction<1, 1, 0> {
public:
explicit LInvokeFunction(LOperand* function) {
inputs_[0] = function;
}
DECLARE_CONCRETE_INSTRUCTION(InvokeFunction, "invoke-function")
DECLARE_HYDROGEN_ACCESSOR(InvokeFunction)
LOperand* function() { return inputs_[0]; }
virtual void PrintDataTo(StringStream* stream);
int arity() const { return hydrogen()->argument_count() - 1; }
};
class LCallKeyed: public LTemplateInstruction<1, 1, 0> {
public:
explicit LCallKeyed(LOperand* key) {
@ -1418,7 +1551,7 @@ class LCallRuntime: public LTemplateInstruction<1, 0, 0> {
DECLARE_CONCRETE_INSTRUCTION(CallRuntime, "call-runtime")
DECLARE_HYDROGEN_ACCESSOR(CallRuntime)
Runtime::Function* function() const { return hydrogen()->function(); }
const Runtime::Function* function() const { return hydrogen()->function(); }
int arity() const { return hydrogen()->argument_count(); }
};
@ -1456,30 +1589,36 @@ class LNumberTagD: public LTemplateInstruction<1, 1, 2> {
// Sometimes truncating conversion from a tagged value to an int32.
class LDoubleToI: public LTemplateInstruction<1, 1, 1> {
class LDoubleToI: public LTemplateInstruction<1, 1, 2> {
public:
explicit LDoubleToI(LOperand* value, LOperand* temp1) {
LDoubleToI(LOperand* value, LOperand* temp1, LOperand* temp2) {
inputs_[0] = value;
temps_[0] = temp1;
temps_[1] = temp2;
}
DECLARE_CONCRETE_INSTRUCTION(DoubleToI, "double-to-i")
DECLARE_HYDROGEN_ACCESSOR(Change)
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
// Truncating conversion from a tagged value to an int32.
class LTaggedToI: public LTemplateInstruction<1, 1, 1> {
class LTaggedToI: public LTemplateInstruction<1, 1, 3> {
public:
LTaggedToI(LOperand* value, LOperand* temp) {
LTaggedToI(LOperand* value,
LOperand* temp1,
LOperand* temp2,
LOperand* temp3) {
inputs_[0] = value;
temps_[0] = temp;
temps_[0] = temp1;
temps_[1] = temp2;
temps_[2] = temp3;
}
DECLARE_CONCRETE_INSTRUCTION(TaggedToI, "tagged-to-i")
DECLARE_HYDROGEN_ACCESSOR(Change)
DECLARE_HYDROGEN_ACCESSOR(UnaryOperation)
bool truncating() { return hydrogen()->CanTruncateToInt32(); }
};
@ -1502,6 +1641,7 @@ class LNumberUntagD: public LTemplateInstruction<1, 1, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(NumberUntagD, "double-untag")
DECLARE_HYDROGEN_ACCESSOR(Change)
};
@ -1559,6 +1699,7 @@ class LStoreNamedGeneric: public LTemplateInstruction<0, 2, 0> {
LOperand* object() { return inputs_[0]; }
LOperand* value() { return inputs_[1]; }
Handle<Object> name() const { return hydrogen()->name(); }
bool strict_mode() { return hydrogen()->strict_mode(); }
};
@ -1591,15 +1732,55 @@ class LStoreKeyedGeneric: public LTemplateInstruction<0, 3, 0> {
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedGeneric, "store-keyed-generic")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedGeneric)
virtual void PrintDataTo(StringStream* stream);
LOperand* object() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
bool strict_mode() { return hydrogen()->strict_mode(); }
};
class LStoreKeyedSpecializedArrayElement: public LTemplateInstruction<0, 3, 0> {
public:
LStoreKeyedSpecializedArrayElement(LOperand* external_pointer,
LOperand* key,
LOperand* val) {
inputs_[0] = external_pointer;
inputs_[1] = key;
inputs_[2] = val;
}
DECLARE_CONCRETE_INSTRUCTION(StoreKeyedSpecializedArrayElement,
"store-keyed-specialized-array-element")
DECLARE_HYDROGEN_ACCESSOR(StoreKeyedSpecializedArrayElement)
LOperand* external_pointer() { return inputs_[0]; }
LOperand* key() { return inputs_[1]; }
LOperand* value() { return inputs_[2]; }
JSObject::ElementsKind elements_kind() const {
return hydrogen()->elements_kind();
}
};
class LStringAdd: public LTemplateInstruction<1, 2, 0> {
public:
LStringAdd(LOperand* left, LOperand* right) {
inputs_[0] = left;
inputs_[1] = right;
}
DECLARE_CONCRETE_INSTRUCTION(StringAdd, "string-add")
DECLARE_HYDROGEN_ACCESSOR(StringAdd)
LOperand* left() { return inputs_[0]; }
LOperand* right() { return inputs_[1]; }
};
class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
public:
LStringCharCodeAt(LOperand* string, LOperand* index) {
@ -1615,6 +1796,19 @@ class LStringCharCodeAt: public LTemplateInstruction<1, 2, 0> {
};
class LStringCharFromCode: public LTemplateInstruction<1, 1, 0> {
public:
explicit LStringCharFromCode(LOperand* char_code) {
inputs_[0] = char_code;
}
DECLARE_CONCRETE_INSTRUCTION(StringCharFromCode, "string-char-from-code")
DECLARE_HYDROGEN_ACCESSOR(StringCharFromCode)
LOperand* char_code() { return inputs_[0]; }
};
class LStringLength: public LTemplateInstruction<1, 1, 0> {
public:
explicit LStringLength(LOperand* string) {
@ -1678,20 +1872,59 @@ class LCheckPrototypeMaps: public LTemplateInstruction<0, 0, 2> {
class LCheckSmi: public LTemplateInstruction<0, 1, 0> {
public:
LCheckSmi(LOperand* value, Condition condition)
: condition_(condition) {
explicit LCheckSmi(LOperand* value) {
inputs_[0] = value;
}
Condition condition() const { return condition_; }
DECLARE_CONCRETE_INSTRUCTION(CheckSmi, "check-smi")
};
virtual void CompileToNative(LCodeGen* generator);
virtual const char* Mnemonic() const {
return (condition_ == eq) ? "check-non-smi" : "check-smi";
class LCheckNonSmi: public LTemplateInstruction<0, 1, 0> {
public:
explicit LCheckNonSmi(LOperand* value) {
inputs_[0] = value;
}
private:
Condition condition_;
DECLARE_CONCRETE_INSTRUCTION(CheckNonSmi, "check-non-smi")
};
class LClampDToUint8: public LTemplateInstruction<1, 1, 1> {
public:
LClampDToUint8(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
LOperand* unclamped() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampDToUint8, "clamp-d-to-uint8")
};
class LClampIToUint8: public LTemplateInstruction<1, 1, 0> {
public:
explicit LClampIToUint8(LOperand* value) {
inputs_[0] = value;
}
LOperand* unclamped() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampIToUint8, "clamp-i-to-uint8")
};
class LClampTToUint8: public LTemplateInstruction<1, 1, 1> {
public:
LClampTToUint8(LOperand* value, LOperand* temp) {
inputs_[0] = value;
temps_[0] = temp;
}
LOperand* unclamped() { return inputs_[0]; }
DECLARE_CONCRETE_INSTRUCTION(ClampTToUint8, "clamp-t-to-uint8")
};
@ -1725,6 +1958,17 @@ class LFunctionLiteral: public LTemplateInstruction<1, 0, 0> {
};
class LToFastProperties: public LTemplateInstruction<1, 1, 0> {
public:
explicit LToFastProperties(LOperand* value) {
inputs_[0] = value;
}
DECLARE_CONCRETE_INSTRUCTION(ToFastProperties, "to-fast-properties")
DECLARE_HYDROGEN_ACCESSOR(ToFastProperties)
};
class LTypeof: public LTemplateInstruction<1, 1, 0> {
public:
explicit LTypeof(LOperand* value) {
@ -1823,13 +2067,33 @@ class LOsrEntry: public LTemplateInstruction<0, 0, 0> {
class LStackCheck: public LTemplateInstruction<0, 0, 0> {
public:
DECLARE_CONCRETE_INSTRUCTION(StackCheck, "stack-check")
DECLARE_HYDROGEN_ACCESSOR(StackCheck)
Label* done_label() { return &done_label_; }
private:
Label done_label_;
};
class LIn: public LTemplateInstruction<1, 2, 0> {
public:
LIn(LOperand* key, LOperand* object) {
inputs_[0] = key;
inputs_[1] = object;
}
LOperand* key() { return inputs_[0]; }
LOperand* object() { return inputs_[1]; }
DECLARE_CONCRETE_INSTRUCTION(In, "in")
};
class LChunkBuilder;
class LChunk: public ZoneObject {
public:
explicit LChunk(HGraph* graph);
explicit LChunk(CompilationInfo* info, HGraph* graph);
void AddInstruction(LInstruction* instruction, HBasicBlock* block);
LConstantOperand* DefineConstantOperand(HConstant* constant);
@ -1842,6 +2106,7 @@ class LChunk: public ZoneObject {
int ParameterAt(int index);
int GetParameterStackSlot(int index) const;
int spill_slot_count() const { return spill_slot_count_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
const ZoneList<LInstruction*>* instructions() const { return &instructions_; }
void AddGapMove(int index, LOperand* from, LOperand* to);
@ -1878,6 +2143,7 @@ class LChunk: public ZoneObject {
private:
int spill_slot_count_;
CompilationInfo* info_;
HGraph* const graph_;
ZoneList<LInstruction*> instructions_;
ZoneList<LPointerMap*> pointer_maps_;
@ -1887,8 +2153,9 @@ class LChunk: public ZoneObject {
class LChunkBuilder BASE_EMBEDDED {
public:
LChunkBuilder(HGraph* graph, LAllocator* allocator)
LChunkBuilder(CompilationInfo* info, HGraph* graph, LAllocator* allocator)
: chunk_(NULL),
info_(info),
graph_(graph),
status_(UNUSED),
current_instruction_(NULL),
@ -1917,6 +2184,7 @@ class LChunkBuilder BASE_EMBEDDED {
};
LChunk* chunk() const { return chunk_; }
CompilationInfo* info() const { return info_; }
HGraph* graph() const { return graph_; }
bool is_unused() const { return status_ == UNUSED; }
@ -2023,6 +2291,7 @@ class LChunkBuilder BASE_EMBEDDED {
HArithmeticBinaryOperation* instr);
LChunk* chunk_;
CompilationInfo* info_;
HGraph* const graph_;
Status status_;
HInstruction* current_instruction_;
@ -2038,7 +2307,6 @@ class LChunkBuilder BASE_EMBEDDED {
};
#undef DECLARE_HYDROGEN_ACCESSOR
#undef DECLARE_INSTRUCTION
#undef DECLARE_CONCRETE_INSTRUCTION
} } // namespace v8::internal

1832
deps/v8/src/arm/lithium-codegen-arm.cc

File diff suppressed because it is too large

52
deps/v8/src/arm/lithium-codegen-arm.h

@ -51,9 +51,10 @@ class LCodeGen BASE_EMBEDDED {
current_instruction_(-1),
instructions_(chunk->instructions()),
deoptimizations_(4),
deopt_jump_table_(4),
deoptimization_literals_(8),
inlined_function_count_(0),
scope_(chunk->graph()->info()->scope()),
scope_(info->scope()),
status_(UNUSED),
deferred_(8),
osr_pc_offset_(-1),
@ -65,6 +66,10 @@ class LCodeGen BASE_EMBEDDED {
// Simple accessors.
MacroAssembler* masm() const { return masm_; }
CompilationInfo* info() const { return info_; }
Isolate* isolate() const { return info_->isolate(); }
Factory* factory() const { return isolate()->factory(); }
Heap* heap() const { return isolate()->heap(); }
// Support for converting LOperands to assembler types.
// LOperand must be a register.
@ -103,13 +108,15 @@ class LCodeGen BASE_EMBEDDED {
void DoDeferredNumberTagI(LNumberTagI* instr);
void DoDeferredTaggedToI(LTaggedToI* instr);
void DoDeferredMathAbsTaggedHeapNumber(LUnaryMathOperation* instr);
void DoDeferredStackCheck(LGoto* instr);
void DoDeferredStackCheck(LStackCheck* instr);
void DoDeferredStringCharCodeAt(LStringCharCodeAt* instr);
void DoDeferredStringCharFromCode(LStringCharFromCode* instr);
void DoDeferredLInstanceOfKnownGlobal(LInstanceOfKnownGlobal* instr,
Label* map_check);
// Parallel move support.
void DoParallelMove(LParallelMove* move);
void DoGap(LGap* instr);
// Emit frame translation commands for an environment.
void WriteTranslation(LEnvironment* environment, Translation* translation);
@ -133,7 +140,7 @@ class LCodeGen BASE_EMBEDDED {
bool is_aborted() const { return status_ == ABORTED; }
int strict_mode_flag() const {
return info_->is_strict() ? kStrictMode : kNonStrictMode;
return info()->is_strict_mode() ? kStrictMode : kNonStrictMode;
}
LChunk* chunk() const { return chunk_; }
@ -141,7 +148,7 @@ class LCodeGen BASE_EMBEDDED {
HGraph* graph() const { return chunk_->graph(); }
Register scratch0() { return r9; }
DwVfpRegister double_scratch0() { return d0; }
DwVfpRegister double_scratch0() { return d15; }
int GetNextEmittedBlock(int block);
LInstruction* GetNextInstruction();
@ -153,8 +160,8 @@ class LCodeGen BASE_EMBEDDED {
Register temporary,
Register temporary2);
int StackSlotCount() const { return chunk()->spill_slot_count(); }
int ParameterCount() const { return scope()->num_parameters(); }
int GetStackSlotCount() const { return chunk()->spill_slot_count(); }
int GetParameterCount() const { return scope()->num_parameters(); }
void Abort(const char* format, ...);
void Comment(const char* format, ...);
@ -166,6 +173,7 @@ class LCodeGen BASE_EMBEDDED {
bool GeneratePrologue();
bool GenerateBody();
bool GenerateDeferredCode();
bool GenerateDeoptJumpTable();
bool GenerateSafepointTable();
enum SafepointMode {
@ -182,14 +190,14 @@ class LCodeGen BASE_EMBEDDED {
LInstruction* instr,
SafepointMode safepoint_mode);
void CallRuntime(Runtime::Function* function,
void CallRuntime(const Runtime::Function* function,
int num_arguments,
LInstruction* instr);
void CallRuntime(Runtime::FunctionId id,
int num_arguments,
LInstruction* instr) {
Runtime::Function* function = Runtime::FunctionForId(id);
const Runtime::Function* function = Runtime::FunctionForId(id);
CallRuntime(function, num_arguments, instr);
}
@ -201,7 +209,8 @@ class LCodeGen BASE_EMBEDDED {
// to be in edi.
void CallKnownFunction(Handle<JSFunction> function,
int arity,
LInstruction* instr);
LInstruction* instr,
CallKind call_kind);
void LoadHeapObject(Register result, Handle<HeapObject> object);
@ -228,6 +237,10 @@ class LCodeGen BASE_EMBEDDED {
void DoMathFloor(LUnaryMathOperation* instr);
void DoMathRound(LUnaryMathOperation* instr);
void DoMathSqrt(LUnaryMathOperation* instr);
void DoMathPowHalf(LUnaryMathOperation* instr);
void DoMathLog(LUnaryMathOperation* instr);
void DoMathCos(LUnaryMathOperation* instr);
void DoMathSin(LUnaryMathOperation* instr);
// Support for recording safepoint and position information.
void RecordSafepoint(LPointerMap* pointers,
@ -243,13 +256,17 @@ class LCodeGen BASE_EMBEDDED {
int arguments,
int deoptimization_index);
void RecordPosition(int position);
int LastSafepointEnd() {
return static_cast<int>(safepoints_.GetPcAfterGap());
}
static Condition TokenToCondition(Token::Value op, bool is_unsigned);
void EmitGoto(int block, LDeferredCode* deferred_stack_check = NULL);
void EmitGoto(int block);
void EmitBranch(int left_block, int right_block, Condition cc);
void EmitCmpI(LOperand* left, LOperand* right);
void EmitNumberUntagD(Register input,
DoubleRegister result,
bool deoptimize_on_undefined,
LEnvironment* env);
// Emits optimized code for typeof x == "y". Modifies input register.
@ -263,7 +280,6 @@ class LCodeGen BASE_EMBEDDED {
// true and false label should be made, to optimize fallthrough.
Condition EmitIsObject(Register input,
Register temp1,
Register temp2,
Label* is_not_object,
Label* is_object);
@ -271,6 +287,19 @@ class LCodeGen BASE_EMBEDDED {
// Caller should branch on equal condition.
void EmitIsConstructCall(Register temp1, Register temp2);
void EmitLoadFieldOrConstantFunction(Register result,
Register object,
Handle<Map> type,
Handle<String> name);
struct JumpTableEntry {
explicit inline JumpTableEntry(Address entry)
: label(),
address(entry) { }
Label label;
Address address;
};
LChunk* const chunk_;
MacroAssembler* const masm_;
CompilationInfo* const info_;
@ -279,6 +308,7 @@ class LCodeGen BASE_EMBEDDED {
int current_instruction_;
const ZoneList<LInstruction*>* instructions_;
ZoneList<LEnvironment*> deoptimizations_;
ZoneList<JumpTableEntry> deopt_jump_table_;
ZoneList<Handle<Object> > deoptimization_literals_;
int inlined_function_count_;
Scope* const scope_;

2
deps/v8/src/arm/lithium-gap-resolver-arm.cc

@ -25,6 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#include "arm/lithium-gap-resolver-arm.h"
#include "arm/lithium-codegen-arm.h"

844
deps/v8/src/arm/macro-assembler-arm.cc

File diff suppressed because it is too large

217
deps/v8/src/arm/macro-assembler-arm.h

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -29,13 +29,11 @@
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "assembler.h"
#include "v8globals.h"
namespace v8 {
namespace internal {
// Forward declaration.
class PostCallGenerator;
// ----------------------------------------------------------------------------
// Static helper functions
@ -55,12 +53,6 @@ static inline Operand SmiUntagOperand(Register object) {
const Register cp = { 8 }; // JavaScript context pointer
const Register roots = { 10 }; // Roots array pointer.
enum InvokeJSFlags {
CALL_JS,
JUMP_JS
};
// Flags used for the AllocateInNewSpace functions.
enum AllocationFlags {
// No special flags.
@ -90,15 +82,28 @@ enum ObjectToDoubleFlags {
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
MacroAssembler(void* buffer, int size);
// The isolate parameter can be NULL if the macro assembler should
// not use isolate-dependent functionality. In this case, it's the
// responsibility of the caller to never invoke such function on the
// macro assembler.
MacroAssembler(Isolate* isolate, void* buffer, int size);
// Jump, Call, and Ret pseudo instructions implementing inter-working.
void Jump(Register target, Condition cond = al);
void Jump(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Jump(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Register target, Condition cond = al);
void Call(Register target, Condition cond = al);
int CallSize(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<Code> code,
RelocInfo::Mode rmode,
Condition cond = al);
void CallWithAstId(Handle<Code> code,
RelocInfo::Mode rmode,
unsigned ast_id,
Condition cond = al);
void Ret(Condition cond = al);
// Emit code to discard a non-negative number of pointer-sized elements
@ -135,11 +140,12 @@ class MacroAssembler: public Assembler {
Condition cond = al);
void Call(Label* target);
// Register move. May do nothing if the registers are identical.
void Move(Register dst, Handle<Object> value);
// May do nothing if the registers are identical.
void Move(Register dst, Register src);
// Jumps to the label at the index given by the Smi in "index".
void SmiJumpTable(Register index, Vector<Label*> targets);
void Move(Register dst, Register src, Condition cond = al);
void Move(DoubleRegister dst, DoubleRegister src);
// Load an object from the root table.
void LoadRoot(Register destination,
Heap::RootListIndex index,
@ -184,6 +190,9 @@ class MacroAssembler: public Assembler {
Register address,
Register scratch);
// Push a handle.
void Push(Handle<Object> handle);
// Push two registers. Pushes leftmost register first (to highest address).
void Push(Register src1, Register src2, Condition cond = al) {
ASSERT(!src1.is(src2));
@ -303,6 +312,10 @@ class MacroAssembler: public Assembler {
const Register fpscr_flags,
const Condition cond = al);
void Vmov(const DwVfpRegister dst,
const double imm,
const Condition cond = al);
// ---------------------------------------------------------------------------
// Activation frames
@ -338,29 +351,38 @@ class MacroAssembler: public Assembler {
// ---------------------------------------------------------------------------
// JavaScript invokes
// Setup call kind marking register. The method takes the register as an
// explicit first parameter to make the code more readable at the
// call sites.
void SetCallKind(Register dst, CallKind kind);
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(Register code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL);
const CallWrapper& call_wrapper,
CallKind call_kind);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag);
InvokeFlag flag,
CallKind call_kind);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL);
const CallWrapper& call_wrapper,
CallKind call_kind);
void InvokeFunction(JSFunction* function,
const ParameterCount& actual,
InvokeFlag flag);
InvokeFlag flag,
CallKind call_kind);
void IsObjectJSObjectType(Register heap_object,
Register map,
@ -560,6 +582,12 @@ class MacroAssembler: public Assembler {
InstanceType type);
// Check if a map for a JSObject indicates that the object has fast elements.
// Jump to the specified label if it does not.
void CheckFastElements(Register map,
Register scratch,
Label* fail);
// Check if the map of an object is equal to a specified map (either
// given directly or as an index into the root list) and branch to
// label if not. Skip the smi check if not required (object is known
@ -568,13 +596,29 @@ class MacroAssembler: public Assembler {
Register scratch,
Handle<Map> map,
Label* fail,
bool is_heap_object);
SmiCheckType smi_check_type);
void CheckMap(Register obj,
Register scratch,
Heap::RootListIndex index,
Label* fail,
bool is_heap_object);
SmiCheckType smi_check_type);
// Check if the map of an object is equal to a specified map and branch to a
// specified target if equal. Skip the smi check if not required (object is
// known to be a heap object)
void DispatchMap(Register obj,
Register scratch,
Handle<Map> map,
Handle<Code> success,
SmiCheckType smi_check_type);
// Compare the object in a register to a value from the root list.
// Uses the ip register as scratch.
void CompareRoot(Register obj, Heap::RootListIndex index);
// Load and check the instance type of an object for being a string.
@ -641,11 +685,11 @@ class MacroAssembler: public Assembler {
DwVfpRegister double_scratch,
Label *not_int32);
// Truncates a double using a specific rounding mode.
// Clears the z flag (ne condition) if an overflow occurs.
// If exact_conversion is true, the z flag is also cleared if the conversion
// was inexact, ie. if the double value could not be converted exactly
// to a 32bit integer.
// Truncates a double using a specific rounding mode.
// Clears the z flag (ne condition) if an overflow occurs.
// If exact_conversion is true, the z flag is also cleared if the conversion
// was inexact, ie. if the double value could not be converted exactly
// to a 32bit integer.
void EmitVFPTruncate(VFPRoundingMode rounding_mode,
SwVfpRegister result,
DwVfpRegister double_input,
@ -654,6 +698,27 @@ class MacroAssembler: public Assembler {
CheckForInexactConversion check
= kDontCheckForInexactConversion);
// Helper for EmitECMATruncate.
// This will truncate a floating-point value outside of the signed 32bit
// integer range to a 32bit signed integer.
// Expects the double value loaded in input_high and input_low.
// Exits with the answer in 'result'.
// Note that this code does not work for values in the 32bit range!
void EmitOutOfInt32RangeTruncate(Register result,
Register input_high,
Register input_low,
Register scratch);
// Performs a truncating conversion of a floating point number as used by
// the JS bitwise operations. See ECMA-262 9.5: ToInt32.
// Exits with 'result' holding the answer and all other registers clobbered.
void EmitECMATruncate(Register result,
DwVfpRegister double_input,
SwVfpRegister single_scratch,
Register scratch,
Register scratch2,
Register scratch3);
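For reference, the "ECMA-262 9.5: ToInt32" contract mentioned above can be written down in portable C++. This is a minimal standalone sketch of the numeric behaviour only, not of the register-level code the helper emits:

#include <cmath>
#include <cstdint>

int32_t ToInt32Sketch(double value) {
  if (std::isnan(value) || std::isinf(value)) return 0;  // NaN/Inf map to 0
  double truncated = std::trunc(value);                  // drop the fraction
  double modulo = std::fmod(truncated, 4294967296.0);    // reduce mod 2^32
  if (modulo < 0) modulo += 4294967296.0;
  // Reinterpret the low 32 bits as a signed value (two's-complement targets).
  return static_cast<int32_t>(static_cast<uint32_t>(modulo));
}

// For example, ToInt32Sketch(2147483648.0) == -2147483648 and
// ToInt32Sketch(-5.7) == -5, matching the JS bitwise-operation behaviour.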
// Count leading zeros in a 32 bit word. On ARM5 and later it uses the clz
// instruction. On pre-ARM5 hardware this routine gives the wrong answer
// for 0 (31 instead of 32). Source and scratch can be the same in which case
@ -669,6 +734,11 @@ class MacroAssembler: public Assembler {
// Call a code stub.
void CallStub(CodeStub* stub, Condition cond = al);
// Call a code stub and return the code object called. Try to generate
// the code if necessary. Do not perform a GC but instead return a retry
// after GC failure.
MUST_USE_RESULT MaybeObject* TryCallStub(CodeStub* stub, Condition cond = al);
// Call a code stub.
void TailCallStub(CodeStub* stub, Condition cond = al);
@ -679,7 +749,7 @@ class MacroAssembler: public Assembler {
Condition cond = al);
// Call a runtime routine.
void CallRuntime(Runtime::Function* f, int num_arguments);
void CallRuntime(const Runtime::Function* f, int num_arguments);
void CallRuntimeSaveDoubles(Runtime::FunctionId id);
// Convenience function: Same as above, but takes the fid instead.
@ -707,15 +777,32 @@ class MacroAssembler: public Assembler {
int num_arguments,
int result_size);
int CalculateStackPassedWords(int num_reg_arguments,
int num_double_arguments);
// Before calling a C-function from generated code, align arguments on stack.
// After aligning the frame, non-register arguments must be stored in
// sp[0], sp[4], etc., not pushed. The argument count assumes all arguments
// are word sized.
// are word sized. If double arguments are used, this function assumes that
// all double arguments are stored before core registers; otherwise the
// correct alignment of the double values is not guaranteed.
// Some compilers/platforms require the stack to be aligned when calling
// C++ code.
// Needs a scratch register to do some arithmetic. This register will be
// trashed.
void PrepareCallCFunction(int num_arguments, Register scratch);
void PrepareCallCFunction(int num_reg_arguments,
int num_double_registers,
Register scratch);
void PrepareCallCFunction(int num_reg_arguments,
Register scratch);
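The alignment being established here is the ARM EABI (AAPCS) requirement of an 8-byte aligned stack at public call boundaries. A standalone sketch of the pointer arithmetic involved (illustrative only; the emitted code does this with instructions, using the scratch register):

#include <cassert>
#include <cstdint>

uintptr_t PrepareStackForCCall(uintptr_t sp, int stack_passed_words,
                               int frame_alignment /* assumed 8 under AAPCS */) {
  sp -= static_cast<uintptr_t>(stack_passed_words) * 4;  // room for sp[0], sp[4], ...
  sp &= ~static_cast<uintptr_t>(frame_alignment - 1);    // round down to alignment
  return sp;
}

int main() {
  // Three stack-passed words from an 8-byte aligned sp: the extra rounding
  // keeps the final sp 8-byte aligned as required.
  assert(PrepareStackForCCall(0x1000, 3, 8) == 0xFF0);
  return 0;
}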
// There are two ways of passing double arguments on ARM, depending on
// whether soft or hard floating point ABI is used. These functions
// abstract parameter passing for the three different ways we call
// C functions from generated code.
void SetCallCDoubleArguments(DoubleRegister dreg);
void SetCallCDoubleArguments(DoubleRegister dreg1, DoubleRegister dreg2);
void SetCallCDoubleArguments(DoubleRegister dreg, Register reg);
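Under the soft-float convention a double argument travels in a core register pair (r0:r1 or r2:r3), whereas the hard-float EABI variant keeps it in a VFP register. A standalone sketch of the bit-level split the soft-float path implies, assuming the little-endian word order used on these targets:

#include <cassert>
#include <cstdint>
#include <cstring>

void SplitDoubleForSoftFloat(double value, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));  // memcpy sidesteps strict aliasing
  *lo = static_cast<uint32_t>(bits);         // would be placed in r0 (or r2)
  *hi = static_cast<uint32_t>(bits >> 32);   // would be placed in r1 (or r3)
}

int main() {
  uint32_t lo = 0, hi = 0;
  SplitDoubleForSoftFloat(1.0, &lo, &hi);
  assert(lo == 0 && hi == 0x3FF00000);  // IEEE-754 encoding of 1.0
  return 0;
}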
// Calls a C function and cleans up the space for arguments allocated
// by PrepareCallCFunction. The called function is not allowed to trigger a
@ -723,7 +810,13 @@ class MacroAssembler: public Assembler {
// return address (unless this is somehow accounted for by the called
// function).
void CallCFunction(ExternalReference function, int num_arguments);
void CallCFunction(Register function, int num_arguments);
void CallCFunction(Register function, Register scratch, int num_arguments);
void CallCFunction(ExternalReference function,
int num_reg_arguments,
int num_double_arguments);
void CallCFunction(Register function, Register scratch,
int num_reg_arguments,
int num_double_arguments);
void GetCFunctionDoubleResult(const DoubleRegister dst);
@ -742,8 +835,8 @@ class MacroAssembler: public Assembler {
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
PostCallGenerator* post_call_generator = NULL);
InvokeFlag flag,
const CallWrapper& call_wrapper = NullCallWrapper());
// Store the code object for the given builtin in the target register and
// setup the function in r1.
@ -752,7 +845,10 @@ class MacroAssembler: public Assembler {
// Store the function for the given builtin in the target register.
void GetBuiltinFunction(Register target, Builtins::JavaScript id);
Handle<Object> CodeObject() { return code_object_; }
Handle<Object> CodeObject() {
ASSERT(!code_object_.is_null());
return code_object_;
}
// ---------------------------------------------------------------------------
@ -787,6 +883,15 @@ class MacroAssembler: public Assembler {
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
#if USE_EABI_HARDFLOAT
return true;
#else
return false;
#endif
}
// ---------------------------------------------------------------------------
// Number utilities
@ -797,6 +902,16 @@ class MacroAssembler: public Assembler {
void JumpIfNotPowerOfTwoOrZero(Register reg,
Register scratch,
Label* not_power_of_two_or_zero);
// Check whether the value of reg is a power of two and not zero.
// Control falls through if it is, with scratch containing the mask
// value (reg - 1).
// Otherwise control jumps to the 'zero_and_neg' label if the value of reg is
// zero or negative, or jumps to the 'not_power_of_two' label if the value is
// strictly positive but not a power of two.
void JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
Register scratch,
Label* zero_and_neg,
Label* not_power_of_two);
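The mask the comment refers to is reg - 1: a strictly positive value v is a power of two exactly when v & (v - 1) is zero. A standalone sketch of the predicate these two helpers branch on (names are illustrative):

#include <cassert>
#include <cstdint>

// Returns true (the fall-through case) iff v is a strictly positive power of
// two; on success *mask_out holds v - 1, mirroring the scratch register.
bool IsPowerOfTwoNonZero(int32_t v, int32_t* mask_out) {
  if (v <= 0) return false;              // the zero_and_neg exit
  if ((v & (v - 1)) != 0) return false;  // the not_power_of_two exit
  *mask_out = v - 1;
  return true;
}

int main() {
  int32_t mask = 0;
  assert(IsPowerOfTwoNonZero(64, &mask) && mask == 63);
  assert(!IsPowerOfTwoNonZero(48, &mask));
  assert(!IsPowerOfTwoNonZero(0, &mask) && !IsPowerOfTwoNonZero(-8, &mask));
  return 0;
}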
// ---------------------------------------------------------------------------
// Smi utilities
@ -904,9 +1019,27 @@ class MacroAssembler: public Assembler {
Register result);
void ClampUint8(Register output_reg, Register input_reg);
void ClampDoubleToUint8(Register result_reg,
DoubleRegister input_reg,
DoubleRegister temp_double_reg);
void LoadInstanceDescriptors(Register map, Register descriptors);
private:
void CallCFunctionHelper(Register function,
ExternalReference function_reference,
Register scratch,
int num_reg_arguments,
int num_double_arguments);
void Jump(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
void Call(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
int CallSize(intptr_t target, RelocInfo::Mode rmode, Condition cond = al);
void Call(intptr_t target,
RelocInfo::Mode rmode,
Condition cond = al);
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
@ -915,7 +1048,8 @@ class MacroAssembler: public Assembler {
Register code_reg,
Label* done,
InvokeFlag flag,
PostCallGenerator* post_call_generator = NULL);
const CallWrapper& call_wrapper,
CallKind call_kind);
// Activation support.
void EnterFrame(StackFrame::Type type);
@ -976,17 +1110,6 @@ class CodePatcher {
#endif // ENABLE_DEBUGGER_SUPPORT
// Helper class for generating code or data associated with the code
// right after a call instruction. As an example this can be used to
// generate safepoint data after calls for crankshaft.
class PostCallGenerator {
public:
PostCallGenerator() { }
virtual ~PostCallGenerator() { }
virtual void Generate() = 0;
};
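PostCallGenerator is superseded by the CallWrapper parameter now threaded through the Invoke* signatures above, with NullCallWrapper as the default. The interface lives in assembler.h; the sketch below is an approximation from memory, so the exact method names are an assumption to verify against that header:

// Approximate shape only (verify against deps/v8/src/assembler.h).
class CallWrapperSketch {
 public:
  virtual ~CallWrapperSketch() { }
  // Called just before the call instruction is emitted; the argument is the
  // size in bytes of the call sequence, so safepoint data can be planned.
  virtual void BeforeCall(int call_size) const = 0;
  // Called just after the call, i.e. at the return address of the call site.
  virtual void AfterCall() const = 0;
};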
// -----------------------------------------------------------------------------
// Static helper functions.

45
deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -60,6 +60,7 @@ namespace internal {
* Each call to a public method should retain this convention.
*
* The stack will have the following structure:
* - fp[52] Isolate* isolate (Address of the current isolate)
* - fp[48] direct_call (if 1, direct call from JavaScript code,
* if 0, call through the runtime system).
* - fp[44] stack_area_base (High end of the memory area to use as
@ -115,7 +116,7 @@ namespace internal {
RegExpMacroAssemblerARM::RegExpMacroAssemblerARM(
Mode mode,
int registers_to_save)
: masm_(new MacroAssembler(NULL, kRegExpCodeSize)),
: masm_(new MacroAssembler(Isolate::Current(), NULL, kRegExpCodeSize)),
mode_(mode),
num_registers_(registers_to_save),
num_saved_registers_(registers_to_save),
@ -346,7 +347,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ sub(current_input_offset(), r2, end_of_input_address());
} else {
ASSERT(mode_ == UC16);
int argument_count = 3;
int argument_count = 4;
__ PrepareCallCFunction(argument_count, r2);
// r0 - offset of start of capture
@ -357,6 +358,7 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
// r0: Address byte_offset1 - Address captured substring's start.
// r1: Address byte_offset2 - Address of current character position.
// r2: size_t byte_length - length of capture in bytes(!)
// r3: Isolate* isolate
// Address of start of capture.
__ add(r0, r0, Operand(end_of_input_address()));
@ -366,9 +368,11 @@ void RegExpMacroAssemblerARM::CheckNotBackReferenceIgnoreCase(
__ mov(r4, Operand(r1));
// Address of current input position.
__ add(r1, current_input_offset(), Operand(end_of_input_address()));
// Isolate.
__ mov(r3, Operand(ExternalReference::isolate_address()));
ExternalReference function =
ExternalReference::re_case_insensitive_compare_uc16();
ExternalReference::re_case_insensitive_compare_uc16(masm_->isolate());
__ CallCFunction(function, argument_count);
// Check if function returned non-zero for success or zero for failure.
@ -601,7 +605,7 @@ void RegExpMacroAssemblerARM::Fail() {
}
Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Handle<HeapObject> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
// Finalize code - write the entry point code now we know how many
// registers we need.
@ -626,7 +630,7 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label stack_ok;
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit();
ExternalReference::address_of_stack_limit(masm_->isolate());
__ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ sub(r0, sp, r0, SetCC);
@ -777,12 +781,13 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
Label grow_failed;
// Call GrowStack(backtrack_stackpointer(), &stack_base)
static const int num_arguments = 2;
static const int num_arguments = 3;
__ PrepareCallCFunction(num_arguments, r0);
__ mov(r0, backtrack_stackpointer());
__ add(r1, frame_pointer(), Operand(kStackHighEnd));
__ mov(r2, Operand(ExternalReference::isolate_address()));
ExternalReference grow_stack =
ExternalReference::re_grow_stack();
ExternalReference::re_grow_stack(masm_->isolate());
__ CallCFunction(grow_stack, num_arguments);
// If return NULL, we have failed to grow the stack, and
// must exit with a stack-overflow exception.
@ -804,11 +809,11 @@ Handle<Object> RegExpMacroAssemblerARM::GetCode(Handle<String> source) {
CodeDesc code_desc;
masm_->GetCode(&code_desc);
Handle<Code> code = Factory::NewCode(code_desc,
Handle<Code> code = FACTORY->NewCode(code_desc,
Code::ComputeFlags(Code::REGEXP),
masm_->CodeObject());
PROFILE(RegExpCodeCreateEvent(*code, *source));
return Handle<Object>::cast(code);
PROFILE(Isolate::Current(), RegExpCodeCreateEvent(*code, *source));
return Handle<HeapObject>::cast(code);
}
@ -894,13 +899,12 @@ void RegExpMacroAssemblerARM::PushBacktrack(Label* label) {
constant_offset - offset_of_pc_register_read;
ASSERT(pc_offset_of_constant < 0);
if (is_valid_memory_offset(pc_offset_of_constant)) {
masm_->BlockConstPoolBefore(masm_->pc_offset() + Assembler::kInstrSize);
Assembler::BlockConstPoolScope block_const_pool(masm_);
__ ldr(r0, MemOperand(pc, pc_offset_of_constant));
} else {
// Not a 12-bit offset, so it needs to be loaded from the constant
// pool.
masm_->BlockConstPoolBefore(
masm_->pc_offset() + 2 * Assembler::kInstrSize);
Assembler::BlockConstPoolScope block_const_pool(masm_);
__ mov(r0, Operand(pc_offset_of_constant + Assembler::kInstrSize));
__ ldr(r0, MemOperand(pc, r0));
}
@ -998,7 +1002,7 @@ void RegExpMacroAssemblerARM::CallCheckStackGuardState(Register scratch) {
__ mov(r1, Operand(masm_->CodeObject()));
// r0 becomes return address pointer.
ExternalReference stack_guard_check =
ExternalReference::re_check_stack_guard_state();
ExternalReference::re_check_stack_guard_state(masm_->isolate());
CallCFunctionUsingStub(stack_guard_check, num_arguments);
}
@ -1013,8 +1017,10 @@ static T& frame_entry(Address re_frame, int frame_offset) {
int RegExpMacroAssemblerARM::CheckStackGuardState(Address* return_address,
Code* re_code,
Address re_frame) {
if (StackGuard::IsStackOverflow()) {
Top::StackOverflow();
Isolate* isolate = frame_entry<Isolate*>(re_frame, kIsolate);
ASSERT(isolate == Isolate::Current());
if (isolate->stack_guard()->IsStackOverflow()) {
isolate->StackOverflow();
return EXCEPTION;
}
@ -1158,7 +1164,7 @@ void RegExpMacroAssemblerARM::Pop(Register target) {
void RegExpMacroAssemblerARM::CheckPreemption() {
// Check for preemption.
ExternalReference stack_limit =
ExternalReference::address_of_stack_limit();
ExternalReference::address_of_stack_limit(masm_->isolate());
__ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ cmp(sp, r0);
@ -1168,7 +1174,7 @@ void RegExpMacroAssemblerARM::CheckPreemption() {
void RegExpMacroAssemblerARM::CheckStackLimit() {
ExternalReference stack_limit =
ExternalReference::address_of_regexp_stack_limit();
ExternalReference::address_of_regexp_stack_limit(masm_->isolate());
__ mov(r0, Operand(stack_limit));
__ ldr(r0, MemOperand(r0));
__ cmp(backtrack_stackpointer(), Operand(r0));
@ -1178,8 +1184,7 @@ void RegExpMacroAssemblerARM::CheckStackLimit() {
void RegExpMacroAssemblerARM::EmitBacktrackConstantPool() {
__ CheckConstPool(false, false);
__ BlockConstPoolBefore(
masm_->pc_offset() + kBacktrackConstantPoolSize * Assembler::kInstrSize);
Assembler::BlockConstPoolScope block_const_pool(masm_);
backtrack_constant_pool_offset_ = masm_->pc_offset();
for (int i = 0; i < kBacktrackConstantPoolSize; i++) {
__ emit(0);

3
deps/v8/src/arm/regexp-macro-assembler-arm.h

@ -82,7 +82,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
virtual bool CheckSpecialCharacterClass(uc16 type,
Label* on_no_match);
virtual void Fail();
virtual Handle<Object> GetCode(Handle<String> source);
virtual Handle<HeapObject> GetCode(Handle<String> source);
virtual void GoTo(Label* label);
virtual void IfRegisterGE(int reg, int comparand, Label* if_ge);
virtual void IfRegisterLT(int reg, int comparand, Label* if_lt);
@ -127,6 +127,7 @@ class RegExpMacroAssemblerARM: public NativeRegExpMacroAssembler {
static const int kRegisterOutput = kSecondaryReturnAddress + kPointerSize;
static const int kStackHighEnd = kRegisterOutput + kPointerSize;
static const int kDirectCall = kStackHighEnd + kPointerSize;
static const int kIsolate = kDirectCall + kPointerSize;
// Below the frame pointer.
// Register parameters stored by setup code.
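With 4-byte pointers these chained constants reproduce the frame layout documented at the top of regexp-macro-assembler-arm.cc (fp[44] stack_area_base, fp[48] direct_call, fp[52] isolate). A quick standalone check, assuming kStackHighEnd is 44 as that comment indicates:

#include <cassert>

int main() {
  const int kPointerSize = 4;            // 32-bit ARM
  const int kStackHighEnd = 44;          // fp[44] stack_area_base (assumed)
  const int kDirectCall = kStackHighEnd + kPointerSize;  // fp[48]
  const int kIsolate = kDirectCall + kPointerSize;       // fp[52], the new slot
  assert(kDirectCall == 48 && kIsolate == 52);
  return 0;
}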

555
deps/v8/src/arm/simulator-arm.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -49,12 +49,12 @@ namespace internal {
// Windows C Run-Time Library does not provide vsscanf.
#define SScanF sscanf // NOLINT
// The Debugger class is used by the simulator while debugging simulated ARM
// The ArmDebugger class is used by the simulator while debugging simulated ARM
// code.
class Debugger {
class ArmDebugger {
public:
explicit Debugger(Simulator* sim);
~Debugger();
explicit ArmDebugger(Simulator* sim);
~ArmDebugger();
void Stop(Instruction* instr);
void Debug();
@ -67,6 +67,7 @@ class Debugger {
Simulator* sim_;
int32_t GetRegisterValue(int regnum);
double GetRegisterPairDoubleValue(int regnum);
double GetVFPDoubleRegisterValue(int regnum);
bool GetValue(const char* desc, int32_t* value);
bool GetVFPSingleValue(const char* desc, float* value);
@ -83,12 +84,12 @@ class Debugger {
};
Debugger::Debugger(Simulator* sim) {
ArmDebugger::ArmDebugger(Simulator* sim) {
sim_ = sim;
}
Debugger::~Debugger() {
ArmDebugger::~ArmDebugger() {
}
@ -105,7 +106,7 @@ static void InitializeCoverage() {
}
void Debugger::Stop(Instruction* instr) {
void ArmDebugger::Stop(Instruction* instr) {
// Get the stop code.
uint32_t code = instr->SvcValue() & kStopCodeMask;
// Retrieve the encoded address, which comes just after this stop.
@ -137,7 +138,7 @@ static void InitializeCoverage() {
}
void Debugger::Stop(Instruction* instr) {
void ArmDebugger::Stop(Instruction* instr) {
// Get the stop code.
uint32_t code = instr->SvcValue() & kStopCodeMask;
// Retrieve the encoded address, which comes just after this stop.
@ -159,7 +160,7 @@ void Debugger::Stop(Instruction* instr) {
#endif
int32_t Debugger::GetRegisterValue(int regnum) {
int32_t ArmDebugger::GetRegisterValue(int regnum) {
if (regnum == kPCRegister) {
return sim_->get_pc();
} else {
@ -168,12 +169,17 @@ int32_t Debugger::GetRegisterValue(int regnum) {
}
double Debugger::GetVFPDoubleRegisterValue(int regnum) {
double ArmDebugger::GetRegisterPairDoubleValue(int regnum) {
return sim_->get_double_from_register_pair(regnum);
}
double ArmDebugger::GetVFPDoubleRegisterValue(int regnum) {
return sim_->get_double_from_d_register(regnum);
}
bool Debugger::GetValue(const char* desc, int32_t* value) {
bool ArmDebugger::GetValue(const char* desc, int32_t* value) {
int regnum = Registers::Number(desc);
if (regnum != kNoRegister) {
*value = GetRegisterValue(regnum);
@ -189,7 +195,7 @@ bool Debugger::GetValue(const char* desc, int32_t* value) {
}
bool Debugger::GetVFPSingleValue(const char* desc, float* value) {
bool ArmDebugger::GetVFPSingleValue(const char* desc, float* value) {
bool is_double;
int regnum = VFPRegisters::Number(desc, &is_double);
if (regnum != kNoRegister && !is_double) {
@ -200,7 +206,7 @@ bool Debugger::GetVFPSingleValue(const char* desc, float* value) {
}
bool Debugger::GetVFPDoubleValue(const char* desc, double* value) {
bool ArmDebugger::GetVFPDoubleValue(const char* desc, double* value) {
bool is_double;
int regnum = VFPRegisters::Number(desc, &is_double);
if (regnum != kNoRegister && is_double) {
@ -211,7 +217,7 @@ bool Debugger::GetVFPDoubleValue(const char* desc, double* value) {
}
bool Debugger::SetBreakpoint(Instruction* breakpc) {
bool ArmDebugger::SetBreakpoint(Instruction* breakpc) {
// Check if a breakpoint can be set. If not return without any side-effects.
if (sim_->break_pc_ != NULL) {
return false;
@ -226,7 +232,7 @@ bool Debugger::SetBreakpoint(Instruction* breakpc) {
}
bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
bool ArmDebugger::DeleteBreakpoint(Instruction* breakpc) {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
@ -237,21 +243,21 @@ bool Debugger::DeleteBreakpoint(Instruction* breakpc) {
}
void Debugger::UndoBreakpoints() {
void ArmDebugger::UndoBreakpoints() {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(sim_->break_instr_);
}
}
void Debugger::RedoBreakpoints() {
void ArmDebugger::RedoBreakpoints() {
if (sim_->break_pc_ != NULL) {
sim_->break_pc_->SetInstructionBits(kBreakpointInstr);
}
}
void Debugger::Debug() {
void ArmDebugger::Debug() {
intptr_t last_pc = -1;
bool done = false;
@ -305,27 +311,45 @@ void Debugger::Debug() {
// Leave the debugger shell.
done = true;
} else if ((strcmp(cmd, "p") == 0) || (strcmp(cmd, "print") == 0)) {
if (argc == 2) {
if (argc == 2 || (argc == 3 && strcmp(arg2, "fp") == 0)) {
int32_t value;
float svalue;
double dvalue;
if (strcmp(arg1, "all") == 0) {
for (int i = 0; i < kNumRegisters; i++) {
value = GetRegisterValue(i);
PrintF("%3s: 0x%08x %10d\n", Registers::Name(i), value, value);
PrintF("%3s: 0x%08x %10d", Registers::Name(i), value, value);
if ((argc == 3 && strcmp(arg2, "fp") == 0) &&
i < 8 &&
(i % 2) == 0) {
dvalue = GetRegisterPairDoubleValue(i);
PrintF(" (%f)\n", dvalue);
} else {
PrintF("\n");
}
}
for (int i = 0; i < kNumVFPDoubleRegisters; i++) {
dvalue = GetVFPDoubleRegisterValue(i);
PrintF("%3s: %f\n",
VFPRegisters::Name(i, true), dvalue);
uint64_t as_words = BitCast<uint64_t>(dvalue);
PrintF("%3s: %f 0x%08x %08x\n",
VFPRegisters::Name(i, true),
dvalue,
static_cast<uint32_t>(as_words >> 32),
static_cast<uint32_t>(as_words & 0xffffffff));
}
} else {
if (GetValue(arg1, &value)) {
PrintF("%s: 0x%08x %d \n", arg1, value, value);
} else if (GetVFPSingleValue(arg1, &svalue)) {
PrintF("%s: %f \n", arg1, svalue);
uint32_t as_word = BitCast<uint32_t>(svalue);
PrintF("%s: %f 0x%08x\n", arg1, svalue, as_word);
} else if (GetVFPDoubleValue(arg1, &dvalue)) {
PrintF("%s: %f \n", arg1, dvalue);
uint64_t as_words = BitCast<uint64_t>(dvalue);
PrintF("%s: %f 0x%08x %08x\n",
arg1,
dvalue,
static_cast<uint32_t>(as_words >> 32),
static_cast<uint32_t>(as_words & 0xffffffff));
} else {
PrintF("%s unrecognized\n", arg1);
}
@ -380,11 +404,24 @@ void Debugger::Debug() {
end = cur + words;
while (cur < end) {
PrintF(" 0x%08x: 0x%08x %10d\n",
PrintF(" 0x%08x: 0x%08x %10d",
reinterpret_cast<intptr_t>(cur), *cur, *cur);
HeapObject* obj = reinterpret_cast<HeapObject*>(*cur);
int value = *cur;
Heap* current_heap = v8::internal::Isolate::Current()->heap();
if (current_heap->Contains(obj) || ((value & 1) == 0)) {
PrintF(" (");
if ((value & 1) == 0) {
PrintF("smi %d", value / 2);
} else {
obj->ShortPrint();
}
PrintF(")");
}
PrintF("\n");
cur++;
}
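The even/odd test in this dump relies on V8's 32-bit smi tagging: a small integer is stored shifted left by one with a clear low bit, so an even word is a smi and value / 2 recovers it. A standalone illustration:

#include <cassert>
#include <cstdint>

bool IsSmiWord(int32_t word)       { return (word & 1) == 0; }
int32_t SmiWordValue(int32_t word) { return word / 2; }  // smis are even, so exact

int main() {
  assert(IsSmiWord(42 * 2) && SmiWordValue(42 * 2) == 42);
  assert(IsSmiWord(-7 * 2) && SmiWordValue(-7 * 2) == -7);
  assert(!IsSmiWord(0x12345679));  // odd word: a tagged heap-object pointer
  return 0;
}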
} else if (strcmp(cmd, "disasm") == 0) {
} else if (strcmp(cmd, "disasm") == 0 || strcmp(cmd, "di") == 0) {
disasm::NameConverter converter;
disasm::Disassembler dasm(converter);
// use a reasonably large buffer
@ -398,11 +435,23 @@ void Debugger::Debug() {
cur = reinterpret_cast<byte*>(sim_->get_pc());
end = cur + (10 * Instruction::kInstrSize);
} else if (argc == 2) {
int32_t value;
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
// Disassemble <arg1> instructions.
end = cur + (value * Instruction::kInstrSize);
int regnum = Registers::Number(arg1);
if (regnum != kNoRegister || strncmp(arg1, "0x", 2) == 0) {
// The argument is an address or a register name.
int32_t value;
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(value);
// Disassemble 10 instructions at <arg1>.
end = cur + (10 * Instruction::kInstrSize);
}
} else {
// The argument is the number of instructions.
int32_t value;
if (GetValue(arg1, &value)) {
cur = reinterpret_cast<byte*>(sim_->get_pc());
// Disassemble <arg1> instructions.
end = cur + (value * Instruction::kInstrSize);
}
}
} else {
int32_t value1;
@ -515,6 +564,7 @@ void Debugger::Debug() {
PrintF("print <register>\n");
PrintF(" print register content (alias 'p')\n");
PrintF(" use register name 'all' to print all registers\n");
PrintF(" add argument 'fp' to print register pair double values\n");
PrintF("printobject <register>\n");
PrintF(" print an object from a register (alias 'po')\n");
PrintF("flags\n");
@ -524,8 +574,10 @@ void Debugger::Debug() {
PrintF("mem <address> [<words>]\n");
PrintF(" dump memory content, default dump 10 words)\n");
PrintF("disasm [<instructions>]\n");
PrintF("disasm [[<address>] <instructions>]\n");
PrintF(" disassemble code, default is 10 instructions from pc\n");
PrintF("disasm [<address/register>]\n");
PrintF("disasm [[<address/register>] <instructions>]\n");
PrintF(" disassemble code, default is 10 instructions\n");
PrintF(" from pc (alias 'di')\n");
PrintF("gdb\n");
PrintF(" enter gdb\n");
PrintF("break <address>\n");
@ -539,11 +591,11 @@ void Debugger::Debug() {
PrintF(" Stops are debug instructions inserted by\n");
PrintF(" the Assembler::stop() function.\n");
PrintF(" When hitting a stop, the Simulator will\n");
PrintF(" stop and and give control to the Debugger.\n");
PrintF(" stop and and give control to the ArmDebugger.\n");
PrintF(" The first %d stop codes are watched:\n",
Simulator::kNumOfWatchedStops);
PrintF(" - They can be enabled / disabled: the Simulator\n");
PrintF(" will / won't stop when hitting them.\n");
PrintF(" will / won't stop when hitting them.\n");
PrintF(" - The Simulator keeps track of how many times they \n");
PrintF(" are met. (See the info command.) Going over a\n");
PrintF(" disabled stop still increases its counter. \n");
@ -593,7 +645,9 @@ static bool AllOnOnePage(uintptr_t start, int size) {
}
void Simulator::FlushICache(void* start_addr, size_t size) {
void Simulator::FlushICache(v8::internal::HashMap* i_cache,
void* start_addr,
size_t size) {
intptr_t start = reinterpret_cast<intptr_t>(start_addr);
int intra_line = (start & CachePage::kLineMask);
start -= intra_line;
@ -602,22 +656,22 @@ void Simulator::FlushICache(void* start_addr, size_t size) {
int offset = (start & CachePage::kPageMask);
while (!AllOnOnePage(start, size - 1)) {
int bytes_to_flush = CachePage::kPageSize - offset;
FlushOnePage(start, bytes_to_flush);
FlushOnePage(i_cache, start, bytes_to_flush);
start += bytes_to_flush;
size -= bytes_to_flush;
ASSERT_EQ(0, start & CachePage::kPageMask);
offset = 0;
}
if (size != 0) {
FlushOnePage(start, size);
FlushOnePage(i_cache, start, size);
}
}
CachePage* Simulator::GetCachePage(void* page) {
v8::internal::HashMap::Entry* entry = i_cache_->Lookup(page,
ICacheHash(page),
true);
CachePage* Simulator::GetCachePage(v8::internal::HashMap* i_cache, void* page) {
v8::internal::HashMap::Entry* entry = i_cache->Lookup(page,
ICacheHash(page),
true);
if (entry->value == NULL) {
CachePage* new_page = new CachePage();
entry->value = new_page;
@ -627,25 +681,28 @@ CachePage* Simulator::GetCachePage(void* page) {
// Flush from start up to and not including start + size.
void Simulator::FlushOnePage(intptr_t start, int size) {
void Simulator::FlushOnePage(v8::internal::HashMap* i_cache,
intptr_t start,
int size) {
ASSERT(size <= CachePage::kPageSize);
ASSERT(AllOnOnePage(start, size - 1));
ASSERT((start & CachePage::kLineMask) == 0);
ASSERT((size & CachePage::kLineMask) == 0);
void* page = reinterpret_cast<void*>(start & (~CachePage::kPageMask));
int offset = (start & CachePage::kPageMask);
CachePage* cache_page = GetCachePage(page);
CachePage* cache_page = GetCachePage(i_cache, page);
char* valid_bytemap = cache_page->ValidityByte(offset);
memset(valid_bytemap, CachePage::LINE_INVALID, size >> CachePage::kLineShift);
}
void Simulator::CheckICache(Instruction* instr) {
void Simulator::CheckICache(v8::internal::HashMap* i_cache,
Instruction* instr) {
intptr_t address = reinterpret_cast<intptr_t>(instr);
void* page = reinterpret_cast<void*>(address & (~CachePage::kPageMask));
void* line = reinterpret_cast<void*>(address & (~CachePage::kLineMask));
int offset = (address & CachePage::kPageMask);
CachePage* cache_page = GetCachePage(page);
CachePage* cache_page = GetCachePage(i_cache, page);
char* cache_valid_byte = cache_page->ValidityByte(offset);
bool cache_hit = (*cache_valid_byte == CachePage::LINE_VALID);
char* cached_line = cache_page->CachedData(offset & ~CachePage::kLineMask);
@ -662,29 +719,21 @@ void Simulator::CheckICache(Instruction* instr) {
}
// Create one simulator per thread and keep it in thread local storage.
static v8::internal::Thread::LocalStorageKey simulator_key;
bool Simulator::initialized_ = false;
void Simulator::Initialize() {
if (initialized_) return;
simulator_key = v8::internal::Thread::CreateThreadLocalKey();
initialized_ = true;
::v8::internal::ExternalReference::set_redirector(&RedirectExternalReference);
void Simulator::Initialize(Isolate* isolate) {
if (isolate->simulator_initialized()) return;
isolate->set_simulator_initialized(true);
::v8::internal::ExternalReference::set_redirector(isolate,
&RedirectExternalReference);
}
v8::internal::HashMap* Simulator::i_cache_ = NULL;
Simulator::Simulator() {
Simulator::Simulator(Isolate* isolate) : isolate_(isolate) {
i_cache_ = isolate_->simulator_i_cache();
if (i_cache_ == NULL) {
i_cache_ = new v8::internal::HashMap(&ICacheMatch);
isolate_->set_simulator_i_cache(i_cache_);
}
Initialize();
Initialize(isolate);
// Setup simulator support first. Some of this information is needed to
// setup the architecture state.
size_t stack_size = 1 * 1024*1024; // allocate 1MB for stack
@ -748,11 +797,14 @@ class Redirection {
: external_function_(external_function),
swi_instruction_(al | (0xf*B24) | kCallRtRedirected),
type_(type),
next_(list_) {
Simulator::current()->
FlushICache(reinterpret_cast<void*>(&swi_instruction_),
Instruction::kInstrSize);
list_ = this;
next_(NULL) {
Isolate* isolate = Isolate::Current();
next_ = isolate->simulator_redirection();
Simulator::current(isolate)->
FlushICache(isolate->simulator_i_cache(),
reinterpret_cast<void*>(&swi_instruction_),
Instruction::kInstrSize);
isolate->set_simulator_redirection(this);
}
void* address_of_swi_instruction() {
@ -764,8 +816,9 @@ class Redirection {
static Redirection* Get(void* external_function,
ExternalReference::Type type) {
Redirection* current;
for (current = list_; current != NULL; current = current->next_) {
Isolate* isolate = Isolate::Current();
Redirection* current = isolate->simulator_redirection();
for (; current != NULL; current = current->next_) {
if (current->external_function_ == external_function) return current;
}
return new Redirection(external_function, type);
@ -783,13 +836,9 @@ class Redirection {
uint32_t swi_instruction_;
ExternalReference::Type type_;
Redirection* next_;
static Redirection* list_;
};
Redirection* Redirection::list_ = NULL;
void* Simulator::RedirectExternalReference(void* external_function,
ExternalReference::Type type) {
Redirection* redirection = Redirection::Get(external_function, type);
@ -798,14 +847,16 @@ void* Simulator::RedirectExternalReference(void* external_function,
// Get the active Simulator for the current thread.
Simulator* Simulator::current() {
Initialize();
Simulator* sim = reinterpret_cast<Simulator*>(
v8::internal::Thread::GetThreadLocal(simulator_key));
Simulator* Simulator::current(Isolate* isolate) {
v8::internal::Isolate::PerIsolateThreadData* isolate_data =
isolate->FindOrAllocatePerThreadDataForThisThread();
ASSERT(isolate_data != NULL);
Simulator* sim = isolate_data->simulator();
if (sim == NULL) {
// TODO(146): delete the simulator object when a thread goes away.
sim = new Simulator();
v8::internal::Thread::SetThreadLocal(simulator_key, sim);
// TODO(146): delete the simulator object when a thread/isolate goes away.
sim = new Simulator(isolate);
isolate_data->set_simulator(sim);
}
return sim;
}
@ -834,6 +885,19 @@ int32_t Simulator::get_register(int reg) const {
}
double Simulator::get_double_from_register_pair(int reg) {
ASSERT((reg >= 0) && (reg < num_registers) && ((reg % 2) == 0));
double dm_val = 0.0;
// Read the bits from the unsigned integer register_[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(vfp_register[0])];
memcpy(buffer, &registers_[reg], 2 * sizeof(registers_[0]));
memcpy(&dm_val, buffer, 2 * sizeof(registers_[0]));
return(dm_val);
}
void Simulator::set_dw_register(int dreg, const int* dbl) {
ASSERT((dreg >= 0) && (dreg < num_d_registers));
registers_[dreg] = dbl[0];
@ -899,12 +963,7 @@ void Simulator::set_d_register_from_double(int dreg, const double& dbl) {
// 2*sreg and 2*sreg+1.
char buffer[2 * sizeof(vfp_register[0])];
memcpy(buffer, &dbl, 2 * sizeof(vfp_register[0]));
#ifndef BIG_ENDIAN_FLOATING_POINT
memcpy(&vfp_register[dreg * 2], buffer, 2 * sizeof(vfp_register[0]));
#else
memcpy(&vfp_register[dreg * 2], &buffer[4], sizeof(vfp_register[0]));
memcpy(&vfp_register[dreg * 2 + 1], &buffer[0], sizeof(vfp_register[0]));
#endif
}
@ -941,37 +1000,80 @@ double Simulator::get_double_from_d_register(int dreg) {
// Read the bits from the unsigned integer vfp_register[] array
// into the double precision floating point value and return it.
char buffer[2 * sizeof(vfp_register[0])];
#ifdef BIG_ENDIAN_FLOATING_POINT
memcpy(&buffer[0], &vfp_register[2 * dreg + 1], sizeof(vfp_register[0]));
memcpy(&buffer[4], &vfp_register[2 * dreg], sizeof(vfp_register[0]));
#else
memcpy(buffer, &vfp_register[2 * dreg], 2 * sizeof(vfp_register[0]));
#endif
memcpy(&dm_val, buffer, 2 * sizeof(vfp_register[0]));
return(dm_val);
}
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
// For use in calls that take two double values, constructed either
// from r0-r3 or d0 and d1.
void Simulator::GetFpArgs(double* x, double* y) {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[2 * sizeof(registers_[0])];
// Registers 0 and 1 -> x.
memcpy(buffer, registers_, sizeof(buffer));
memcpy(x, buffer, sizeof(buffer));
// Registers 2 and 3 -> y.
memcpy(buffer, registers_ + 2, sizeof(buffer));
memcpy(y, buffer, sizeof(buffer));
if (use_eabi_hardfloat()) {
*x = vfp_register[0];
*y = vfp_register[1];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
memcpy(buffer, registers_, sizeof(*x));
memcpy(x, buffer, sizeof(*x));
// Registers 2 and 3 -> y.
memcpy(buffer, registers_ + 2, sizeof(*y));
memcpy(y, buffer, sizeof(*y));
}
}
// For use in calls that take one double value, constructed either
// from r0 and r1 or d0.
void Simulator::GetFpArgs(double* x) {
if (use_eabi_hardfloat()) {
*x = vfp_register[0];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
memcpy(buffer, registers_, sizeof(*x));
memcpy(x, buffer, sizeof(*x));
}
}
// For use in calls that take one double value constructed either
// from r0 and r1 or d0 and one integer value.
void Simulator::GetFpArgs(double* x, int32_t* y) {
if (use_eabi_hardfloat()) {
*x = vfp_register[0];
*y = registers_[1];
} else {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[sizeof(*x)];
// Registers 0 and 1 -> x.
memcpy(buffer, registers_, sizeof(*x));
memcpy(x, buffer, sizeof(*x));
// Register 2 -> y.
memcpy(buffer, registers_ + 2, sizeof(*y));
memcpy(y, buffer, sizeof(*y));
}
}
// The return value is either in r0/r1 or d0.
void Simulator::SetFpResult(const double& result) {
char buffer[2 * sizeof(registers_[0])];
memcpy(buffer, &result, sizeof(buffer));
// result -> registers 0 and 1.
memcpy(registers_, buffer, sizeof(buffer));
if (use_eabi_hardfloat()) {
char buffer[2 * sizeof(vfp_register[0])];
memcpy(buffer, &result, sizeof(buffer));
// Copy result to d0.
memcpy(vfp_register, buffer, sizeof(buffer));
} else {
char buffer[2 * sizeof(registers_[0])];
memcpy(buffer, &result, sizeof(buffer));
// Copy result to r0 and r1.
memcpy(registers_, buffer, sizeof(buffer));
}
}
@ -1225,12 +1327,13 @@ void Simulator::SetVFlag(bool val) {
// Calculate C flag value for additions.
bool Simulator::CarryFrom(int32_t left, int32_t right) {
bool Simulator::CarryFrom(int32_t left, int32_t right, int32_t carry) {
uint32_t uleft = static_cast<uint32_t>(left);
uint32_t uright = static_cast<uint32_t>(right);
uint32_t urest = 0xffffffffU - uleft;
return (uright > urest);
return (uright > urest) ||
(carry && (((uright + 1) > urest) || (uright > (urest - 1))));
}
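The expression above computes the carry-out of a 32-bit addition without resorting to 64-bit arithmetic; an equivalent formulation of the intended semantics, with a few spot checks:

#include <cassert>
#include <cstdint>

bool CarryFrom64(uint32_t left, uint32_t right, uint32_t carry_in) {
  uint64_t sum = static_cast<uint64_t>(left) + right + carry_in;
  return sum > 0xFFFFFFFFull;  // the 32-bit addition wrapped, so C is set
}

int main() {
  assert(CarryFrom64(0xFFFFFFFFu, 1u, 0u));   // wraps to 0: carry out
  assert(!CarryFrom64(0x7FFFFFFFu, 1u, 0u));  // no unsigned overflow
  assert(CarryFrom64(0xFFFFFFFFu, 0u, 1u));   // carry-in alone wraps
  return 0;
}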
@ -1465,36 +1568,34 @@ static int count_bits(int bit_vector) {
}
// Addressing Mode 4 - Load and Store Multiple
void Simulator::HandleRList(Instruction* instr, bool load) {
void Simulator::ProcessPUW(Instruction* instr,
int num_regs,
int reg_size,
intptr_t* start_address,
intptr_t* end_address) {
int rn = instr->RnValue();
int32_t rn_val = get_register(rn);
int rlist = instr->RlistValue();
int num_regs = count_bits(rlist);
intptr_t start_address = 0;
intptr_t end_address = 0;
switch (instr->PUField()) {
case da_x: {
UNIMPLEMENTED();
break;
}
case ia_x: {
start_address = rn_val;
end_address = rn_val + (num_regs * 4) - 4;
rn_val = rn_val + (num_regs * 4);
*start_address = rn_val;
*end_address = rn_val + (num_regs * reg_size) - reg_size;
rn_val = rn_val + (num_regs * reg_size);
break;
}
case db_x: {
start_address = rn_val - (num_regs * 4);
end_address = rn_val - 4;
rn_val = start_address;
*start_address = rn_val - (num_regs * reg_size);
*end_address = rn_val - reg_size;
rn_val = *start_address;
break;
}
case ib_x: {
start_address = rn_val + 4;
end_address = rn_val + (num_regs * 4);
rn_val = end_address;
*start_address = rn_val + reg_size;
*end_address = rn_val + (num_regs * reg_size);
rn_val = *end_address;
break;
}
default: {
@ -1505,6 +1606,17 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
if (instr->HasW()) {
set_register(rn, rn_val);
}
}
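As a worked example of the increment-after (ia) case: transferring three 4-byte registers from base 0x1000 uses slots 0x1000, 0x1004 and 0x1008 and writes back 0x100C. A standalone sketch of that arithmetic (names are illustrative):

#include <cassert>
#include <cstdint>

void IncrementAfter(uint32_t base, int num_regs, int reg_size,
                    uint32_t* start, uint32_t* end, uint32_t* writeback) {
  *start = base;                                 // first slot
  *end = base + num_regs * reg_size - reg_size;  // last slot
  *writeback = base + num_regs * reg_size;       // value written back to Rn
}

int main() {
  uint32_t start = 0, end = 0, wb = 0;
  IncrementAfter(0x1000, 3, 4, &start, &end, &wb);
  assert(start == 0x1000 && end == 0x1008 && wb == 0x100C);
  return 0;
}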
// Addressing Mode 4 - Load and Store Multiple
void Simulator::HandleRList(Instruction* instr, bool load) {
int rlist = instr->RlistValue();
int num_regs = count_bits(rlist);
intptr_t start_address = 0;
intptr_t end_address = 0;
ProcessPUW(instr, num_regs, kPointerSize, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
int reg = 0;
while (rlist != 0) {
@ -1523,6 +1635,57 @@ void Simulator::HandleRList(Instruction* instr, bool load) {
}
// Addressing Mode 6 - Load and Store Multiple Coprocessor registers.
void Simulator::HandleVList(Instruction* instr) {
VFPRegPrecision precision =
(instr->SzValue() == 0) ? kSinglePrecision : kDoublePrecision;
int operand_size = (precision == kSinglePrecision) ? 4 : 8;
bool load = (instr->VLValue() == 0x1);
int vd;
int num_regs;
vd = instr->VFPDRegValue(precision);
if (precision == kSinglePrecision) {
num_regs = instr->Immed8Value();
} else {
num_regs = instr->Immed8Value() / 2;
}
intptr_t start_address = 0;
intptr_t end_address = 0;
ProcessPUW(instr, num_regs, operand_size, &start_address, &end_address);
intptr_t* address = reinterpret_cast<intptr_t*>(start_address);
for (int reg = vd; reg < vd + num_regs; reg++) {
if (precision == kSinglePrecision) {
if (load) {
set_s_register_from_sinteger(
reg, ReadW(reinterpret_cast<int32_t>(address), instr));
} else {
WriteW(reinterpret_cast<int32_t>(address),
get_sinteger_from_s_register(reg), instr);
}
address += 1;
} else {
if (load) {
set_s_register_from_sinteger(
2 * reg, ReadW(reinterpret_cast<int32_t>(address), instr));
set_s_register_from_sinteger(
2 * reg + 1, ReadW(reinterpret_cast<int32_t>(address + 1), instr));
} else {
WriteW(reinterpret_cast<int32_t>(address),
get_sinteger_from_s_register(2 * reg), instr);
WriteW(reinterpret_cast<int32_t>(address + 1),
get_sinteger_from_s_register(2 * reg + 1), instr);
}
address += 2;
}
}
ASSERT(reinterpret_cast<intptr_t>(address) - operand_size == end_address);
}
// Calls into the V8 runtime are based on this very simple interface.
// Note: To be able to return two values from some calls the code in runtime.cc
// uses the ObjectPair which is essentially two 32-bit values stuffed into a
@ -1533,7 +1696,8 @@ typedef int64_t (*SimulatorRuntimeCall)(int32_t arg0,
int32_t arg1,
int32_t arg2,
int32_t arg3,
int32_t arg4);
int32_t arg4,
int32_t arg5);
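// Illustrative sketch (not from the V8 sources; names are placeholders): how
// a 64-bit runtime-call result is split back into the simulated r0/r1 pair,
// mirroring the lo_res/hi_res handling further down.
#include <stdint.h>

static void SplitInt64Result(int64_t result, int32_t* lo, int32_t* hi) {
  *lo = static_cast<int32_t>(result);        // low 32 bits -> r0
  *hi = static_cast<int32_t>(result >> 32);  // high 32 bits -> r1
}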
typedef double (*SimulatorRuntimeFPCall)(int32_t arg0,
int32_t arg1,
int32_t arg2,
@ -1564,28 +1728,94 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
int32_t arg2 = get_register(r2);
int32_t arg3 = get_register(r3);
int32_t* stack_pointer = reinterpret_cast<int32_t*>(get_register(sp));
int32_t arg4 = *stack_pointer;
int32_t arg4 = stack_pointer[0];
int32_t arg5 = stack_pointer[1];
bool fp_call =
(redirection->type() == ExternalReference::BUILTIN_FP_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_COMPARE_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_CALL) ||
(redirection->type() == ExternalReference::BUILTIN_FP_INT_CALL);
if (use_eabi_hardfloat()) {
// With the hard floating point calling convention, double
// arguments are passed in VFP registers. Fetch the arguments
// from there and call the builtin using soft floating point
// convention.
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
arg0 = vfp_register[0];
arg1 = vfp_register[1];
arg2 = vfp_register[2];
arg3 = vfp_register[3];
break;
case ExternalReference::BUILTIN_FP_CALL:
arg0 = vfp_register[0];
arg1 = vfp_register[1];
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
arg0 = vfp_register[0];
arg1 = vfp_register[1];
arg2 = get_register(0);
break;
default:
break;
}
}
// This is dodgy but it works because the C entry stubs are never moved.
// See comment in codegen-arm.cc and bug 1242173.
int32_t saved_lr = get_register(lr);
intptr_t external =
reinterpret_cast<intptr_t>(redirection->external_function());
if (redirection->type() == ExternalReference::FP_RETURN_CALL) {
SimulatorRuntimeFPCall target =
reinterpret_cast<SimulatorRuntimeFPCall>(external);
if (fp_call) {
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
double x, y;
GetFpArgs(&x, &y);
PrintF("Call to host function at %p with args %f, %f",
FUNCTION_ADDR(target), x, y);
SimulatorRuntimeFPCall target =
reinterpret_cast<SimulatorRuntimeFPCall>(external);
double dval0, dval1;
int32_t ival;
switch (redirection->type()) {
case ExternalReference::BUILTIN_FP_FP_CALL:
case ExternalReference::BUILTIN_COMPARE_CALL:
GetFpArgs(&dval0, &dval1);
PrintF("Call to host function at %p with args %f, %f",
FUNCTION_ADDR(target), dval0, dval1);
break;
case ExternalReference::BUILTIN_FP_CALL:
GetFpArgs(&dval0);
PrintF("Call to host function at %p with arg %f",
FUNCTION_ADDR(target), dval0);
break;
case ExternalReference::BUILTIN_FP_INT_CALL:
GetFpArgs(&dval0, &ival);
PrintF("Call to host function at %p with args %f, %d",
FUNCTION_ADDR(target), dval0, ival);
break;
default:
UNREACHABLE();
break;
}
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
double result = target(arg0, arg1, arg2, arg3);
SetFpResult(result);
if (redirection->type() != ExternalReference::BUILTIN_COMPARE_CALL) {
SimulatorRuntimeFPCall target =
reinterpret_cast<SimulatorRuntimeFPCall>(external);
double result = target(arg0, arg1, arg2, arg3);
SetFpResult(result);
} else {
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(external);
int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32);
if (::v8::internal::FLAG_trace_sim) {
PrintF("Returned %08x\n", lo_res);
}
set_register(r0, lo_res);
set_register(r1, hi_res);
}
} else if (redirection->type() == ExternalReference::DIRECT_API_CALL) {
SimulatorRuntimeDirectApiCall target =
reinterpret_cast<SimulatorRuntimeDirectApiCall>(external);
@ -1627,20 +1857,22 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
reinterpret_cast<SimulatorRuntimeCall>(external);
if (::v8::internal::FLAG_trace_sim || !stack_aligned) {
PrintF(
"Call to host function at %p args %08x, %08x, %08x, %08x, %0xc",
"Call to host function at %p"
"args %08x, %08x, %08x, %08x, %08x, %08x",
FUNCTION_ADDR(target),
arg0,
arg1,
arg2,
arg3,
arg4);
arg4,
arg5);
if (!stack_aligned) {
PrintF(" with unaligned stack %08x\n", get_register(sp));
}
PrintF("\n");
}
CHECK(stack_aligned);
int64_t result = target(arg0, arg1, arg2, arg3, arg4);
int64_t result = target(arg0, arg1, arg2, arg3, arg4, arg5);
int32_t lo_res = static_cast<int32_t>(result);
int32_t hi_res = static_cast<int32_t>(result >> 32);
if (::v8::internal::FLAG_trace_sim) {
@ -1654,7 +1886,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
break;
}
case kBreakpoint: {
Debugger dbg(this);
ArmDebugger dbg(this);
dbg.Debug();
break;
}
@ -1668,7 +1900,7 @@ void Simulator::SoftwareInterrupt(Instruction* instr) {
// Stop if it is enabled, otherwise go on jumping over the stop
// and the message address.
if (isEnabledStop(code)) {
Debugger dbg(this);
ArmDebugger dbg(this);
dbg.Stop(instr);
} else {
set_pc(get_pc() + 2 * Instruction::kInstrSize);
@ -1976,7 +2208,7 @@ void Simulator::DecodeType01(Instruction* instr) {
break;
}
case BKPT: {
Debugger dbg(this);
ArmDebugger dbg(this);
PrintF("Simulator hit BKPT.\n");
dbg.Debug();
break;
@ -2088,8 +2320,15 @@ void Simulator::DecodeType01(Instruction* instr) {
}
case ADC: {
Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
Format(instr, "adc'cond's 'rd, 'rn, 'imm");
// Format(instr, "adc'cond's 'rd, 'rn, 'shift_rm");
// Format(instr, "adc'cond's 'rd, 'rn, 'imm");
alu_out = rn_val + shifter_operand + GetCarry();
set_register(rd, alu_out);
if (instr->HasS()) {
SetNZFlags(alu_out);
SetCFlag(CarryFrom(rn_val, shifter_operand, GetCarry()));
SetVFlag(OverflowFrom(alu_out, rn_val, shifter_operand, true));
}
break;
}
@ -2467,6 +2706,8 @@ void Simulator::DecodeType7(Instruction* instr) {
// vmov :Rt = Sn
// vcvt: Dd = Sm
// vcvt: Sd = Dm
// Dd = vabs(Dm)
// Dd = vneg(Dm)
// Dd = vadd(Dn, Dm)
// Dd = vsub(Dn, Dm)
// Dd = vmul(Dn, Dm)
@ -2502,6 +2743,11 @@ void Simulator::DecodeTypeVFP(Instruction* instr) {
double dm_value = get_double_from_d_register(vm);
double dd_value = fabs(dm_value);
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc2Value() == 0x1) && (instr->Opc3Value() == 0x1)) {
// vneg
double dm_value = get_double_from_d_register(vm);
double dd_value = -dm_value;
set_d_register_from_double(vd, dd_value);
} else if ((instr->Opc2Value() == 0x7) && (instr->Opc3Value() == 0x3)) {
DecodeVCVTBetweenDoubleAndSingle(instr);
} else if ((instr->Opc2Value() == 0x8) && (instr->Opc3Value() & 0x1)) {
@ -2895,9 +3141,17 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
}
break;
}
case 0x4:
case 0x5:
case 0x6:
case 0x7:
case 0x9:
case 0xB:
// Load/store multiple single from memory: vldm/vstm.
HandleVList(instr);
break;
default:
UNIMPLEMENTED(); // Not used by V8.
break;
}
} else if (instr->CoprocessorValue() == 0xB) {
switch (instr->OpcodeValue()) {
@ -2944,9 +3198,14 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
}
break;
}
case 0x4:
case 0x5:
case 0x9:
// Load/store multiple double from memory: vldm/vstm.
HandleVList(instr);
break;
default:
UNIMPLEMENTED(); // Not used by V8.
break;
}
} else {
UNIMPLEMENTED(); // Not used by V8.
@ -2957,7 +3216,7 @@ void Simulator::DecodeType6CoprocessorIns(Instruction* instr) {
// Executes the current instruction.
void Simulator::InstructionDecode(Instruction* instr) {
if (v8::internal::FLAG_check_icache) {
CheckICache(instr);
CheckICache(isolate_->simulator_i_cache(), instr);
}
pc_modified_ = false;
if (::v8::internal::FLAG_trace_sim) {
@ -3040,7 +3299,7 @@ void Simulator::Execute() {
Instruction* instr = reinterpret_cast<Instruction*>(program_counter);
icount_++;
if (icount_ == ::v8::internal::FLAG_stop_sim_at) {
Debugger dbg(this);
ArmDebugger dbg(this);
dbg.Debug();
} else {
InstructionDecode(instr);

87
deps/v8/src/arm/simulator-arm.h

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -49,25 +49,28 @@ namespace internal {
(entry(p0, p1, p2, p3, p4))
typedef int (*arm_regexp_matcher)(String*, int, const byte*, const byte*,
void*, int*, Address, int);
void*, int*, Address, int, Isolate*);
// Call the generated regexp code directly. The code at the entry address
// should act as a function matching the type arm_regexp_matcher.
// The fifth argument is a dummy that reserves the space used for
// the return address added by the ExitFrame in native calls.
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
(FUNCTION_CAST<arm_regexp_matcher>(entry)(p0, p1, p2, p3, NULL, p4, p5, p6))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
(FUNCTION_CAST<arm_regexp_matcher>(entry)( \
p0, p1, p2, p3, NULL, p4, p5, p6, p7))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
(reinterpret_cast<TryCatch*>(try_catch_address))
reinterpret_cast<TryCatch*>(try_catch_address)
// The stack limit beyond which we will throw stack overflow errors in
// generated code. Because generated code on arm uses the C stack, we
// just use the C stack limit.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
USE(isolate);
return c_limit;
}
@ -123,7 +126,7 @@ class CachePage {
class Simulator {
public:
friend class Debugger;
friend class ArmDebugger;
enum Register {
no_reg = -1,
r0 = 0, r1, r2, r3, r4, r5, r6, r7,
@ -142,18 +145,19 @@ class Simulator {
num_d_registers = 16
};
Simulator();
explicit Simulator(Isolate* isolate);
~Simulator();
// The currently executing Simulator instance. Potentially there can be one
// for each native thread.
static Simulator* current();
static Simulator* current(v8::internal::Isolate* isolate);
// Accessors for register state. Reading the pc value adheres to the ARM
// architecture specification and is off by 8 from the currently executing
// instruction.
void set_register(int reg, int32_t value);
int32_t get_register(int reg) const;
double get_double_from_register_pair(int reg);
void set_dw_register(int dreg, const int* dbl);
// Support for VFP.
@ -177,7 +181,7 @@ class Simulator {
void Execute();
// Call on program start.
static void Initialize();
static void Initialize(Isolate* isolate);
// V8 generally calls into generated JS code with 5 parameters and into
// generated RegExp code with 7 parameters. This is a convenience function,
@ -191,12 +195,22 @@ class Simulator {
uintptr_t PopAddress();
// ICache checking.
static void FlushICache(void* start, size_t size);
static void FlushICache(v8::internal::HashMap* i_cache, void* start,
size_t size);
// Returns true if pc register contains one of the 'special_values' defined
// below (bad_lr, end_sim_pc).
bool has_bad_pc() const;
// EABI variant for double arguments in use.
bool use_eabi_hardfloat() {
#if USE_EABI_HARDFLOAT
return true;
#else
return false;
#endif
}
private:
enum special_values {
// Known bad pc value to ensure that the simulator does not execute
@ -220,13 +234,17 @@ class Simulator {
void SetNZFlags(int32_t val);
void SetCFlag(bool val);
void SetVFlag(bool val);
bool CarryFrom(int32_t left, int32_t right);
bool CarryFrom(int32_t left, int32_t right, int32_t carry = 0);
bool BorrowFrom(int32_t left, int32_t right);
bool OverflowFrom(int32_t alu_out,
int32_t left,
int32_t right,
bool addition);
inline int GetCarry() {
return c_flag_ ? 1 : 0;
};
// Support for VFP.
void Compute_FPSCR_Flags(double val1, double val2);
void Copy_FPSCR_to_APSR();
@ -234,7 +252,13 @@ class Simulator {
// Helper functions to decode common "addressing" modes
int32_t GetShiftRm(Instruction* instr, bool* carry_out);
int32_t GetImm(Instruction* instr, bool* carry_out);
void ProcessPUW(Instruction* instr,
int num_regs,
int operand_size,
intptr_t* start_address,
intptr_t* end_address);
void HandleRList(Instruction* instr, bool load);
void HandleVList(Instruction* inst);
void SoftwareInterrupt(Instruction* instr);
// Stop helper functions.
@ -287,18 +311,20 @@ class Simulator {
void InstructionDecode(Instruction* instr);
// ICache.
static void CheckICache(Instruction* instr);
static void FlushOnePage(intptr_t start, int size);
static CachePage* GetCachePage(void* page);
static void CheckICache(v8::internal::HashMap* i_cache, Instruction* instr);
static void FlushOnePage(v8::internal::HashMap* i_cache, intptr_t start,
int size);
static CachePage* GetCachePage(v8::internal::HashMap* i_cache, void* page);
// Runtime call support.
static void* RedirectExternalReference(
void* external_function,
v8::internal::ExternalReference::Type type);
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
// For use in calls that take double value arguments.
void GetFpArgs(double* x, double* y);
void GetFpArgs(double* x);
void GetFpArgs(double* x, int32_t* y);
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
@ -333,15 +359,16 @@ class Simulator {
char* stack_;
bool pc_modified_;
int icount_;
static bool initialized_;
// Icache simulation
static v8::internal::HashMap* i_cache_;
v8::internal::HashMap* i_cache_;
// Registered breakpoints.
Instruction* break_pc_;
Instr break_instr_;
v8::internal::Isolate* isolate_;
// A stop is watched if its code is less than kNumOfWatchedStops.
// Only watched stops support enabling/disabling and the counter feature.
static const uint32_t kNumOfWatchedStops = 256;
@ -364,15 +391,16 @@ class Simulator {
// When running with the simulator transition into simulated execution at this
// point.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
reinterpret_cast<Object*>(Simulator::current()->Call( \
reinterpret_cast<Object*>(Simulator::current(Isolate::Current())->Call( \
FUNCTION_ADDR(entry), 5, p0, p1, p2, p3, p4))
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6) \
Simulator::current()->Call(entry, 8, p0, p1, p2, p3, NULL, p4, p5, p6)
#define CALL_GENERATED_REGEXP_CODE(entry, p0, p1, p2, p3, p4, p5, p6, p7) \
Simulator::current(Isolate::Current())->Call( \
entry, 9, p0, p1, p2, p3, NULL, p4, p5, p6, p7)
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == \
NULL ? NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
#define TRY_CATCH_FROM_ADDRESS(try_catch_address) \
try_catch_address == NULL ? \
NULL : *(reinterpret_cast<TryCatch**>(try_catch_address))
// The simulator has its own stack. Thus it has a different stack limit from
@ -382,17 +410,18 @@ class Simulator {
// trouble down the line.
class SimulatorStack : public v8::internal::AllStatic {
public:
static inline uintptr_t JsLimitFromCLimit(uintptr_t c_limit) {
return Simulator::current()->StackLimit();
static inline uintptr_t JsLimitFromCLimit(v8::internal::Isolate* isolate,
uintptr_t c_limit) {
return Simulator::current(isolate)->StackLimit();
}
static inline uintptr_t RegisterCTryCatch(uintptr_t try_catch_address) {
Simulator* sim = Simulator::current();
Simulator* sim = Simulator::current(Isolate::Current());
return sim->PushAddress(try_catch_address);
}
static inline void UnregisterCTryCatch() {
Simulator::current()->PopAddress();
Simulator::current(Isolate::Current())->PopAddress();
}
};

1558
deps/v8/src/arm/stub-cache-arm.cc

File diff suppressed because it is too large

843
deps/v8/src/arm/virtual-frame-arm.cc

@ -1,843 +0,0 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include "v8.h"
#if defined(V8_TARGET_ARCH_ARM)
#include "codegen-inl.h"
#include "register-allocator-inl.h"
#include "scopes.h"
#include "virtual-frame-inl.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm())
void VirtualFrame::PopToR1R0() {
// Shuffle things around so the top of stack is in r0 and r1.
MergeTOSTo(R0_R1_TOS);
// Pop the two registers off the stack so they are detached from the frame.
LowerHeight(2);
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::PopToR1() {
// Shuffle things around so the top of stack is only in r1.
MergeTOSTo(R1_TOS);
// Pop the register off the stack so it is detached from the frame.
LowerHeight(1);
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::PopToR0() {
// Shuffle things around so the top of stack only in r0.
MergeTOSTo(R0_TOS);
// Pop the register off the stack so it is detached from the frame.
LowerHeight(1);
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::MergeTo(const VirtualFrame* expected, Condition cond) {
if (Equals(expected)) return;
ASSERT((expected->tos_known_smi_map_ & tos_known_smi_map_) ==
expected->tos_known_smi_map_);
ASSERT(expected->IsCompatibleWith(this));
MergeTOSTo(expected->top_of_stack_state_, cond);
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}
void VirtualFrame::MergeTo(VirtualFrame* expected, Condition cond) {
if (Equals(expected)) return;
tos_known_smi_map_ &= expected->tos_known_smi_map_;
MergeTOSTo(expected->top_of_stack_state_, cond);
ASSERT(register_allocation_map_ == expected->register_allocation_map_);
}
void VirtualFrame::MergeTOSTo(
VirtualFrame::TopOfStack expected_top_of_stack_state, Condition cond) {
#define CASE_NUMBER(a, b) ((a) * TOS_STATES + (b))
switch (CASE_NUMBER(top_of_stack_state_, expected_top_of_stack_state)) {
case CASE_NUMBER(NO_TOS_REGISTERS, NO_TOS_REGISTERS):
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_TOS):
__ pop(r0, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R1_TOS):
__ pop(r1, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R0_R1_TOS):
__ pop(r0, cond);
__ pop(r1, cond);
break;
case CASE_NUMBER(NO_TOS_REGISTERS, R1_R0_TOS):
__ pop(r1, cond);
__ pop(r0, cond);
break;
case CASE_NUMBER(R0_TOS, NO_TOS_REGISTERS):
__ push(r0, cond);
break;
case CASE_NUMBER(R0_TOS, R0_TOS):
break;
case CASE_NUMBER(R0_TOS, R1_TOS):
__ mov(r1, r0, LeaveCC, cond);
break;
case CASE_NUMBER(R0_TOS, R0_R1_TOS):
__ pop(r1, cond);
break;
case CASE_NUMBER(R0_TOS, R1_R0_TOS):
__ mov(r1, r0, LeaveCC, cond);
__ pop(r0, cond);
break;
case CASE_NUMBER(R1_TOS, NO_TOS_REGISTERS):
__ push(r1, cond);
break;
case CASE_NUMBER(R1_TOS, R0_TOS):
__ mov(r0, r1, LeaveCC, cond);
break;
case CASE_NUMBER(R1_TOS, R1_TOS):
break;
case CASE_NUMBER(R1_TOS, R0_R1_TOS):
__ mov(r0, r1, LeaveCC, cond);
__ pop(r1, cond);
break;
case CASE_NUMBER(R1_TOS, R1_R0_TOS):
__ pop(r0, cond);
break;
case CASE_NUMBER(R0_R1_TOS, NO_TOS_REGISTERS):
__ Push(r1, r0, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R0_TOS):
__ push(r1, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R1_TOS):
__ push(r1, cond);
__ mov(r1, r0, LeaveCC, cond);
break;
case CASE_NUMBER(R0_R1_TOS, R0_R1_TOS):
break;
case CASE_NUMBER(R0_R1_TOS, R1_R0_TOS):
__ Swap(r0, r1, ip, cond);
break;
case CASE_NUMBER(R1_R0_TOS, NO_TOS_REGISTERS):
__ Push(r0, r1, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R0_TOS):
__ push(r0, cond);
__ mov(r0, r1, LeaveCC, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R1_TOS):
__ push(r0, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R0_R1_TOS):
__ Swap(r0, r1, ip, cond);
break;
case CASE_NUMBER(R1_R0_TOS, R1_R0_TOS):
break;
default:
UNREACHABLE();
#undef CASE_NUMBER
}
// A conditional merge will be followed by a conditional branch and the
// fall-through code will have an unchanged virtual frame state. If the
// merge is unconditional ('al'ways) then it might be followed by a fall
// through. We need to update the virtual frame state to match the code we
// are falling into. The final case is an unconditional merge followed by an
// unconditional branch, in which case it doesn't matter what we do to the
// virtual frame state, because the virtual frame will be invalidated.
if (cond == al) {
top_of_stack_state_ = expected_top_of_stack_state;
}
}
void VirtualFrame::Enter() {
Comment cmnt(masm(), "[ Enter JS frame");
#ifdef DEBUG
// Verify that r1 contains a JS function. The following code relies
// on r2 being available for use.
if (FLAG_debug_code) {
Label map_check, done;
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &map_check);
__ stop("VirtualFrame::Enter - r1 is not a function (smi check).");
__ bind(&map_check);
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(eq, &done);
__ stop("VirtualFrame::Enter - r1 is not a function (map check).");
__ bind(&done);
}
#endif // DEBUG
// We are about to push four values to the frame.
Adjust(4);
__ stm(db_w, sp, r1.bit() | cp.bit() | fp.bit() | lr.bit());
// Adjust FP to point to saved FP.
__ add(fp, sp, Operand(2 * kPointerSize));
}
void VirtualFrame::Exit() {
Comment cmnt(masm(), "[ Exit JS frame");
// Record the location of the JS exit code for patching when setting
// break point.
__ RecordJSReturn();
// Drop the execution stack down to the frame pointer and restore the caller
// frame pointer and return address.
__ mov(sp, fp);
__ ldm(ia_w, sp, fp.bit() | lr.bit());
}
void VirtualFrame::AllocateStackSlots() {
int count = local_count();
if (count > 0) {
Comment cmnt(masm(), "[ Allocate space for locals");
Adjust(count);
// Initialize stack slots with 'undefined' value.
__ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
__ LoadRoot(r2, Heap::kStackLimitRootIndex);
if (count < kLocalVarBound) {
// For less locals the unrolled loop is more compact.
for (int i = 0; i < count; i++) {
__ push(ip);
}
} else {
// For more locals a loop in generated code is more compact.
Label alloc_locals_loop;
__ mov(r1, Operand(count));
__ bind(&alloc_locals_loop);
__ push(ip);
__ sub(r1, r1, Operand(1), SetCC);
__ b(ne, &alloc_locals_loop);
}
} else {
__ LoadRoot(r2, Heap::kStackLimitRootIndex);
}
// Check the stack for overflow or a break request.
masm()->cmp(sp, Operand(r2));
StackCheckStub stub;
// Call the stub if lower.
masm()->mov(ip,
Operand(reinterpret_cast<intptr_t>(stub.GetCode().location()),
RelocInfo::CODE_TARGET),
LeaveCC,
lo);
masm()->Call(ip, lo);
}
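// Worked note (illustrative): with kLocalVarBound == 5, a frame with up to
// four locals gets at most four straight-line push instructions, while five
// or more locals share the four-instruction mov/push/sub/branch loop above.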
void VirtualFrame::PushReceiverSlotAddress() {
UNIMPLEMENTED();
}
void VirtualFrame::PushTryHandler(HandlerType type) {
// Grow the expression stack by handler size less one (the return
// address in lr is already counted by a call instruction).
Adjust(kHandlerSize - 1);
__ PushTryHandler(IN_JAVASCRIPT, type);
}
void VirtualFrame::CallJSFunction(int arg_count) {
// InvokeFunction requires function in r1.
PopToR1();
SpillAll();
// +1 for receiver.
Forget(arg_count + 1);
ASSERT(cgen()->HasValidEntryRegisters());
ParameterCount count(arg_count);
__ InvokeFunction(r1, count, CALL_FUNCTION);
// Restore the context.
__ ldr(cp, Context());
}
void VirtualFrame::CallRuntime(Runtime::Function* f, int arg_count) {
SpillAll();
Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(f, arg_count);
}
void VirtualFrame::CallRuntime(Runtime::FunctionId id, int arg_count) {
SpillAll();
Forget(arg_count);
ASSERT(cgen()->HasValidEntryRegisters());
__ CallRuntime(id, arg_count);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
void VirtualFrame::DebugBreak() {
ASSERT(cgen()->HasValidEntryRegisters());
__ DebugBreak();
}
#endif
void VirtualFrame::InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flags,
int arg_count) {
Forget(arg_count);
__ InvokeBuiltin(id, flags);
}
void VirtualFrame::CallLoadIC(Handle<String> name, RelocInfo::Mode mode) {
Handle<Code> ic(Builtins::builtin(Builtins::LoadIC_Initialize));
PopToR0();
SpillAll();
__ mov(r2, Operand(name));
CallCodeObject(ic, mode, 0);
}
void VirtualFrame::CallStoreIC(Handle<String> name,
bool is_contextual,
StrictModeFlag strict_mode) {
Handle<Code> ic(Builtins::builtin(
(strict_mode == kStrictMode) ? Builtins::StoreIC_Initialize_Strict
: Builtins::StoreIC_Initialize));
PopToR0();
RelocInfo::Mode mode;
if (is_contextual) {
SpillAll();
__ ldr(r1, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_INDEX)));
mode = RelocInfo::CODE_TARGET_CONTEXT;
} else {
EmitPop(r1);
SpillAll();
mode = RelocInfo::CODE_TARGET;
}
__ mov(r2, Operand(name));
CallCodeObject(ic, mode, 0);
}
void VirtualFrame::CallKeyedLoadIC() {
Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Initialize));
PopToR1R0();
SpillAll();
CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}
void VirtualFrame::CallKeyedStoreIC(StrictModeFlag strict_mode) {
Handle<Code> ic(Builtins::builtin(
(strict_mode == kStrictMode) ? Builtins::KeyedStoreIC_Initialize_Strict
: Builtins::KeyedStoreIC_Initialize));
PopToR1R0();
SpillAll();
EmitPop(r2);
CallCodeObject(ic, RelocInfo::CODE_TARGET, 0);
}
void VirtualFrame::CallCodeObject(Handle<Code> code,
RelocInfo::Mode rmode,
int dropped_args) {
switch (code->kind()) {
case Code::CALL_IC:
case Code::KEYED_CALL_IC:
case Code::FUNCTION:
break;
case Code::KEYED_LOAD_IC:
case Code::LOAD_IC:
case Code::KEYED_STORE_IC:
case Code::STORE_IC:
ASSERT(dropped_args == 0);
break;
case Code::BUILTIN:
ASSERT(*code == Builtins::builtin(Builtins::JSConstructCall));
break;
default:
UNREACHABLE();
break;
}
Forget(dropped_args);
ASSERT(cgen()->HasValidEntryRegisters());
__ Call(code, rmode);
}
// NO_TOS_REGISTERS, R0_TOS, R1_TOS, R1_R0_TOS, R0_R1_TOS.
const bool VirtualFrame::kR0InUse[TOS_STATES] =
{ false, true, false, true, true };
const bool VirtualFrame::kR1InUse[TOS_STATES] =
{ false, false, true, true, true };
const int VirtualFrame::kVirtualElements[TOS_STATES] =
{ 0, 1, 1, 2, 2 };
const Register VirtualFrame::kTopRegister[TOS_STATES] =
{ r0, r0, r1, r1, r0 };
const Register VirtualFrame::kBottomRegister[TOS_STATES] =
{ r0, r0, r1, r0, r1 };
const Register VirtualFrame::kAllocatedRegisters[
VirtualFrame::kNumberOfAllocatedRegisters] = { r2, r3, r4, r5, r6 };
// Popping is done by the transition implied by kStateAfterPop. Of course if
// there were no stack slots allocated to registers then the physical SP must
// be adjusted.
const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPop[TOS_STATES] =
{ NO_TOS_REGISTERS, NO_TOS_REGISTERS, NO_TOS_REGISTERS, R0_TOS, R1_TOS };
// Pushing is done by the transition implied by kStateAfterPush. Of course if
// the maximum number of registers was already allocated to the top of stack
// slots then one register must be physically pushed onto the stack.
const VirtualFrame::TopOfStack VirtualFrame::kStateAfterPush[TOS_STATES] =
{ R0_TOS, R1_R0_TOS, R0_R1_TOS, R0_R1_TOS, R1_R0_TOS };
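// Worked example (illustrative): reading the tables above, a push onto
// NO_TOS_REGISTERS moves to R0_TOS, a second push moves to R1_R0_TOS (both
// TOS registers in use), and a subsequent pop transitions R1_R0_TOS back to
// R0_TOS without emitting any instruction that touches the physical stack.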
bool VirtualFrame::SpilledScope::is_spilled_ = false;
void VirtualFrame::Drop(int count) {
ASSERT(count >= 0);
ASSERT(height() >= count);
// Discard elements from the virtual frame and free any registers.
int num_virtual_elements = kVirtualElements[top_of_stack_state_];
while (num_virtual_elements > 0) {
Pop();
num_virtual_elements--;
count--;
if (count == 0) return;
}
if (count == 0) return;
__ add(sp, sp, Operand(count * kPointerSize));
LowerHeight(count);
}
void VirtualFrame::Pop() {
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
__ add(sp, sp, Operand(kPointerSize));
} else {
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
}
LowerHeight(1);
}
void VirtualFrame::EmitPop(Register reg) {
ASSERT(!is_used(RegisterAllocator::ToNumber(reg)));
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
__ pop(reg);
} else {
__ mov(reg, kTopRegister[top_of_stack_state_]);
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
}
LowerHeight(1);
}
void VirtualFrame::SpillAllButCopyTOSToR0() {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r0, MemOperand(sp, 0));
break;
case R0_TOS:
__ push(r0);
break;
case R1_TOS:
__ push(r1);
__ mov(r0, r1);
break;
case R0_R1_TOS:
__ Push(r1, r0);
break;
case R1_R0_TOS:
__ Push(r0, r1);
__ mov(r0, r1);
break;
default:
UNREACHABLE();
}
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::SpillAllButCopyTOSToR1() {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r1, MemOperand(sp, 0));
break;
case R0_TOS:
__ push(r0);
__ mov(r1, r0);
break;
case R1_TOS:
__ push(r1);
break;
case R0_R1_TOS:
__ Push(r1, r0);
__ mov(r1, r0);
break;
case R1_R0_TOS:
__ Push(r0, r1);
break;
default:
UNREACHABLE();
}
top_of_stack_state_ = NO_TOS_REGISTERS;
}
void VirtualFrame::SpillAllButCopyTOSToR1R0() {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r1, MemOperand(sp, 0));
__ ldr(r0, MemOperand(sp, kPointerSize));
break;
case R0_TOS:
__ push(r0);
__ mov(r1, r0);
__ ldr(r0, MemOperand(sp, kPointerSize));
break;
case R1_TOS:
__ push(r1);
__ ldr(r0, MemOperand(sp, kPointerSize));
break;
case R0_R1_TOS:
__ Push(r1, r0);
__ Swap(r0, r1, ip);
break;
case R1_R0_TOS:
__ Push(r0, r1);
break;
default:
UNREACHABLE();
}
top_of_stack_state_ = NO_TOS_REGISTERS;
}
Register VirtualFrame::Peek() {
AssertIsNotSpilled();
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
Register answer = kTopRegister[top_of_stack_state_];
__ pop(answer);
return answer;
} else {
return kTopRegister[top_of_stack_state_];
}
}
Register VirtualFrame::Peek2() {
AssertIsNotSpilled();
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
case R0_TOS:
case R0_R1_TOS:
MergeTOSTo(R0_R1_TOS);
return r1;
case R1_TOS:
case R1_R0_TOS:
MergeTOSTo(R1_R0_TOS);
return r0;
default:
UNREACHABLE();
return no_reg;
}
}
void VirtualFrame::Dup() {
if (SpilledScope::is_spilled()) {
__ ldr(ip, MemOperand(sp, 0));
__ push(ip);
} else {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r0, MemOperand(sp, 0));
top_of_stack_state_ = R0_TOS;
break;
case R0_TOS:
__ mov(r1, r0);
// r0 and r1 contain the same value. Prefer state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_TOS:
__ mov(r0, r1);
// r0 and r1 contain the same value. Prefer state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
case R0_R1_TOS:
__ push(r1);
__ mov(r1, r0);
// r0 and r1 contain the same value. Prefer state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_R0_TOS:
__ push(r0);
__ mov(r0, r1);
// r0 and r1 contain the same value. Prefer state with r0 holding TOS.
top_of_stack_state_ = R0_R1_TOS;
break;
default:
UNREACHABLE();
}
}
RaiseHeight(1, tos_known_smi_map_ & 1);
}
void VirtualFrame::Dup2() {
if (SpilledScope::is_spilled()) {
__ ldr(ip, MemOperand(sp, kPointerSize));
__ push(ip);
__ ldr(ip, MemOperand(sp, kPointerSize));
__ push(ip);
} else {
switch (top_of_stack_state_) {
case NO_TOS_REGISTERS:
__ ldr(r0, MemOperand(sp, 0));
__ ldr(r1, MemOperand(sp, kPointerSize));
top_of_stack_state_ = R0_R1_TOS;
break;
case R0_TOS:
__ push(r0);
__ ldr(r1, MemOperand(sp, kPointerSize));
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_TOS:
__ push(r1);
__ ldr(r0, MemOperand(sp, kPointerSize));
top_of_stack_state_ = R1_R0_TOS;
break;
case R0_R1_TOS:
__ Push(r1, r0);
top_of_stack_state_ = R0_R1_TOS;
break;
case R1_R0_TOS:
__ Push(r0, r1);
top_of_stack_state_ = R1_R0_TOS;
break;
default:
UNREACHABLE();
}
}
RaiseHeight(2, tos_known_smi_map_ & 3);
}
Register VirtualFrame::PopToRegister(Register but_not_to_this_one) {
ASSERT(but_not_to_this_one.is(r0) ||
but_not_to_this_one.is(r1) ||
but_not_to_this_one.is(no_reg));
LowerHeight(1);
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
if (but_not_to_this_one.is(r0)) {
__ pop(r1);
return r1;
} else {
__ pop(r0);
return r0;
}
} else {
Register answer = kTopRegister[top_of_stack_state_];
ASSERT(!answer.is(but_not_to_this_one));
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
return answer;
}
}
void VirtualFrame::EnsureOneFreeTOSRegister() {
if (kVirtualElements[top_of_stack_state_] == kMaxTOSRegisters) {
__ push(kBottomRegister[top_of_stack_state_]);
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
top_of_stack_state_ = kStateAfterPop[top_of_stack_state_];
}
ASSERT(kVirtualElements[top_of_stack_state_] != kMaxTOSRegisters);
}
void VirtualFrame::EmitPush(Register reg, TypeInfo info) {
RaiseHeight(1, info.IsSmi() ? 1 : 0);
if (reg.is(cp)) {
// If we are pushing cp then we are about to make a call and things have to
// be pushed to the physical stack. There's nothing to be gained by moving
// to a TOS register and then pushing that, we might as well push to the
// physical stack immediately.
MergeTOSTo(NO_TOS_REGISTERS);
__ push(reg);
return;
}
if (SpilledScope::is_spilled()) {
ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
__ push(reg);
return;
}
if (top_of_stack_state_ == NO_TOS_REGISTERS) {
if (reg.is(r0)) {
top_of_stack_state_ = R0_TOS;
return;
}
if (reg.is(r1)) {
top_of_stack_state_ = R1_TOS;
return;
}
}
EnsureOneFreeTOSRegister();
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
Register dest = kTopRegister[top_of_stack_state_];
__ Move(dest, reg);
}
void VirtualFrame::SetElementAt(Register reg, int this_far_down) {
if (this_far_down < kTOSKnownSmiMapSize) {
tos_known_smi_map_ &= ~(1 << this_far_down);
}
if (this_far_down == 0) {
Pop();
Register dest = GetTOSRegister();
if (dest.is(reg)) {
// We already popped one item off the top of the stack. If the only
// free register is the one we were asked to push then we have been
// asked to push a register that was already in use, which cannot
// happen. It therefore follows that there are two free TOS registers:
ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
dest = dest.is(r0) ? r1 : r0;
}
__ mov(dest, reg);
EmitPush(dest);
} else if (this_far_down == 1) {
int virtual_elements = kVirtualElements[top_of_stack_state_];
if (virtual_elements < 2) {
__ str(reg, ElementAt(this_far_down));
} else {
ASSERT(virtual_elements == 2);
ASSERT(!reg.is(r0));
ASSERT(!reg.is(r1));
Register dest = kBottomRegister[top_of_stack_state_];
__ mov(dest, reg);
}
} else {
ASSERT(this_far_down >= 2);
ASSERT(kVirtualElements[top_of_stack_state_] <= 2);
__ str(reg, ElementAt(this_far_down));
}
}
Register VirtualFrame::GetTOSRegister() {
if (SpilledScope::is_spilled()) return r0;
EnsureOneFreeTOSRegister();
return kTopRegister[kStateAfterPush[top_of_stack_state_]];
}
void VirtualFrame::EmitPush(Operand operand, TypeInfo info) {
RaiseHeight(1, info.IsSmi() ? 1 : 0);
if (SpilledScope::is_spilled()) {
__ mov(r0, operand);
__ push(r0);
return;
}
EnsureOneFreeTOSRegister();
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
__ mov(kTopRegister[top_of_stack_state_], operand);
}
void VirtualFrame::EmitPush(MemOperand operand, TypeInfo info) {
RaiseHeight(1, info.IsSmi() ? 1 : 0);
if (SpilledScope::is_spilled()) {
__ ldr(r0, operand);
__ push(r0);
return;
}
EnsureOneFreeTOSRegister();
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
__ ldr(kTopRegister[top_of_stack_state_], operand);
}
void VirtualFrame::EmitPushRoot(Heap::RootListIndex index) {
RaiseHeight(1, 0);
if (SpilledScope::is_spilled()) {
__ LoadRoot(r0, index);
__ push(r0);
return;
}
EnsureOneFreeTOSRegister();
top_of_stack_state_ = kStateAfterPush[top_of_stack_state_];
__ LoadRoot(kTopRegister[top_of_stack_state_], index);
}
void VirtualFrame::EmitPushMultiple(int count, int src_regs) {
ASSERT(SpilledScope::is_spilled());
Adjust(count);
__ stm(db_w, sp, src_regs);
}
void VirtualFrame::SpillAll() {
switch (top_of_stack_state_) {
case R1_R0_TOS:
masm()->push(r0);
// Fall through.
case R1_TOS:
masm()->push(r1);
top_of_stack_state_ = NO_TOS_REGISTERS;
break;
case R0_R1_TOS:
masm()->push(r1);
// Fall through.
case R0_TOS:
masm()->push(r0);
top_of_stack_state_ = NO_TOS_REGISTERS;
// Fall through.
case NO_TOS_REGISTERS:
break;
default:
UNREACHABLE();
break;
}
ASSERT(register_allocation_map_ == 0); // Not yet implemented.
}
#undef __
} } // namespace v8::internal
#endif // V8_TARGET_ARCH_ARM

520
deps/v8/src/arm/virtual-frame-arm.h

@ -1,520 +0,0 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_ARM_VIRTUAL_FRAME_ARM_H_
#define V8_ARM_VIRTUAL_FRAME_ARM_H_
#include "register-allocator.h"
namespace v8 {
namespace internal {
// This dummy class is only used to create invalid virtual frames.
extern class InvalidVirtualFrameInitializer {}* kInvalidVirtualFrameInitializer;
// -------------------------------------------------------------------------
// Virtual frames
//
// The virtual frame is an abstraction of the physical stack frame. It
// encapsulates the parameters, frame-allocated locals, and the expression
// stack. It supports push/pop operations on the expression stack, as well
// as random access to the expression stack elements, locals, and
// parameters.
class VirtualFrame : public ZoneObject {
public:
class RegisterAllocationScope;
// A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code
// generator's current frame, and keeps it spilled.
class SpilledScope BASE_EMBEDDED {
public:
explicit SpilledScope(VirtualFrame* frame)
: old_is_spilled_(is_spilled_) {
if (frame != NULL) {
if (!is_spilled_) {
frame->SpillAll();
} else {
frame->AssertIsSpilled();
}
}
is_spilled_ = true;
}
~SpilledScope() {
is_spilled_ = old_is_spilled_;
}
static bool is_spilled() { return is_spilled_; }
private:
static bool is_spilled_;
int old_is_spilled_;
SpilledScope() { }
friend class RegisterAllocationScope;
};
class RegisterAllocationScope BASE_EMBEDDED {
public:
// A utility class to introduce a scope where the virtual frame
// is not spilled, i.e. where register allocation occurs. Eventually
// when RegisterAllocationScope is ubiquitous it can be removed
// along with the (by then unused) SpilledScope class.
inline explicit RegisterAllocationScope(CodeGenerator* cgen);
inline ~RegisterAllocationScope();
private:
CodeGenerator* cgen_;
bool old_is_spilled_;
RegisterAllocationScope() { }
};
// An illegal index into the virtual frame.
static const int kIllegalIndex = -1;
// Construct an initial virtual frame on entry to a JS function.
inline VirtualFrame();
// Construct an invalid virtual frame, used by JumpTargets.
inline VirtualFrame(InvalidVirtualFrameInitializer* dummy);
// Construct a virtual frame as a clone of an existing one.
explicit inline VirtualFrame(VirtualFrame* original);
inline CodeGenerator* cgen() const;
inline MacroAssembler* masm();
// The number of elements on the virtual frame.
int element_count() const { return element_count_; }
// The height of the virtual expression stack.
inline int height() const;
bool is_used(int num) {
switch (num) {
case 0: { // r0.
return kR0InUse[top_of_stack_state_];
}
case 1: { // r1.
return kR1InUse[top_of_stack_state_];
}
case 2:
case 3:
case 4:
case 5:
case 6: { // r2 to r6.
ASSERT(num - kFirstAllocatedRegister < kNumberOfAllocatedRegisters);
ASSERT(num >= kFirstAllocatedRegister);
if ((register_allocation_map_ &
(1 << (num - kFirstAllocatedRegister))) == 0) {
return false;
} else {
return true;
}
}
default: {
ASSERT(num < kFirstAllocatedRegister ||
num >= kFirstAllocatedRegister + kNumberOfAllocatedRegisters);
return false;
}
}
}
// Add extra in-memory elements to the top of the frame to match an actual
// frame (e.g. the frame after an exception handler is pushed). No code is
// emitted.
void Adjust(int count);
// Forget elements from the top of the frame to match an actual frame (e.g.
// the frame after a runtime call). No code is emitted except to bring the
// frame to a spilled state.
void Forget(int count);
// Spill all values from the frame to memory.
void SpillAll();
void AssertIsSpilled() const {
ASSERT(top_of_stack_state_ == NO_TOS_REGISTERS);
ASSERT(register_allocation_map_ == 0);
}
void AssertIsNotSpilled() {
ASSERT(!SpilledScope::is_spilled());
}
// Spill all occurrences of a specific register from the frame.
void Spill(Register reg) {
UNIMPLEMENTED();
}
// Spill all occurrences of an arbitrary register if possible. Return the
// register spilled or no_reg if it was not possible to free any register
// (i.e. they all have frame-external references). Unimplemented.
Register SpillAnyRegister();
// Make this virtual frame have a state identical to an expected virtual
// frame. As a side effect, code may be emitted to make this frame match
// the expected one.
void MergeTo(VirtualFrame* expected, Condition cond = al);
void MergeTo(const VirtualFrame* expected, Condition cond = al);
// Checks whether this frame can be branched to by the other frame.
bool IsCompatibleWith(const VirtualFrame* other) const {
return (tos_known_smi_map_ & (~other->tos_known_smi_map_)) == 0;
}
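// Worked example (illustrative): the subset test above accepts a branch only
// when every slot this frame assumes to hold a smi is also known-smi in the
// branching frame. E.g. tos_known_smi_map_ == 0x3 against other == 0x1 gives
// 0x3 & ~0x1 == 0x2 != 0 (rejected), while other == 0x7 gives 0 (accepted).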
inline void ForgetTypeInfo() {
tos_known_smi_map_ = 0;
}
// Detach a frame from its code generator, perhaps temporarily. This
// tells the register allocator that it is free to use frame-internal
// registers. Used when the code generator's frame is switched from this
// one to NULL by an unconditional jump.
void DetachFromCodeGenerator() {
}
// (Re)attach a frame to its code generator. This informs the register
// allocator that the frame-internal register references are active again.
// Used when a code generator's frame is switched from NULL to this one by
// binding a label.
void AttachToCodeGenerator() {
}
// Emit code for the physical JS entry and exit frame sequences. After
// calling Enter, the virtual frame is ready for use; and after calling
// Exit it should not be used. Note that Enter does not allocate space in
// the physical frame for storing frame-allocated locals.
void Enter();
void Exit();
// Prepare for returning from the frame by popping elements in the virtual frame. This
// avoids generating unnecessary merge code when jumping to the
// shared return site. No spill code emitted. Value to return should be in r0.
inline void PrepareForReturn();
// Number of local variables after which we use a loop for allocating.
static const int kLocalVarBound = 5;
// Allocate and initialize the frame-allocated locals.
void AllocateStackSlots();
// The current top of the expression stack as an assembly operand.
MemOperand Top() {
AssertIsSpilled();
return MemOperand(sp, 0);
}
// An element of the expression stack as an assembly operand.
MemOperand ElementAt(int index) {
int adjusted_index = index - kVirtualElements[top_of_stack_state_];
ASSERT(adjusted_index >= 0);
return MemOperand(sp, adjusted_index * kPointerSize);
}
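// Worked example (illustrative): with two elements held in registers
// (kVirtualElements == 2, e.g. R0_R1_TOS), ElementAt(2) is MemOperand(sp, 0)
// and ElementAt(3) is MemOperand(sp, kPointerSize); indices 0 and 1 live in
// r0/r1 and would trip the ASSERT above.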
bool KnownSmiAt(int index) {
if (index >= kTOSKnownSmiMapSize) return false;
return (tos_known_smi_map_ & (1 << index)) != 0;
}
// A frame-allocated local as an assembly operand.
inline MemOperand LocalAt(int index);
// Push the address of the receiver slot on the frame.
void PushReceiverSlotAddress();
// The function frame slot.
MemOperand Function() { return MemOperand(fp, kFunctionOffset); }
// The context frame slot.
MemOperand Context() { return MemOperand(fp, kContextOffset); }
// A parameter as an assembly operand.
inline MemOperand ParameterAt(int index);
// The receiver frame slot.
inline MemOperand Receiver();
// Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type);
// Call stub given the number of arguments it expects on (and
// removes from) the stack.
inline void CallStub(CodeStub* stub, int arg_count);
// Call JS function from top of the stack with arguments
// taken from the stack.
void CallJSFunction(int arg_count);
// Call runtime given the number of arguments expected on (and
// removed from) the stack.
void CallRuntime(Runtime::Function* f, int arg_count);
void CallRuntime(Runtime::FunctionId id, int arg_count);
#ifdef ENABLE_DEBUGGER_SUPPORT
void DebugBreak();
#endif
// Invoke builtin given the number of arguments it expects on (and
// removes from) the stack.
void InvokeBuiltin(Builtins::JavaScript id,
InvokeJSFlags flag,
int arg_count);
// Call load IC. Receiver is on the stack and is consumed. Result is returned
// in r0.
void CallLoadIC(Handle<String> name, RelocInfo::Mode mode);
// Call store IC. If the load is contextual, value is found on top of the
// frame. If not, value and receiver are on the frame. Both are consumed.
// Result is returned in r0.
void CallStoreIC(Handle<String> name, bool is_contextual,
StrictModeFlag strict_mode);
// Call keyed load IC. Key and receiver are on the stack. Both are consumed.
// Result is returned in r0.
void CallKeyedLoadIC();
// Call keyed store IC. Value, key and receiver are on the stack. All three
// are consumed. Result is returned in r0.
void CallKeyedStoreIC(StrictModeFlag strict_mode);
// Call into an IC stub given the number of arguments it removes
// from the stack. Register arguments to the IC stub are implicit,
// and depend on the type of IC stub.
void CallCodeObject(Handle<Code> ic,
RelocInfo::Mode rmode,
int dropped_args);
// Drop a number of elements from the top of the expression stack. May
// emit code to affect the physical frame. Does not clobber any registers
// excepting possibly the stack pointer.
void Drop(int count);
// Drop one element.
void Drop() { Drop(1); }
// Pop an element from the top of the expression stack. Discards
// the result.
void Pop();
// Pop an element from the top of the expression stack. The register
// will be one normally used for the top of stack register allocation
// so you can't hold on to it if you push on the stack.
Register PopToRegister(Register but_not_to_this_one = no_reg);
// Look at the top of the stack. The register returned is aliased and
// must be copied to a scratch register before modification.
Register Peek();
// Look at the value beneath the top of the stack. The register returned is
// aliased and must be copied to a scratch register before modification.
Register Peek2();
// Duplicate the top of stack.
void Dup();
// Duplicate the two elements on top of stack.
void Dup2();
// Flushes all registers, but it puts a copy of the top-of-stack in r0.
void SpillAllButCopyTOSToR0();
// Flushes all registers, but it puts a copy of the top-of-stack in r1.
void SpillAllButCopyTOSToR1();
// Flushes all registers, but it puts a copy of the top-of-stack in r1
// and the next value on the stack in r0.
void SpillAllButCopyTOSToR1R0();
// Pop and save an element from the top of the expression stack and
// emit a corresponding pop instruction.
void EmitPop(Register reg);
// Takes the top two elements and puts them in r0 (top element) and r1
// (second element).
void PopToR1R0();
// Takes the top element and puts it in r1.
void PopToR1();
// Takes the top element and puts it in r0.
void PopToR0();
// Push an element on top of the expression stack and emit a
// corresponding push instruction.
void EmitPush(Register reg, TypeInfo type_info = TypeInfo::Unknown());
void EmitPush(Operand operand, TypeInfo type_info = TypeInfo::Unknown());
void EmitPush(MemOperand operand, TypeInfo type_info = TypeInfo::Unknown());
void EmitPushRoot(Heap::RootListIndex index);
// Overwrite the nth thing on the stack. If the nth position is in a
// register then this turns into a mov, otherwise an str. Afterwards
// you can still use the register even if it is a register that can be
// used for TOS (r0 or r1).
void SetElementAt(Register reg, int this_far_down);
// Get a register which is free and which must be immediately used to
// push on the top of the stack.
Register GetTOSRegister();
// Push multiple registers on the stack and the virtual frame.
// Registers are selected by setting bits in src_regs and
// are pushed in decreasing order: r15 .. r0.
void EmitPushMultiple(int count, int src_regs);
static Register scratch0() { return r7; }
static Register scratch1() { return r9; }
private:
static const int kLocal0Offset = JavaScriptFrameConstants::kLocal0Offset;
static const int kFunctionOffset = JavaScriptFrameConstants::kFunctionOffset;
static const int kContextOffset = StandardFrameConstants::kContextOffset;
static const int kHandlerSize = StackHandlerConstants::kSize / kPointerSize;
static const int kPreallocatedElements = 5 + 8; // 8 expression stack slots.
// 5 states for the top of stack, which can be in memory or in r0 and r1.
enum TopOfStack {
NO_TOS_REGISTERS,
R0_TOS,
R1_TOS,
R1_R0_TOS,
R0_R1_TOS,
TOS_STATES
};
static const int kMaxTOSRegisters = 2;
static const bool kR0InUse[TOS_STATES];
static const bool kR1InUse[TOS_STATES];
static const int kVirtualElements[TOS_STATES];
static const TopOfStack kStateAfterPop[TOS_STATES];
static const TopOfStack kStateAfterPush[TOS_STATES];
static const Register kTopRegister[TOS_STATES];
static const Register kBottomRegister[TOS_STATES];
// We allocate up to 5 locals in registers.
static const int kNumberOfAllocatedRegisters = 5;
// r2 to r6 are allocated to locals.
static const int kFirstAllocatedRegister = 2;
static const Register kAllocatedRegisters[kNumberOfAllocatedRegisters];
static Register AllocatedRegister(int r) {
ASSERT(r >= 0 && r < kNumberOfAllocatedRegisters);
return kAllocatedRegisters[r];
}
// The number of elements on the stack frame.
int element_count_;
TopOfStack top_of_stack_state_:3;
int register_allocation_map_:kNumberOfAllocatedRegisters;
static const int kTOSKnownSmiMapSize = 4;
unsigned tos_known_smi_map_:kTOSKnownSmiMapSize;
// The index of the element that is at the processor's stack pointer
// (the sp register). For now since everything is in memory it is given
// by the number of elements on the not-very-virtual stack frame.
int stack_pointer() { return element_count_ - 1; }
// The number of frame-allocated locals and parameters respectively.
inline int parameter_count() const;
inline int local_count() const;
// The index of the element that is at the processor's frame pointer
// (the fp register). The parameters, receiver, function, and context
// are below the frame pointer.
inline int frame_pointer() const;
// The index of the first parameter. The receiver lies below the first
// parameter.
int param0_index() { return 1; }
// The index of the context slot in the frame. It is immediately
// below the frame pointer.
inline int context_index();
// The index of the function slot in the frame. It is below the frame
// pointer and context slot.
inline int function_index();
// The index of the first local. Between the frame pointer and the
// locals lies the return address.
inline int local0_index() const;
// The index of the base of the expression stack.
inline int expression_base_index() const;
// Convert a frame index into a frame pointer relative offset into the
// actual stack.
inline int fp_relative(int index);
// Spill all elements in registers. Spill the top spilled_args elements
// on the frame. Sync all other frame elements.
// Then drop dropped_args elements from the virtual frame, to match
// the effect of an upcoming call that will drop them from the stack.
void PrepareForCall(int spilled_args, int dropped_args);
// If all top-of-stack registers are in use then the lowest one is pushed
// onto the physical stack and made free.
void EnsureOneFreeTOSRegister();
// Emit instructions to get the top of stack state from where we are to where
// we want to be.
void MergeTOSTo(TopOfStack expected_state, Condition cond = al);
inline bool Equals(const VirtualFrame* other);
inline void LowerHeight(int count) {
element_count_ -= count;
if (count >= kTOSKnownSmiMapSize) {
tos_known_smi_map_ = 0;
} else {
tos_known_smi_map_ >>= count;
}
}
inline void RaiseHeight(int count, unsigned known_smi_map = 0) {
ASSERT(count >= 32 || known_smi_map < (1u << count));
element_count_ += count;
if (count >= kTOSKnownSmiMapSize) {
tos_known_smi_map_ = known_smi_map;
} else {
tos_known_smi_map_ = ((tos_known_smi_map_ << count) | known_smi_map);
}
}
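// Worked example (illustrative): starting from tos_known_smi_map_ == 0x5,
// RaiseHeight(1, 1) shifts the map left and ors in the new bit, giving 0xB,
// while LowerHeight(2) on the original value shifts right to 0x1; lowering by
// kTOSKnownSmiMapSize (4) or more clears the map entirely.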
friend class JumpTarget;
};
} } // namespace v8::internal
#endif // V8_ARM_VIRTUAL_FRAME_ARM_H_

189
deps/v8/src/array.js

@ -33,7 +33,7 @@
// Global list of arrays visited during toString, toLocaleString and
// join invocations.
var visited_arrays = new $Array();
var visited_arrays = new InternalArray();
// Gets a sorted array of array keys. Useful for operations on sparse
@ -67,13 +67,32 @@ function GetSortedArrayKeys(array, intervals) {
}
function SparseJoinWithSeparator(array, len, convert, separator) {
var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
var totalLength = 0;
var elements = new InternalArray(keys.length * 2);
var previousKey = -1;
for (var i = 0; i < keys.length; i++) {
var key = keys[i];
if (key != previousKey) { // keys may contain duplicates.
var e = array[key];
if (!IS_STRING(e)) e = convert(e);
elements[i * 2] = key;
elements[i * 2 + 1] = e;
previousKey = key;
}
}
return %SparseJoinWithSeparator(elements, len, separator);
}
// Optimized for sparse arrays if separator is ''.
function SparseJoin(array, len, convert) {
var keys = GetSortedArrayKeys(array, %GetArrayKeys(array, len));
var last_key = -1;
var keys_length = keys.length;
var elements = new $Array(keys_length);
var elements = new InternalArray(keys_length);
var elements_length = 0;
for (var i = 0; i < keys_length; i++) {
@ -110,8 +129,12 @@ function Join(array, length, separator, convert) {
// Attempt to convert the elements.
try {
if (UseSparseVariant(array, length, is_array) && (separator.length == 0)) {
return SparseJoin(array, length, convert);
if (UseSparseVariant(array, length, is_array)) {
if (separator.length == 0) {
return SparseJoin(array, length, convert);
} else {
return SparseJoinWithSeparator(array, length, convert, separator);
}
}
// Fast case for one-element arrays.
@ -122,17 +145,15 @@ function Join(array, length, separator, convert) {
}
// Construct an array for the elements.
var elements = new $Array(length);
var elements = new InternalArray(length);
// We pull the empty separator check outside the loop for speed!
if (separator.length == 0) {
var elements_length = 0;
for (var i = 0; i < length; i++) {
var e = array[i];
if (!IS_UNDEFINED(e)) {
if (!IS_STRING(e)) e = convert(e);
elements[elements_length++] = e;
}
if (!IS_STRING(e)) e = convert(e);
elements[elements_length++] = e;
}
elements.length = elements_length;
var result = %_FastAsciiArrayJoin(elements, '');
@ -140,7 +161,7 @@ function Join(array, length, separator, convert) {
return %StringBuilderConcat(elements, elements_length, '');
}
// Non-empty separator case.
// If the first element is a number then use the heuristic that the
// remaining elements are also likely to be numbers.
if (!IS_NUMBER(array[0])) {
for (var i = 0; i < length; i++) {
@ -148,18 +169,19 @@ function Join(array, length, separator, convert) {
if (!IS_STRING(e)) e = convert(e);
elements[i] = e;
}
} else {
for (var i = 0; i < length; i++) {
var e = array[i];
if (IS_NUMBER(e)) elements[i] = %_NumberToString(e);
else {
if (!IS_STRING(e)) e = convert(e);
if (IS_NUMBER(e)) {
e = %_NumberToString(e);
} else if (!IS_STRING(e)) {
e = convert(e);
}
elements[i] = e;
}
}
}
}
var result = %_FastAsciiArrayJoin(elements, separator);
if (!IS_UNDEFINED(result)) return result;
return %StringBuilderJoin(elements, length, separator);
} finally {
@ -171,7 +193,7 @@ function Join(array, length, separator, convert) {
function ConvertToString(x) {
// Assumes x is a non-string.
if (IS_NUMBER(x)) return %_NumberToString(x);
if (IS_BOOLEAN(x)) return x ? 'true' : 'false';
return (IS_NULL_OR_UNDEFINED(x)) ? '' : %ToString(%DefaultString(x));
@ -241,7 +263,7 @@ function SmartSlice(array, start_i, del_count, len, deleted_elements) {
// special array operations to handle sparse arrays in a sensible fashion.
function SmartMove(array, start_i, del_count, len, num_additional_args) {
// Move data to new array.
var new_array = new $Array(len - del_count + num_additional_args);
var new_array = new InternalArray(len - del_count + num_additional_args);
var intervals = %GetArrayKeys(array, len);
var length = intervals.length;
for (var k = 0; k < length; k++) {
@ -375,6 +397,11 @@ function ArrayToLocaleString() {
function ArrayJoin(separator) {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.join"]);
}
if (IS_UNDEFINED(separator)) {
separator = ',';
} else if (!IS_STRING(separator)) {
@ -391,6 +418,11 @@ function ArrayJoin(separator) {
// Removes the last element from the array and returns it. See
// ECMA-262, section 15.4.4.6.
function ArrayPop() {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.pop"]);
}
var n = TO_UINT32(this.length);
if (n == 0) {
this.length = n;
@ -407,6 +439,11 @@ function ArrayPop() {
// Appends the arguments to the end of the array and returns the new
// length of the array. See ECMA-262, section 15.4.4.7.
function ArrayPush() {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.push"]);
}
var n = TO_UINT32(this.length);
var m = %_ArgumentsLength();
for (var i = 0; i < m; i++) {
@ -418,8 +455,13 @@ function ArrayPush() {
function ArrayConcat(arg1) { // length == 1
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.concat"]);
}
var arg_count = %_ArgumentsLength();
var arrays = new $Array(1 + arg_count);
var arrays = new InternalArray(1 + arg_count);
arrays[0] = this;
for (var i = 0; i < arg_count; i++) {
arrays[i + 1] = %_Arguments(i);
@ -474,6 +516,11 @@ function SparseReverse(array, len) {
function ArrayReverse() {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.reverse"]);
}
var j = TO_UINT32(this.length) - 1;
if (UseSparseVariant(this, j, IS_ARRAY(this))) {
@ -505,6 +552,11 @@ function ArrayReverse() {
function ArrayShift() {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.shift"]);
}
var len = TO_UINT32(this.length);
if (len === 0) {
@ -526,6 +578,11 @@ function ArrayShift() {
function ArrayUnshift(arg1) { // length == 1
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.unshift"]);
}
var len = TO_UINT32(this.length);
var num_arguments = %_ArgumentsLength();
@ -545,6 +602,11 @@ function ArrayUnshift(arg1) { // length == 1
function ArraySlice(start, end) {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.slice"]);
}
var len = TO_UINT32(this.length);
var start_i = TO_INTEGER(start);
var end_i = len;
@ -569,7 +631,9 @@ function ArraySlice(start, end) {
if (end_i < start_i) return result;
if (IS_ARRAY(this)) {
if (IS_ARRAY(this) &&
(end_i > 1000) &&
(%EstimateNumberOfElements(this) < end_i)) {
SmartSlice(this, start_i, end_i - start_i, len, result);
} else {
SimpleSlice(this, start_i, end_i - start_i, len, result);
@ -582,6 +646,11 @@ function ArraySlice(start, end) {
function ArraySplice(start, delete_count) {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.splice"]);
}
var num_arguments = %_ArgumentsLength();
var len = TO_UINT32(this.length);
@ -653,6 +722,11 @@ function ArraySplice(start, delete_count) {
function ArraySort(comparefn) {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.sort"]);
}
// In-place QuickSort algorithm.
// For short (length <= 22) arrays, insertion sort is used for efficiency.
@ -914,6 +988,11 @@ function ArraySort(comparefn) {
// preserving the semantics, since the calls to the receiver function can add
// or delete elements from the array.
function ArrayFilter(f, receiver) {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.filter"]);
}
if (!IS_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
@ -925,7 +1004,9 @@ function ArrayFilter(f, receiver) {
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
if (f.call(receiver, current, i, this)) result[result_length++] = current;
if (f.call(receiver, current, i, this)) {
result[result_length++] = current;
}
}
}
return result;
@ -933,6 +1014,11 @@ function ArrayFilter(f, receiver) {
function ArrayForEach(f, receiver) {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.forEach"]);
}
if (!IS_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
@ -951,6 +1037,11 @@ function ArrayForEach(f, receiver) {
// Executes the function once for each element present in the
// array until it finds one where callback returns true.
function ArraySome(f, receiver) {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.some"]);
}
if (!IS_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
@ -968,6 +1059,11 @@ function ArraySome(f, receiver) {
function ArrayEvery(f, receiver) {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.every"]);
}
if (!IS_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
@ -984,24 +1080,36 @@ function ArrayEvery(f, receiver) {
}
function ArrayMap(f, receiver) {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.map"]);
}
if (!IS_FUNCTION(f)) {
throw MakeTypeError('called_non_callable', [ f ]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = TO_UINT32(this.length);
var result = new $Array(length);
var result = new $Array();
var accumulator = new InternalArray(length);
for (var i = 0; i < length; i++) {
var current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
result[i] = f.call(receiver, current, i, this);
accumulator[i] = f.call(receiver, current, i, this);
}
}
%MoveArrayContents(accumulator, result);
return result;
}
function ArrayIndexOf(element, index) {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.indexOf"]);
}
var length = TO_UINT32(this.length);
if (length == 0) return -1;
if (IS_UNDEFINED(index)) {
@ -1059,6 +1167,11 @@ function ArrayIndexOf(element, index) {
function ArrayLastIndexOf(element, index) {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.lastIndexOf"]);
}
var length = TO_UINT32(this.length);
if (length == 0) return -1;
if (%_ArgumentsLength() < 2) {
@ -1112,6 +1225,11 @@ function ArrayLastIndexOf(element, index) {
function ArrayReduce(callback, current) {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.reduce"]);
}
if (!IS_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
@ -1134,13 +1252,18 @@ function ArrayReduce(callback, current) {
for (; i < length; i++) {
var element = this[i];
if (!IS_UNDEFINED(element) || i in this) {
current = callback.call(null, current, element, i, this);
current = callback.call(void 0, current, element, i, this);
}
}
return current;
}
function ArrayReduceRight(callback, current) {
if (IS_NULL_OR_UNDEFINED(this) && !IS_UNDETECTABLE(this)) {
throw MakeTypeError("called_on_null_or_undefined",
["Array.prototype.reduceRight"]);
}
if (!IS_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
@ -1160,7 +1283,7 @@ function ArrayReduceRight(callback, current) {
for (; i >= 0; i--) {
var element = this[i];
if (!IS_UNDEFINED(element) || i in this) {
current = callback.call(null, current, element, i, this);
current = callback.call(void 0, current, element, i, this);
}
}
return current;
@ -1225,6 +1348,20 @@ function SetupArray() {
));
%FinishArrayPrototypeSetup($Array.prototype);
// The internal Array prototype doesn't need to be fancy, since it's never
// exposed to user code, so no hidden prototypes or DONT_ENUM attributes
// are necessary.
// The null __proto__ ensures that we never inherit any user created
// getters or setters from, e.g., Object.prototype.
InternalArray.prototype.__proto__ = null;
// Adding only the functions that are actually used, and a toString.
InternalArray.prototype.join = getFunction("join", ArrayJoin);
InternalArray.prototype.pop = getFunction("pop", ArrayPop);
InternalArray.prototype.push = getFunction("push", ArrayPush);
InternalArray.prototype.toString = function() {
return "Internal Array, length " + this.length;
};
}

593
deps/v8/src/assembler.cc

@ -30,7 +30,7 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
#include "v8.h"
@ -55,6 +55,8 @@
#include "x64/regexp-macro-assembler-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/regexp-macro-assembler-arm.h"
#elif V8_TARGET_ARCH_MIPS
#include "mips/regexp-macro-assembler-mips.h"
#else // Unknown architecture.
#error "Unknown architecture."
#endif // Target architecture.
@ -67,9 +69,24 @@ namespace internal {
const double DoubleConstant::min_int = kMinInt;
const double DoubleConstant::one_half = 0.5;
const double DoubleConstant::minus_zero = -0.0;
const double DoubleConstant::uint8_max_value = 255;
const double DoubleConstant::zero = 0.0;
const double DoubleConstant::nan = OS::nan_value();
const double DoubleConstant::negative_infinity = -V8_INFINITY;
const char* RelocInfo::kFillerCommentString = "DEOPTIMIZATION PADDING";
// -----------------------------------------------------------------------------
// Implementation of AssemblerBase
AssemblerBase::AssemblerBase(Isolate* isolate)
: isolate_(isolate),
jit_cookie_(0) {
if (FLAG_mask_constants_with_cookie && isolate != NULL) {
jit_cookie_ = V8::RandomPrivate(isolate);
}
}
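
For readers unfamiliar with the flag, mask_constants_with_cookie exists so that attacker-chosen immediates do not appear verbatim in JIT-generated code (a JIT-spraying mitigation). The sketch below only demonstrates the underlying XOR idea with made-up helper names; it is not the code the architecture-specific assemblers actually emit. The constant is stored masked with the per-assembler cookie and recovered with a second XOR where it is used.

#include <cstdint>
#include <cstdio>

// Hypothetical helpers; in V8 the masking is done by the architecture-
// specific assemblers. This only shows the XOR round trip.
static uint32_t MaskConstant(uint32_t value, uint32_t cookie) {
  return value ^ cookie;   // this is what would land in the code stream
}

static uint32_t UnmaskConstant(uint32_t masked, uint32_t cookie) {
  return masked ^ cookie;  // an extra XOR emitted next to the use
}

int main() {
  const uint32_t cookie = 0x5bd1e995u;    // stand-in for the random jit cookie
  const uint32_t constant = 0x41414141u;  // attacker-influenced immediate
  uint32_t embedded = MaskConstant(constant, cookie);
  std::printf("embedded: %08x recovered: %08x\n",
              embedded, UnmaskConstant(embedded, cookie));
  return 0;
}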
// -----------------------------------------------------------------------------
// Implementation of Label
@ -84,58 +101,85 @@ int Label::pos() const {
// -----------------------------------------------------------------------------
// Implementation of RelocInfoWriter and RelocIterator
//
// Relocation information is written backwards in memory, from high addresses
// towards low addresses, byte by byte. Therefore, in the encodings listed
// below, the first byte listed is at the highest address, and successive
// bytes in the record are at progressively lower addresses.
//
// Encoding
//
// The most common modes are given single-byte encodings. Also, it is
// easy to identify the type of reloc info and skip unwanted modes in
// an iteration.
//
// The encoding relies on the fact that there are less than 14
// different relocation modes.
//
// embedded_object: [6 bits pc delta] 00
//
// code_target: [6 bits pc delta] 01
// The encoding relies on the fact that there are fewer than 14
// different non-compactly encoded relocation modes.
//
// position: [6 bits pc delta] 10,
// [7 bits signed data delta] 0
// The first byte of a relocation record has a tag in its low 2 bits:
// Here are the record schemes, depending on the low tag and optional higher
// tags.
//
// statement_position: [6 bits pc delta] 10,
// [7 bits signed data delta] 1
// Low tag:
// 00: embedded_object: [6-bit pc delta] 00
//
// any nondata mode: 00 [4 bits rmode] 11, // rmode: 0..13 only
// 00 [6 bits pc delta]
// 01: code_target: [6-bit pc delta] 01
//
// pc-jump: 00 1111 11,
// 00 [6 bits pc delta]
// 10: short_data_record: [6-bit pc delta] 10 followed by
// [6-bit data delta] [2-bit data type tag]
//
// pc-jump: 01 1111 11,
// (variable length) 7 - 26 bit pc delta, written in chunks of 7
// bits, the lowest 7 bits written first.
// 11: long_record [2-bit high tag][4 bit middle_tag] 11
// followed by variable data depending on type.
//
// data-jump + pos: 00 1110 11,
// signed intptr_t, lowest byte written first
// 2-bit data type tags, used in short_data_record and data_jump long_record:
// code_target_with_id: 00
// position: 01
// statement_position: 10
// comment: 11 (not used in short_data_record)
//
// data-jump + st.pos: 01 1110 11,
// signed intptr_t, lowest byte written first
// Long record format:
// 4-bit middle_tag:
// 0000 - 1100 : Short record for RelocInfo::Mode middle_tag + 2
// (The middle_tag encodes rmode - RelocInfo::LAST_COMPACT_ENUM,
// and is between 0000 and 1100)
// The format is:
// 00 [4 bit middle_tag] 11 followed by
// 00 [6 bit pc delta]
//
// data-jump + comm.: 10 1110 11,
// signed intptr_t, lowest byte written first
// 1101: not used (would allow one more relocation mode to be added)
// 1110: long_data_record
// The format is: [2-bit data_type_tag] 1110 11
// signed intptr_t, lowest byte written first
// (except data_type code_target_with_id, which
// is followed by a signed int, not intptr_t.)
//
// 1111: long_pc_jump
// The format is:
// pc-jump: 00 1111 11,
// 00 [6 bits pc delta]
// or
// pc-jump (variable length):
// 01 1111 11,
// [7 bits data] 0
// ...
// [7 bits data] 1
// (Bits 6..31 of pc delta, with leading zeroes
// dropped, and last non-zero chunk tagged with 1.)
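
Since the comment above is dense, here is a standalone sketch of just the single-byte records and the variable-length pc-jump. It is a deliberate simplification: it writes forward rather than backwards, ignores the data and long records, and uses made-up function names; it is only meant to make the bit packing concrete.

#include <cstdint>
#include <cstdio>
#include <vector>

// Forward-writing illustration of the compact records described above
// (the real RelocInfoWriter emits bytes backwards in memory).
const int kTagBits = 2;
const int kSmallPCDeltaBits = 8 - kTagBits;            // 6-bit pc delta
const uint32_t kSmallPCDeltaMask = (1u << kSmallPCDeltaBits) - 1;
const int kChunkBits = 7;
const uint32_t kChunkMask = (1u << kChunkBits) - 1;

// [6-bit pc delta] [2-bit low tag], e.g. 00 embedded_object, 01 code_target.
void WriteShortRecord(std::vector<uint8_t>* out, uint32_t pc_delta, int tag) {
  out->push_back(static_cast<uint8_t>((pc_delta << kTagBits) | tag));
}

// If the delta does not fit in 6 bits, emit bits 6..31 first as a
// variable-length pc-jump: 7 data bits per byte, final chunk tagged with 1.
uint32_t MaybeWritePCJump(std::vector<uint8_t>* out, uint32_t pc_delta) {
  if (pc_delta <= kSmallPCDeltaMask) return pc_delta;
  uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
  while (pc_jump > 0) {
    uint32_t chunk = pc_jump & kChunkMask;
    pc_jump >>= kChunkBits;
    uint32_t last = (pc_jump == 0) ? 1u : 0u;          // tag the final chunk
    out->push_back(static_cast<uint8_t>((chunk << 1) | last));
  }
  return pc_delta & kSmallPCDeltaMask;                 // low 6 bits remain
}

int main() {
  std::vector<uint8_t> stream;
  uint32_t pc_delta = 5000;                            // too big for 6 bits
  pc_delta = MaybeWritePCJump(&stream, pc_delta);
  WriteShortRecord(&stream, pc_delta, 1 /* code_target */);
  for (size_t i = 0; i < stream.size(); i++) {
    std::printf("%02x ", static_cast<unsigned>(stream[i]));
  }
  std::printf("\n");
  return 0;
}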
const int kMaxRelocModes = 14;
const int kTagBits = 2;
const int kTagMask = (1 << kTagBits) - 1;
const int kExtraTagBits = 4;
const int kPositionTypeTagBits = 1;
const int kSmallDataBits = kBitsPerByte - kPositionTypeTagBits;
const int kLocatableTypeTagBits = 2;
const int kSmallDataBits = kBitsPerByte - kLocatableTypeTagBits;
const int kEmbeddedObjectTag = 0;
const int kCodeTargetTag = 1;
const int kPositionTag = 2;
const int kLocatableTag = 2;
const int kDefaultTag = 3;
const int kPCJumpTag = (1 << kExtraTagBits) - 1;
const int kPCJumpExtraTag = (1 << kExtraTagBits) - 1;
const int kSmallPCDeltaBits = kBitsPerByte - kTagBits;
const int kSmallPCDeltaMask = (1 << kSmallPCDeltaBits) - 1;
@ -149,11 +193,12 @@ const int kLastChunkTagMask = 1;
const int kLastChunkTag = 1;
const int kDataJumpTag = kPCJumpTag - 1;
const int kDataJumpExtraTag = kPCJumpExtraTag - 1;
const int kNonstatementPositionTag = 0;
const int kStatementPositionTag = 1;
const int kCommentTag = 2;
const int kCodeWithIdTag = 0;
const int kNonstatementPositionTag = 1;
const int kStatementPositionTag = 2;
const int kCommentTag = 3;
uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
@ -161,7 +206,7 @@ uint32_t RelocInfoWriter::WriteVariableLengthPCJump(uint32_t pc_delta) {
// Otherwise write a variable length PC jump for the bits that do
// not fit in the kSmallPCDeltaBits bits.
if (is_uintn(pc_delta, kSmallPCDeltaBits)) return pc_delta;
WriteExtraTag(kPCJumpTag, kVariableLengthPCJumpTopTag);
WriteExtraTag(kPCJumpExtraTag, kVariableLengthPCJumpTopTag);
uint32_t pc_jump = pc_delta >> kSmallPCDeltaBits;
ASSERT(pc_jump > 0);
// Write kChunkBits size chunks of the pc_jump.
@ -184,7 +229,7 @@ void RelocInfoWriter::WriteTaggedPC(uint32_t pc_delta, int tag) {
void RelocInfoWriter::WriteTaggedData(intptr_t data_delta, int tag) {
*--pos_ = static_cast<byte>(data_delta << kPositionTypeTagBits | tag);
*--pos_ = static_cast<byte>(data_delta << kLocatableTypeTagBits | tag);
}
@ -203,11 +248,20 @@ void RelocInfoWriter::WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag) {
}
void RelocInfoWriter::WriteExtraTaggedIntData(int data_delta, int top_tag) {
WriteExtraTag(kDataJumpExtraTag, top_tag);
for (int i = 0; i < kIntSize; i++) {
*--pos_ = static_cast<byte>(data_delta);
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
data_delta = data_delta >> kBitsPerByte;
}
}
void RelocInfoWriter::WriteExtraTaggedData(intptr_t data_delta, int top_tag) {
WriteExtraTag(kDataJumpTag, top_tag);
WriteExtraTag(kDataJumpExtraTag, top_tag);
for (int i = 0; i < kIntptrSize; i++) {
*--pos_ = static_cast<byte>(data_delta);
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
data_delta = data_delta >> kBitsPerByte;
}
}
@ -217,9 +271,9 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
#ifdef DEBUG
byte* begin_pos = pos_;
#endif
Counters::reloc_info_count.Increment();
ASSERT(rinfo->pc() - last_pc_ >= 0);
ASSERT(RelocInfo::NUMBER_OF_MODES <= kMaxRelocModes);
ASSERT(RelocInfo::NUMBER_OF_MODES - RelocInfo::LAST_COMPACT_ENUM <=
kMaxRelocModes);
// Use unsigned delta-encoding for pc.
uint32_t pc_delta = static_cast<uint32_t>(rinfo->pc() - last_pc_);
RelocInfo::Mode rmode = rinfo->rmode();
@ -230,35 +284,48 @@ void RelocInfoWriter::Write(const RelocInfo* rinfo) {
} else if (rmode == RelocInfo::CODE_TARGET) {
WriteTaggedPC(pc_delta, kCodeTargetTag);
ASSERT(begin_pos - pos_ <= RelocInfo::kMaxCallSize);
} else if (rmode == RelocInfo::CODE_TARGET_WITH_ID) {
// Use signed delta-encoding for id.
ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
int id_delta = static_cast<int>(rinfo->data()) - last_id_;
// Check if delta is small enough to fit in a tagged byte.
if (is_intn(id_delta, kSmallDataBits)) {
WriteTaggedPC(pc_delta, kLocatableTag);
WriteTaggedData(id_delta, kCodeWithIdTag);
} else {
// Otherwise, use costly encoding.
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedIntData(id_delta, kCodeWithIdTag);
}
last_id_ = static_cast<int>(rinfo->data());
} else if (RelocInfo::IsPosition(rmode)) {
// Use signed delta-encoding for data.
intptr_t data_delta = rinfo->data() - last_data_;
int pos_type_tag = rmode == RelocInfo::POSITION ? kNonstatementPositionTag
: kStatementPositionTag;
// Check if data is small enough to fit in a tagged byte.
// We cannot use is_intn because data_delta is not an int32_t.
if (data_delta >= -(1 << (kSmallDataBits-1)) &&
data_delta < 1 << (kSmallDataBits-1)) {
WriteTaggedPC(pc_delta, kPositionTag);
WriteTaggedData(data_delta, pos_type_tag);
last_data_ = rinfo->data();
// Use signed delta-encoding for position.
ASSERT(static_cast<int>(rinfo->data()) == rinfo->data());
int pos_delta = static_cast<int>(rinfo->data()) - last_position_;
int pos_type_tag = (rmode == RelocInfo::POSITION) ? kNonstatementPositionTag
: kStatementPositionTag;
// Check if delta is small enough to fit in a tagged byte.
if (is_intn(pos_delta, kSmallDataBits)) {
WriteTaggedPC(pc_delta, kLocatableTag);
WriteTaggedData(pos_delta, pos_type_tag);
} else {
// Otherwise, use costly encoding.
WriteExtraTaggedPC(pc_delta, kPCJumpTag);
WriteExtraTaggedData(data_delta, pos_type_tag);
last_data_ = rinfo->data();
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedIntData(pos_delta, pos_type_tag);
}
last_position_ = static_cast<int>(rinfo->data());
} else if (RelocInfo::IsComment(rmode)) {
// Comments are normally not generated, so we use the costly encoding.
WriteExtraTaggedPC(pc_delta, kPCJumpTag);
WriteExtraTaggedData(rinfo->data() - last_data_, kCommentTag);
last_data_ = rinfo->data();
WriteExtraTaggedPC(pc_delta, kPCJumpExtraTag);
WriteExtraTaggedData(rinfo->data(), kCommentTag);
ASSERT(begin_pos - pos_ >= RelocInfo::kMinRelocCommentSize);
} else {
ASSERT(rmode > RelocInfo::LAST_COMPACT_ENUM);
int saved_mode = rmode - RelocInfo::LAST_COMPACT_ENUM;
// For all other modes we simply use the mode as the extra tag.
// None of these modes need a data component.
ASSERT(rmode < kPCJumpTag && rmode < kDataJumpTag);
WriteExtraTaggedPC(pc_delta, rmode);
ASSERT(saved_mode < kPCJumpExtraTag && saved_mode < kDataJumpExtraTag);
WriteExtraTaggedPC(pc_delta, saved_mode);
}
last_pc_ = rinfo->pc();
#ifdef DEBUG
@ -292,12 +359,32 @@ inline void RelocIterator::AdvanceReadPC() {
}
void RelocIterator::AdvanceReadId() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
}
last_id_ += x;
rinfo_.data_ = last_id_;
}
void RelocIterator::AdvanceReadPosition() {
int x = 0;
for (int i = 0; i < kIntSize; i++) {
x |= static_cast<int>(*--pos_) << i * kBitsPerByte;
}
last_position_ += x;
rinfo_.data_ = last_position_;
}
void RelocIterator::AdvanceReadData() {
intptr_t x = 0;
for (int i = 0; i < kIntptrSize; i++) {
x |= static_cast<intptr_t>(*--pos_) << i * kBitsPerByte;
}
rinfo_.data_ += x;
rinfo_.data_ = x;
}
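
The writer/reader pair relies on serializing a signed delta lowest byte first and reassembling it with ORs, counting on signed right shift being arithmetic (as the comments note). A small standalone round-trip sketch of that convention, with hypothetical names:

#include <cstdint>
#include <cstdio>

const int kBitsPerByte = 8;
const int kIntSize = 4;

// Store a signed delta lowest byte first, the way WriteExtraTaggedIntData
// does; the right shift of a negative value is arithmetic on V8's targets.
void WriteIntLE(uint8_t* out, int32_t delta) {
  for (int i = 0; i < kIntSize; i++) {
    out[i] = static_cast<uint8_t>(delta);
    delta = delta >> kBitsPerByte;
  }
}

// Reassemble it the way AdvanceReadId/AdvanceReadPosition do (accumulating in
// an unsigned value here to keep the sketch free of signed-overflow concerns).
int32_t ReadIntLE(const uint8_t* in) {
  uint32_t x = 0;
  for (int i = 0; i < kIntSize; i++) {
    x |= static_cast<uint32_t>(in[i]) << (i * kBitsPerByte);
  }
  return static_cast<int32_t>(x);
}

int main() {
  uint8_t buf[kIntSize];
  WriteIntLE(buf, -1234567);
  std::printf("%d\n", ReadIntLE(buf));   // prints -1234567
  return 0;
}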
@ -317,27 +404,33 @@ void RelocIterator::AdvanceReadVariableLengthPCJump() {
}
inline int RelocIterator::GetPositionTypeTag() {
return *pos_ & ((1 << kPositionTypeTagBits) - 1);
inline int RelocIterator::GetLocatableTypeTag() {
return *pos_ & ((1 << kLocatableTypeTagBits) - 1);
}
inline void RelocIterator::ReadTaggedData() {
inline void RelocIterator::ReadTaggedId() {
int8_t signed_b = *pos_;
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
rinfo_.data_ += signed_b >> kPositionTypeTagBits;
last_id_ += signed_b >> kLocatableTypeTagBits;
rinfo_.data_ = last_id_;
}
inline RelocInfo::Mode RelocIterator::DebugInfoModeFromTag(int tag) {
if (tag == kStatementPositionTag) {
return RelocInfo::STATEMENT_POSITION;
} else if (tag == kNonstatementPositionTag) {
return RelocInfo::POSITION;
} else {
ASSERT(tag == kCommentTag);
return RelocInfo::COMMENT;
}
inline void RelocIterator::ReadTaggedPosition() {
int8_t signed_b = *pos_;
// Signed right shift is arithmetic shift. Tested in test-utils.cc.
last_position_ += signed_b >> kLocatableTypeTagBits;
rinfo_.data_ = last_position_;
}
static inline RelocInfo::Mode GetPositionModeFromTag(int tag) {
ASSERT(tag == kNonstatementPositionTag ||
tag == kStatementPositionTag);
return (tag == kNonstatementPositionTag) ?
RelocInfo::POSITION :
RelocInfo::STATEMENT_POSITION;
}
@ -356,37 +449,64 @@ void RelocIterator::next() {
} else if (tag == kCodeTargetTag) {
ReadTaggedPC();
if (SetMode(RelocInfo::CODE_TARGET)) return;
} else if (tag == kPositionTag) {
} else if (tag == kLocatableTag) {
ReadTaggedPC();
Advance();
// Check if we want source positions.
if (mode_mask_ & RelocInfo::kPositionMask) {
ReadTaggedData();
if (SetMode(DebugInfoModeFromTag(GetPositionTypeTag()))) return;
int locatable_tag = GetLocatableTypeTag();
if (locatable_tag == kCodeWithIdTag) {
if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
ReadTaggedId();
return;
}
} else {
// Compact encoding is never used for comments,
// so it must be a position.
ASSERT(locatable_tag == kNonstatementPositionTag ||
locatable_tag == kStatementPositionTag);
if (mode_mask_ & RelocInfo::kPositionMask) {
ReadTaggedPosition();
if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
}
}
} else {
ASSERT(tag == kDefaultTag);
int extra_tag = GetExtraTag();
if (extra_tag == kPCJumpTag) {
if (extra_tag == kPCJumpExtraTag) {
int top_tag = GetTopTag();
if (top_tag == kVariableLengthPCJumpTopTag) {
AdvanceReadVariableLengthPCJump();
} else {
AdvanceReadPC();
}
} else if (extra_tag == kDataJumpTag) {
// Check if we want debug modes (the only ones with data).
if (mode_mask_ & RelocInfo::kDebugMask) {
int top_tag = GetTopTag();
AdvanceReadData();
if (SetMode(DebugInfoModeFromTag(top_tag))) return;
} else if (extra_tag == kDataJumpExtraTag) {
int locatable_tag = GetTopTag();
if (locatable_tag == kCodeWithIdTag) {
if (SetMode(RelocInfo::CODE_TARGET_WITH_ID)) {
AdvanceReadId();
return;
}
Advance(kIntSize);
} else if (locatable_tag != kCommentTag) {
ASSERT(locatable_tag == kNonstatementPositionTag ||
locatable_tag == kStatementPositionTag);
if (mode_mask_ & RelocInfo::kPositionMask) {
AdvanceReadPosition();
if (SetMode(GetPositionModeFromTag(locatable_tag))) return;
} else {
Advance(kIntSize);
}
} else {
// Otherwise, just skip over the data.
ASSERT(locatable_tag == kCommentTag);
if (SetMode(RelocInfo::COMMENT)) {
AdvanceReadData();
return;
}
Advance(kIntptrSize);
}
} else {
AdvanceReadPC();
if (SetMode(static_cast<RelocInfo::Mode>(extra_tag))) return;
int rmode = extra_tag + RelocInfo::LAST_COMPACT_ENUM;
if (SetMode(static_cast<RelocInfo::Mode>(rmode))) return;
}
}
}
@ -402,6 +522,8 @@ RelocIterator::RelocIterator(Code* code, int mode_mask) {
end_ = code->relocation_start();
done_ = false;
mode_mask_ = mode_mask;
last_id_ = 0;
last_position_ = 0;
if (mode_mask_ == 0) pos_ = end_;
next();
}
@ -415,6 +537,8 @@ RelocIterator::RelocIterator(const CodeDesc& desc, int mode_mask) {
end_ = pos_ - desc.reloc_size;
done_ = false;
mode_mask_ = mode_mask;
last_id_ = 0;
last_position_ = 0;
if (mode_mask_ == 0) pos_ = end_;
next();
}
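
A hedged sketch of how such an iterator is typically consumed (the exact call sites are not part of this diff, and rinfo() is assumed to return the current record): the mode mask is chosen up front so that next() can skip every record the caller never inspects.

#include "v8.h"   // V8-internal; brings in RelocIterator and RelocInfo

// Walk only the source-position records of a Code object; other record
// types are skipped inside next() because they are not in the mode mask.
static void PrintSourcePositions(v8::internal::Code* code) {
  using namespace v8::internal;
  int mask = RelocInfo::kPositionMask;   // POSITION | STATEMENT_POSITION
  for (RelocIterator it(code, mask); !it.done(); it.next()) {
    RelocInfo* info = it.rinfo();
    PrintF("pc=%p position=%d\n",
           static_cast<void*>(info->pc()), static_cast<int>(info->data()));
  }
}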
@ -442,6 +566,8 @@ const char* RelocInfo::RelocModeName(RelocInfo::Mode rmode) {
return "debug break";
case RelocInfo::CODE_TARGET:
return "code target";
case RelocInfo::CODE_TARGET_WITH_ID:
return "code target with id";
case RelocInfo::GLOBAL_PROPERTY_CELL:
return "global property cell";
case RelocInfo::RUNTIME_ENTRY:
@ -488,9 +614,13 @@ void RelocInfo::Print(FILE* out) {
Code* code = Code::GetCodeFromTargetAddress(target_address());
PrintF(out, " (%s) (%p)", Code::Kind2String(code->kind()),
target_address());
if (rmode_ == CODE_TARGET_WITH_ID) {
PrintF(" (id=%d)", static_cast<int>(data_));
}
} else if (IsPosition(rmode_)) {
PrintF(out, " (%" V8_PTR_PREFIX "d)", data());
} else if (rmode_ == RelocInfo::RUNTIME_ENTRY) {
} else if (rmode_ == RelocInfo::RUNTIME_ENTRY &&
Isolate::Current()->deoptimizer_data() != NULL) {
// Deoptimization bailouts are stored as runtime entries.
int id = Deoptimizer::GetDeoptimizationId(
target_address(), Deoptimizer::EAGER);
@ -520,13 +650,14 @@ void RelocInfo::Verify() {
#endif
case CONSTRUCT_CALL:
case CODE_TARGET_CONTEXT:
case CODE_TARGET_WITH_ID:
case CODE_TARGET: {
// convert inline target address to code object
Address addr = target_address();
ASSERT(addr != NULL);
// Check that we can find the right code object.
Code* code = Code::GetCodeFromTargetAddress(addr);
Object* found = Heap::FindCodeObject(addr);
Object* found = HEAP->FindCodeObject(addr);
ASSERT(found->IsCode());
ASSERT(code->address() == HeapObject::cast(found)->address());
break;
@ -552,153 +683,184 @@ void RelocInfo::Verify() {
// -----------------------------------------------------------------------------
// Implementation of ExternalReference
ExternalReference::ExternalReference(Builtins::CFunctionId id)
: address_(Redirect(Builtins::c_function_address(id))) {}
ExternalReference::ExternalReference(Builtins::CFunctionId id, Isolate* isolate)
: address_(Redirect(isolate, Builtins::c_function_address(id))) {}
ExternalReference::ExternalReference(
ApiFunction* fun, Type type = ExternalReference::BUILTIN_CALL)
: address_(Redirect(fun->address(), type)) {}
ApiFunction* fun,
Type type = ExternalReference::BUILTIN_CALL,
Isolate* isolate = NULL)
: address_(Redirect(isolate, fun->address(), type)) {}
ExternalReference::ExternalReference(Builtins::Name name)
: address_(Builtins::builtin_address(name)) {}
ExternalReference::ExternalReference(Builtins::Name name, Isolate* isolate)
: address_(isolate->builtins()->builtin_address(name)) {}
ExternalReference::ExternalReference(Runtime::FunctionId id)
: address_(Redirect(Runtime::FunctionForId(id)->entry)) {}
ExternalReference::ExternalReference(Runtime::FunctionId id,
Isolate* isolate)
: address_(Redirect(isolate, Runtime::FunctionForId(id)->entry)) {}
ExternalReference::ExternalReference(Runtime::Function* f)
: address_(Redirect(f->entry)) {}
ExternalReference::ExternalReference(const Runtime::Function* f,
Isolate* isolate)
: address_(Redirect(isolate, f->entry)) {}
ExternalReference::ExternalReference(const IC_Utility& ic_utility)
: address_(Redirect(ic_utility.address())) {}
ExternalReference ExternalReference::isolate_address() {
return ExternalReference(Isolate::Current());
}
ExternalReference::ExternalReference(const IC_Utility& ic_utility,
Isolate* isolate)
: address_(Redirect(isolate, ic_utility.address())) {}
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference::ExternalReference(const Debug_Address& debug_address)
: address_(debug_address.address()) {}
ExternalReference::ExternalReference(const Debug_Address& debug_address,
Isolate* isolate)
: address_(debug_address.address(isolate)) {}
#endif
ExternalReference::ExternalReference(StatsCounter* counter)
: address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
ExternalReference::ExternalReference(Top::AddressId id)
: address_(Top::get_address_from_id(id)) {}
ExternalReference::ExternalReference(Isolate::AddressId id, Isolate* isolate)
: address_(isolate->get_address_from_id(id)) {}
ExternalReference::ExternalReference(const SCTableReference& table_ref)
: address_(table_ref.address()) {}
ExternalReference ExternalReference::perform_gc_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(Runtime::PerformGC)));
ExternalReference ExternalReference::perform_gc_function(Isolate* isolate) {
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(Runtime::PerformGC)));
}
ExternalReference ExternalReference::fill_heap_number_with_random_function() {
return
ExternalReference(Redirect(FUNCTION_ADDR(V8::FillHeapNumberWithRandom)));
ExternalReference ExternalReference::fill_heap_number_with_random_function(
Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(V8::FillHeapNumberWithRandom)));
}
ExternalReference ExternalReference::delete_handle_scope_extensions() {
return ExternalReference(Redirect(FUNCTION_ADDR(
HandleScope::DeleteExtensions)));
ExternalReference ExternalReference::delete_handle_scope_extensions(
Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(HandleScope::DeleteExtensions)));
}
ExternalReference ExternalReference::random_uint32_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(V8::Random)));
ExternalReference ExternalReference::random_uint32_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(V8::Random)));
}
ExternalReference ExternalReference::transcendental_cache_array_address() {
return ExternalReference(TranscendentalCache::cache_array_address());
ExternalReference ExternalReference::transcendental_cache_array_address(
Isolate* isolate) {
return ExternalReference(
isolate->transcendental_cache()->cache_array_address());
}
ExternalReference ExternalReference::new_deoptimizer_function() {
ExternalReference ExternalReference::new_deoptimizer_function(
Isolate* isolate) {
return ExternalReference(
Redirect(FUNCTION_ADDR(Deoptimizer::New)));
Redirect(isolate, FUNCTION_ADDR(Deoptimizer::New)));
}
ExternalReference ExternalReference::compute_output_frames_function() {
ExternalReference ExternalReference::compute_output_frames_function(
Isolate* isolate) {
return ExternalReference(
Redirect(FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
Redirect(isolate, FUNCTION_ADDR(Deoptimizer::ComputeOutputFrames)));
}
ExternalReference ExternalReference::global_contexts_list() {
return ExternalReference(Heap::global_contexts_list_address());
ExternalReference ExternalReference::global_contexts_list(Isolate* isolate) {
return ExternalReference(isolate->heap()->global_contexts_list_address());
}
ExternalReference ExternalReference::keyed_lookup_cache_keys() {
return ExternalReference(KeyedLookupCache::keys_address());
ExternalReference ExternalReference::keyed_lookup_cache_keys(Isolate* isolate) {
return ExternalReference(isolate->keyed_lookup_cache()->keys_address());
}
ExternalReference ExternalReference::keyed_lookup_cache_field_offsets() {
return ExternalReference(KeyedLookupCache::field_offsets_address());
ExternalReference ExternalReference::keyed_lookup_cache_field_offsets(
Isolate* isolate) {
return ExternalReference(
isolate->keyed_lookup_cache()->field_offsets_address());
}
ExternalReference ExternalReference::the_hole_value_location() {
return ExternalReference(Factory::the_hole_value().location());
ExternalReference ExternalReference::the_hole_value_location(Isolate* isolate) {
return ExternalReference(isolate->factory()->the_hole_value().location());
}
ExternalReference ExternalReference::arguments_marker_location() {
return ExternalReference(Factory::arguments_marker().location());
ExternalReference ExternalReference::arguments_marker_location(
Isolate* isolate) {
return ExternalReference(isolate->factory()->arguments_marker().location());
}
ExternalReference ExternalReference::roots_address() {
return ExternalReference(Heap::roots_address());
ExternalReference ExternalReference::roots_address(Isolate* isolate) {
return ExternalReference(isolate->heap()->roots_address());
}
ExternalReference ExternalReference::address_of_stack_limit() {
return ExternalReference(StackGuard::address_of_jslimit());
ExternalReference ExternalReference::address_of_stack_limit(Isolate* isolate) {
return ExternalReference(isolate->stack_guard()->address_of_jslimit());
}
ExternalReference ExternalReference::address_of_real_stack_limit() {
return ExternalReference(StackGuard::address_of_real_jslimit());
ExternalReference ExternalReference::address_of_real_stack_limit(
Isolate* isolate) {
return ExternalReference(isolate->stack_guard()->address_of_real_jslimit());
}
ExternalReference ExternalReference::address_of_regexp_stack_limit() {
return ExternalReference(RegExpStack::limit_address());
ExternalReference ExternalReference::address_of_regexp_stack_limit(
Isolate* isolate) {
return ExternalReference(isolate->regexp_stack()->limit_address());
}
ExternalReference ExternalReference::new_space_start() {
return ExternalReference(Heap::NewSpaceStart());
ExternalReference ExternalReference::new_space_start(Isolate* isolate) {
return ExternalReference(isolate->heap()->NewSpaceStart());
}
ExternalReference ExternalReference::new_space_mask() {
return ExternalReference(reinterpret_cast<Address>(Heap::NewSpaceMask()));
ExternalReference ExternalReference::new_space_mask(Isolate* isolate) {
Address mask = reinterpret_cast<Address>(isolate->heap()->NewSpaceMask());
return ExternalReference(mask);
}
ExternalReference ExternalReference::new_space_allocation_top_address() {
return ExternalReference(Heap::NewSpaceAllocationTopAddress());
ExternalReference ExternalReference::new_space_allocation_top_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->NewSpaceAllocationTopAddress());
}
ExternalReference ExternalReference::heap_always_allocate_scope_depth() {
return ExternalReference(Heap::always_allocate_scope_depth_address());
ExternalReference ExternalReference::heap_always_allocate_scope_depth(
Isolate* isolate) {
Heap* heap = isolate->heap();
return ExternalReference(heap->always_allocate_scope_depth_address());
}
ExternalReference ExternalReference::new_space_allocation_limit_address() {
return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
ExternalReference ExternalReference::new_space_allocation_limit_address(
Isolate* isolate) {
return ExternalReference(isolate->heap()->NewSpaceAllocationLimitAddress());
}
@ -717,8 +879,9 @@ ExternalReference ExternalReference::handle_scope_limit_address() {
}
ExternalReference ExternalReference::scheduled_exception_address() {
return ExternalReference(Top::scheduled_exception_address());
ExternalReference ExternalReference::scheduled_exception_address(
Isolate* isolate) {
return ExternalReference(isolate->scheduled_exception_address());
}
@ -740,15 +903,34 @@ ExternalReference ExternalReference::address_of_minus_zero() {
}
ExternalReference ExternalReference::address_of_zero() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::zero)));
}
ExternalReference ExternalReference::address_of_uint8_max_value() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::uint8_max_value)));
}
ExternalReference ExternalReference::address_of_negative_infinity() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::negative_infinity)));
}
ExternalReference ExternalReference::address_of_nan() {
return ExternalReference(reinterpret_cast<void*>(
const_cast<double*>(&DoubleConstant::nan)));
}
#ifndef V8_INTERPRETED_REGEXP
ExternalReference ExternalReference::re_check_stack_guard_state() {
ExternalReference ExternalReference::re_check_stack_guard_state(
Isolate* isolate) {
Address function;
#ifdef V8_TARGET_ARCH_X64
function = FUNCTION_ADDR(RegExpMacroAssemblerX64::CheckStackGuardState);
@ -756,19 +938,23 @@ ExternalReference ExternalReference::re_check_stack_guard_state() {
function = FUNCTION_ADDR(RegExpMacroAssemblerIA32::CheckStackGuardState);
#elif V8_TARGET_ARCH_ARM
function = FUNCTION_ADDR(RegExpMacroAssemblerARM::CheckStackGuardState);
#elif V8_TARGET_ARCH_MIPS
function = FUNCTION_ADDR(RegExpMacroAssemblerMIPS::CheckStackGuardState);
#else
UNREACHABLE();
#endif
return ExternalReference(Redirect(function));
return ExternalReference(Redirect(isolate, function));
}
ExternalReference ExternalReference::re_grow_stack() {
ExternalReference ExternalReference::re_grow_stack(Isolate* isolate) {
return ExternalReference(
Redirect(FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
Redirect(isolate, FUNCTION_ADDR(NativeRegExpMacroAssembler::GrowStack)));
}
ExternalReference ExternalReference::re_case_insensitive_compare_uc16() {
ExternalReference ExternalReference::re_case_insensitive_compare_uc16(
Isolate* isolate) {
return ExternalReference(Redirect(
isolate,
FUNCTION_ADDR(NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16)));
}
@ -777,16 +963,21 @@ ExternalReference ExternalReference::re_word_character_map() {
NativeRegExpMacroAssembler::word_character_map_address());
}
ExternalReference ExternalReference::address_of_static_offsets_vector() {
return ExternalReference(OffsetsVector::static_offsets_vector_address());
ExternalReference ExternalReference::address_of_static_offsets_vector(
Isolate* isolate) {
return ExternalReference(
OffsetsVector::static_offsets_vector_address(isolate));
}
ExternalReference ExternalReference::address_of_regexp_stack_memory_address() {
return ExternalReference(RegExpStack::memory_address());
ExternalReference ExternalReference::address_of_regexp_stack_memory_address(
Isolate* isolate) {
return ExternalReference(
isolate->regexp_stack()->memory_address());
}
ExternalReference ExternalReference::address_of_regexp_stack_memory_size() {
return ExternalReference(RegExpStack::memory_size_address());
ExternalReference ExternalReference::address_of_regexp_stack_memory_size(
Isolate* isolate) {
return ExternalReference(isolate->regexp_stack()->memory_size_address());
}
#endif // V8_INTERPRETED_REGEXP
@ -817,6 +1008,45 @@ static double mod_two_doubles(double x, double y) {
}
static double math_sin_double(double x) {
return sin(x);
}
static double math_cos_double(double x) {
return cos(x);
}
static double math_log_double(double x) {
return log(x);
}
ExternalReference ExternalReference::math_sin_double_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(math_sin_double),
BUILTIN_FP_CALL));
}
ExternalReference ExternalReference::math_cos_double_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(math_cos_double),
BUILTIN_FP_CALL));
}
ExternalReference ExternalReference::math_log_double_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(math_log_double),
BUILTIN_FP_CALL));
}
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
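
The "binary decomposition" the comment refers to is exponentiation by squaring. The following is a self-contained illustration of the technique, including the negative-exponent case; it is not the exact body of power_double_int, which falls outside this hunk.

#include <cstdio>

// Exponentiation by squaring: consume one bit of the exponent per iteration.
static double PowDoubleInt(double x, int y) {
  double base = (y < 0) ? 1.0 / x : x;
  unsigned int n = (y < 0) ? static_cast<unsigned int>(-(y + 1)) + 1
                           : static_cast<unsigned int>(y);
  double result = 1.0;
  while (n != 0) {
    if (n & 1) result *= base;   // lowest bit set: multiply it in
    base *= base;                // square for the next bit
    n >>= 1;
  }
  return result;
}

int main() {
  std::printf("%g %g %g\n", PowDoubleInt(2.0, 10), PowDoubleInt(2.0, -2),
              PowDoubleInt(3.0, 0));   // 1024 0.25 1
  return 0;
}

Each iteration handles one bit of the exponent, so the number of multiplications is proportional to the bit length of y rather than to y itself.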
@ -852,15 +1082,19 @@ double power_double_double(double x, double y) {
}
ExternalReference ExternalReference::power_double_double_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(power_double_double),
FP_RETURN_CALL));
ExternalReference ExternalReference::power_double_double_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(power_double_double),
BUILTIN_FP_FP_CALL));
}
ExternalReference ExternalReference::power_double_int_function() {
return ExternalReference(Redirect(FUNCTION_ADDR(power_double_int),
FP_RETURN_CALL));
ExternalReference ExternalReference::power_double_int_function(
Isolate* isolate) {
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(power_double_int),
BUILTIN_FP_INT_CALL));
}
@ -871,7 +1105,7 @@ static int native_compare_doubles(double y, double x) {
ExternalReference ExternalReference::double_fp_operation(
Token::Value operation) {
Token::Value operation, Isolate* isolate) {
typedef double BinaryFPOperation(double x, double y);
BinaryFPOperation* function = NULL;
switch (operation) {
@ -893,29 +1127,28 @@ ExternalReference ExternalReference::double_fp_operation(
default:
UNREACHABLE();
}
// Passing true as 2nd parameter indicates that they return an fp value.
return ExternalReference(Redirect(FUNCTION_ADDR(function), FP_RETURN_CALL));
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(function),
BUILTIN_FP_FP_CALL));
}
ExternalReference ExternalReference::compare_doubles() {
return ExternalReference(Redirect(FUNCTION_ADDR(native_compare_doubles),
BUILTIN_CALL));
ExternalReference ExternalReference::compare_doubles(Isolate* isolate) {
return ExternalReference(Redirect(isolate,
FUNCTION_ADDR(native_compare_doubles),
BUILTIN_COMPARE_CALL));
}
ExternalReference::ExternalReferenceRedirector*
ExternalReference::redirector_ = NULL;
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference ExternalReference::debug_break() {
return ExternalReference(Redirect(FUNCTION_ADDR(Debug::Break)));
ExternalReference ExternalReference::debug_break(Isolate* isolate) {
return ExternalReference(Redirect(isolate, FUNCTION_ADDR(Debug_Break)));
}
ExternalReference ExternalReference::debug_step_in_fp_address() {
return ExternalReference(Debug::step_in_fp_addr());
ExternalReference ExternalReference::debug_step_in_fp_address(
Isolate* isolate) {
return ExternalReference(isolate->debug()->step_in_fp_addr());
}
#endif

334
deps/v8/src/assembler.h

@ -30,19 +30,34 @@
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
#ifndef V8_ASSEMBLER_H_
#define V8_ASSEMBLER_H_
#include "allocation.h"
#include "gdb-jit.h"
#include "runtime.h"
#include "top.h"
#include "token.h"
namespace v8 {
namespace internal {
const unsigned kNoASTId = -1;
// -----------------------------------------------------------------------------
// Platform independent assembler base class.
class AssemblerBase: public Malloced {
public:
explicit AssemblerBase(Isolate* isolate);
Isolate* isolate() const { return isolate_; }
int jit_cookie() { return jit_cookie_; }
private:
Isolate* isolate_;
int jit_cookie_;
};
// -----------------------------------------------------------------------------
// Common double constants.
@ -52,7 +67,10 @@ class DoubleConstant: public AllStatic {
static const double min_int;
static const double one_half;
static const double minus_zero;
static const double zero;
static const double uint8_max_value;
static const double negative_infinity;
static const double nan;
};
@ -64,18 +82,32 @@ class DoubleConstant: public AllStatic {
class Label BASE_EMBEDDED {
public:
INLINE(Label()) { Unuse(); }
INLINE(~Label()) { ASSERT(!is_linked()); }
enum Distance {
kNear, kFar
};
INLINE(Label()) {
Unuse();
UnuseNear();
}
INLINE(void Unuse()) { pos_ = 0; }
INLINE(~Label()) {
ASSERT(!is_linked());
ASSERT(!is_near_linked());
}
INLINE(bool is_bound() const) { return pos_ < 0; }
INLINE(bool is_unused() const) { return pos_ == 0; }
INLINE(bool is_linked() const) { return pos_ > 0; }
INLINE(void Unuse()) { pos_ = 0; }
INLINE(void UnuseNear()) { near_link_pos_ = 0; }
INLINE(bool is_bound() const) { return pos_ < 0; }
INLINE(bool is_unused() const) { return pos_ == 0 && near_link_pos_ == 0; }
INLINE(bool is_linked() const) { return pos_ > 0; }
INLINE(bool is_near_linked() const) { return near_link_pos_ > 0; }
// Returns the position of bound or linked labels. Cannot be used
// for unused labels.
int pos() const;
int near_link_pos() const { return near_link_pos_ - 1; }
private:
// pos_ encodes both the binding state (via its sign)
@ -86,74 +118,30 @@ class Label BASE_EMBEDDED {
// pos_ > 0 linked label, pos() returns the last reference position
int pos_;
// Behaves like |pos_| in the "> 0" case, but for near jumps to this label.
int near_link_pos_;
void bind_to(int pos) {
pos_ = -pos - 1;
ASSERT(is_bound());
}
void link_to(int pos) {
pos_ = pos + 1;
ASSERT(is_linked());
void link_to(int pos, Distance distance = kFar) {
if (distance == kNear) {
near_link_pos_ = pos + 1;
ASSERT(is_near_linked());
} else {
pos_ = pos + 1;
ASSERT(is_linked());
}
}
friend class Assembler;
friend class RegexpAssembler;
friend class Displacement;
friend class ShadowTarget;
friend class RegExpMacroAssemblerIrregexp;
};
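
The sign trick in pos_ (mirrored by near_link_pos_ for near jumps) is easy to misread. Below is a tiny standalone model of the same encoding, with the three states spelled out; it is an illustration, not the class above.

#include <cassert>
#include <cstdio>

// Standalone model of the pos_ encoding used by Label:
//   pos_ == 0 : unused
//   pos_ >  0 : linked; the last reference position is pos_ - 1
//   pos_ <  0 : bound;  the bound position is -pos_ - 1
struct MiniLabel {
  int pos_ = 0;

  bool is_unused() const { return pos_ == 0; }
  bool is_linked() const { return pos_ > 0; }
  bool is_bound()  const { return pos_ < 0; }

  void link_to(int pos) { pos_ = pos + 1; }   // offset 0 stays distinguishable
  void bind_to(int pos) { pos_ = -pos - 1; }  // ditto for a binding at 0

  int pos() const {
    assert(!is_unused());
    return is_bound() ? -pos_ - 1 : pos_ - 1;
  }
};

int main() {
  MiniLabel l;
  l.link_to(0);                                      // forward reference at 0
  std::printf("%d %d\n", l.is_linked(), l.pos());    // 1 0
  l.bind_to(16);                                     // later bound at 16
  std::printf("%d %d\n", l.is_bound(), l.pos());     // 1 16
  return 0;
}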
// -----------------------------------------------------------------------------
// NearLabels are labels used for short jumps (in Intel jargon).
// NearLabels should be used if it can be guaranteed that the jump range is
// within -128 to +127. We already use short jumps when jumping backwards,
// so using a NearLabel will only have performance impact if used for forward
// jumps.
class NearLabel BASE_EMBEDDED {
public:
NearLabel() { Unuse(); }
~NearLabel() { ASSERT(!is_linked()); }
void Unuse() {
pos_ = -1;
unresolved_branches_ = 0;
#ifdef DEBUG
for (int i = 0; i < kMaxUnresolvedBranches; i++) {
unresolved_positions_[i] = -1;
}
#endif
}
int pos() {
ASSERT(is_bound());
return pos_;
}
bool is_bound() { return pos_ >= 0; }
bool is_linked() { return !is_bound() && unresolved_branches_ > 0; }
bool is_unused() { return !is_bound() && unresolved_branches_ == 0; }
void bind_to(int position) {
ASSERT(!is_bound());
pos_ = position;
}
void link_to(int position) {
ASSERT(!is_bound());
ASSERT(unresolved_branches_ < kMaxUnresolvedBranches);
unresolved_positions_[unresolved_branches_++] = position;
}
private:
static const int kMaxUnresolvedBranches = 8;
int pos_;
int unresolved_branches_;
int unresolved_positions_[kMaxUnresolvedBranches];
friend class Assembler;
};
// -----------------------------------------------------------------------------
// Relocation information
@ -197,10 +185,11 @@ class RelocInfo BASE_EMBEDDED {
enum Mode {
// Please note the order is important (see IsCodeTarget, IsGCRelocMode).
CODE_TARGET, // Code target which is not any of the above.
CODE_TARGET_WITH_ID,
CONSTRUCT_CALL, // code target that is a call to a JavaScript constructor.
CODE_TARGET_CONTEXT, // Code target used for contextual loads and stores.
DEBUG_BREAK, // Code target for the debugger statement.
CODE_TARGET, // Code target which is not any of the above.
EMBEDDED_OBJECT,
GLOBAL_PROPERTY_CELL,
@ -216,10 +205,12 @@ class RelocInfo BASE_EMBEDDED {
// add more as needed
// Pseudo-types
NUMBER_OF_MODES, // must be no greater than 14 - see RelocInfoWriter
NUMBER_OF_MODES, // There are at most 14 modes with noncompact encoding.
NONE, // never recorded
LAST_CODE_ENUM = CODE_TARGET,
LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL
LAST_CODE_ENUM = DEBUG_BREAK,
LAST_GCED_ENUM = GLOBAL_PROPERTY_CELL,
// Modes <= LAST_COMPACT_ENUM are guaranteed to have compact encoding.
LAST_COMPACT_ENUM = CODE_TARGET_WITH_ID
};
@ -320,7 +311,7 @@ class RelocInfo BASE_EMBEDDED {
INLINE(void set_call_object(Object* target));
INLINE(Object** call_object_address());
template<typename StaticVisitor> inline void Visit();
template<typename StaticVisitor> inline void Visit(Heap* heap);
inline void Visit(ObjectVisitor* v);
// Patch the code with some other code.
@ -349,7 +340,8 @@ class RelocInfo BASE_EMBEDDED {
static const int kCodeTargetMask = (1 << (LAST_CODE_ENUM + 1)) - 1;
static const int kPositionMask = 1 << POSITION | 1 << STATEMENT_POSITION;
static const int kDebugMask = kPositionMask | 1 << COMMENT;
static const int kDataMask =
(1 << CODE_TARGET_WITH_ID) | kPositionMask | (1 << COMMENT);
static const int kApplyMask; // Modes affected by apply. Depends on arch.
private:
@ -360,6 +352,19 @@ class RelocInfo BASE_EMBEDDED {
byte* pc_;
Mode rmode_;
intptr_t data_;
#ifdef V8_TARGET_ARCH_MIPS
// Code and Embedded Object pointers in mips are stored split
// across two consecutive 32-bit instructions. Heap management
// routines expect to access these pointers indirectly. The following
// location provides a place for these pointers to exist naturally
// when accessed via the Iterator.
Object *reconstructed_obj_ptr_;
// External-reference pointers are also split across instruction-pairs
// in mips, but are accessed via indirect pointers. This location
// provides a place for that pointer to exist naturally. Its address
// is returned by RelocInfo::target_reference_address().
Address reconstructed_adr_ptr_;
#endif // V8_TARGET_ARCH_MIPS
friend class RelocIterator;
};
@ -368,9 +373,14 @@ class RelocInfo BASE_EMBEDDED {
// lower addresses.
class RelocInfoWriter BASE_EMBEDDED {
public:
RelocInfoWriter() : pos_(NULL), last_pc_(NULL), last_data_(0) {}
RelocInfoWriter(byte* pos, byte* pc) : pos_(pos), last_pc_(pc),
last_data_(0) {}
RelocInfoWriter() : pos_(NULL),
last_pc_(NULL),
last_id_(0),
last_position_(0) {}
RelocInfoWriter(byte* pos, byte* pc) : pos_(pos),
last_pc_(pc),
last_id_(0),
last_position_(0) {}
byte* pos() const { return pos_; }
byte* last_pc() const { return last_pc_; }
@ -395,13 +405,15 @@ class RelocInfoWriter BASE_EMBEDDED {
inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);
inline void WriteTaggedPC(uint32_t pc_delta, int tag);
inline void WriteExtraTaggedPC(uint32_t pc_delta, int extra_tag);
inline void WriteExtraTaggedIntData(int data_delta, int top_tag);
inline void WriteExtraTaggedData(intptr_t data_delta, int top_tag);
inline void WriteTaggedData(intptr_t data_delta, int tag);
inline void WriteExtraTag(int extra_tag, int top_tag);
byte* pos_;
byte* last_pc_;
intptr_t last_data_;
int last_id_;
int last_position_;
DISALLOW_COPY_AND_ASSIGN(RelocInfoWriter);
};
@ -443,12 +455,13 @@ class RelocIterator: public Malloced {
int GetTopTag();
void ReadTaggedPC();
void AdvanceReadPC();
void AdvanceReadId();
void AdvanceReadPosition();
void AdvanceReadData();
void AdvanceReadVariableLengthPCJump();
int GetPositionTypeTag();
void ReadTaggedData();
static RelocInfo::Mode DebugInfoModeFromTag(int tag);
int GetLocatableTypeTag();
void ReadTaggedId();
void ReadTaggedPosition();
// If the given mode is wanted, set it in rinfo_ and return true.
// Else return false. Used for efficiently skipping unwanted modes.
@ -461,6 +474,8 @@ class RelocIterator: public Malloced {
RelocInfo rinfo_;
bool done_;
int mode_mask_;
int last_id_;
int last_position_;
DISALLOW_COPY_AND_ASSIGN(RelocIterator);
};
@ -489,9 +504,21 @@ class ExternalReference BASE_EMBEDDED {
// MaybeObject* f(v8::internal::Arguments).
BUILTIN_CALL, // default
// Builtin that takes float arguments and returns an int.
// int f(double, double).
BUILTIN_COMPARE_CALL,
// Builtin call that returns floating point.
// double f(double, double).
FP_RETURN_CALL,
BUILTIN_FP_FP_CALL,
// Builtin call that returns floating point.
// double f(double).
BUILTIN_FP_CALL,
// Builtin call that returns floating point.
// double f(double, int).
BUILTIN_FP_INT_CALL,
// Direct call to API function callback.
// Handle<Value> f(v8::Arguments&)
@ -504,117 +531,131 @@ class ExternalReference BASE_EMBEDDED {
typedef void* ExternalReferenceRedirector(void* original, Type type);
explicit ExternalReference(Builtins::CFunctionId id);
ExternalReference(Builtins::CFunctionId id, Isolate* isolate);
explicit ExternalReference(ApiFunction* ptr, Type type);
ExternalReference(ApiFunction* ptr, Type type, Isolate* isolate);
explicit ExternalReference(Builtins::Name name);
ExternalReference(Builtins::Name name, Isolate* isolate);
explicit ExternalReference(Runtime::FunctionId id);
ExternalReference(Runtime::FunctionId id, Isolate* isolate);
explicit ExternalReference(Runtime::Function* f);
ExternalReference(const Runtime::Function* f, Isolate* isolate);
explicit ExternalReference(const IC_Utility& ic_utility);
ExternalReference(const IC_Utility& ic_utility, Isolate* isolate);
#ifdef ENABLE_DEBUGGER_SUPPORT
explicit ExternalReference(const Debug_Address& debug_address);
ExternalReference(const Debug_Address& debug_address, Isolate* isolate);
#endif
explicit ExternalReference(StatsCounter* counter);
explicit ExternalReference(Top::AddressId id);
ExternalReference(Isolate::AddressId id, Isolate* isolate);
explicit ExternalReference(const SCTableReference& table_ref);
// Isolate::Current() as an external reference.
static ExternalReference isolate_address();
// One-of-a-kind references. These references are not part of a general
// pattern. This means that they have to be added to the
// ExternalReferenceTable in serialize.cc manually.
static ExternalReference perform_gc_function();
static ExternalReference fill_heap_number_with_random_function();
static ExternalReference random_uint32_function();
static ExternalReference transcendental_cache_array_address();
static ExternalReference delete_handle_scope_extensions();
static ExternalReference perform_gc_function(Isolate* isolate);
static ExternalReference fill_heap_number_with_random_function(
Isolate* isolate);
static ExternalReference random_uint32_function(Isolate* isolate);
static ExternalReference transcendental_cache_array_address(Isolate* isolate);
static ExternalReference delete_handle_scope_extensions(Isolate* isolate);
// Deoptimization support.
static ExternalReference new_deoptimizer_function();
static ExternalReference compute_output_frames_function();
static ExternalReference global_contexts_list();
static ExternalReference new_deoptimizer_function(Isolate* isolate);
static ExternalReference compute_output_frames_function(Isolate* isolate);
static ExternalReference global_contexts_list(Isolate* isolate);
// Static data in the keyed lookup cache.
static ExternalReference keyed_lookup_cache_keys();
static ExternalReference keyed_lookup_cache_field_offsets();
static ExternalReference keyed_lookup_cache_keys(Isolate* isolate);
static ExternalReference keyed_lookup_cache_field_offsets(Isolate* isolate);
// Static variable Factory::the_hole_value.location()
static ExternalReference the_hole_value_location();
static ExternalReference the_hole_value_location(Isolate* isolate);
// Static variable Factory::arguments_marker.location()
static ExternalReference arguments_marker_location();
static ExternalReference arguments_marker_location(Isolate* isolate);
// Static variable Heap::roots_address()
static ExternalReference roots_address();
static ExternalReference roots_address(Isolate* isolate);
// Static variable StackGuard::address_of_jslimit()
static ExternalReference address_of_stack_limit();
static ExternalReference address_of_stack_limit(Isolate* isolate);
// Static variable StackGuard::address_of_real_jslimit()
static ExternalReference address_of_real_stack_limit();
static ExternalReference address_of_real_stack_limit(Isolate* isolate);
// Static variable RegExpStack::limit_address()
static ExternalReference address_of_regexp_stack_limit();
static ExternalReference address_of_regexp_stack_limit(Isolate* isolate);
// Static variables for RegExp.
static ExternalReference address_of_static_offsets_vector();
static ExternalReference address_of_regexp_stack_memory_address();
static ExternalReference address_of_regexp_stack_memory_size();
static ExternalReference address_of_static_offsets_vector(Isolate* isolate);
static ExternalReference address_of_regexp_stack_memory_address(
Isolate* isolate);
static ExternalReference address_of_regexp_stack_memory_size(
Isolate* isolate);
// Static variable Heap::NewSpaceStart()
static ExternalReference new_space_start();
static ExternalReference new_space_mask();
static ExternalReference heap_always_allocate_scope_depth();
static ExternalReference new_space_start(Isolate* isolate);
static ExternalReference new_space_mask(Isolate* isolate);
static ExternalReference heap_always_allocate_scope_depth(Isolate* isolate);
// Used for fast allocation in generated code.
static ExternalReference new_space_allocation_top_address();
static ExternalReference new_space_allocation_limit_address();
static ExternalReference new_space_allocation_top_address(Isolate* isolate);
static ExternalReference new_space_allocation_limit_address(Isolate* isolate);
static ExternalReference double_fp_operation(Token::Value operation);
static ExternalReference compare_doubles();
static ExternalReference power_double_double_function();
static ExternalReference power_double_int_function();
static ExternalReference double_fp_operation(Token::Value operation,
Isolate* isolate);
static ExternalReference compare_doubles(Isolate* isolate);
static ExternalReference power_double_double_function(Isolate* isolate);
static ExternalReference power_double_int_function(Isolate* isolate);
static ExternalReference handle_scope_next_address();
static ExternalReference handle_scope_limit_address();
static ExternalReference handle_scope_level_address();
static ExternalReference scheduled_exception_address();
static ExternalReference scheduled_exception_address(Isolate* isolate);
// Static variables containing common double constants.
static ExternalReference address_of_min_int();
static ExternalReference address_of_one_half();
static ExternalReference address_of_minus_zero();
static ExternalReference address_of_zero();
static ExternalReference address_of_uint8_max_value();
static ExternalReference address_of_negative_infinity();
static ExternalReference address_of_nan();
static ExternalReference math_sin_double_function(Isolate* isolate);
static ExternalReference math_cos_double_function(Isolate* isolate);
static ExternalReference math_log_double_function(Isolate* isolate);
Address address() const {return reinterpret_cast<Address>(address_);}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Function Debug::Break()
static ExternalReference debug_break();
static ExternalReference debug_break(Isolate* isolate);
// Used to check if single stepping is enabled in generated code.
static ExternalReference debug_step_in_fp_address();
static ExternalReference debug_step_in_fp_address(Isolate* isolate);
#endif
#ifndef V8_INTERPRETED_REGEXP
// C functions called from RegExp generated code.
// Function NativeRegExpMacroAssembler::CaseInsensitiveCompareUC16()
static ExternalReference re_case_insensitive_compare_uc16();
static ExternalReference re_case_insensitive_compare_uc16(Isolate* isolate);
// Function RegExpMacroAssembler*::CheckStackGuardState()
static ExternalReference re_check_stack_guard_state();
static ExternalReference re_check_stack_guard_state(Isolate* isolate);
// Function NativeRegExpMacroAssembler::GrowStack()
static ExternalReference re_grow_stack();
static ExternalReference re_grow_stack(Isolate* isolate);
// byte NativeRegExpMacroAssembler::word_character_bitmap
static ExternalReference re_word_character_map();
@ -623,30 +664,39 @@ class ExternalReference BASE_EMBEDDED {
// This lets you register a function that rewrites all external references.
// Used by the ARM simulator to catch calls to external references.
static void set_redirector(ExternalReferenceRedirector* redirector) {
ASSERT(redirector_ == NULL); // We can't stack them.
redirector_ = redirector;
static void set_redirector(Isolate* isolate,
ExternalReferenceRedirector* redirector) {
// We can't stack them.
ASSERT(isolate->external_reference_redirector() == NULL);
isolate->set_external_reference_redirector(
reinterpret_cast<ExternalReferenceRedirectorPointer*>(redirector));
}
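For illustration, a do-nothing redirector matching the typedef above, registered through the new isolate-aware setter. The function and counter names are placeholders; the real ARM-simulator redirector returns trampoline addresses instead of the original pointer.

  static int fp_fp_redirects = 0;  // illustrative bookkeeping only

  static void* TracingRedirector(void* original, ExternalReference::Type type) {
    if (type == ExternalReference::BUILTIN_FP_FP_CALL) fp_fp_redirects++;
    return original;  // no rewriting in this sketch
  }

  // Registered once per isolate, before any code generation:
  //   ExternalReference::set_redirector(isolate, &TracingRedirector);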
private:
explicit ExternalReference(void* address)
: address_(address) {}
static ExternalReferenceRedirector* redirector_;
static void* Redirect(void* address,
static void* Redirect(Isolate* isolate,
void* address,
Type type = ExternalReference::BUILTIN_CALL) {
if (redirector_ == NULL) return address;
void* answer = (*redirector_)(address, type);
ExternalReferenceRedirector* redirector =
reinterpret_cast<ExternalReferenceRedirector*>(
isolate->external_reference_redirector());
if (redirector == NULL) return address;
void* answer = (*redirector)(address, type);
return answer;
}
static void* Redirect(Address address_arg,
static void* Redirect(Isolate* isolate,
Address address_arg,
Type type = ExternalReference::BUILTIN_CALL) {
ExternalReferenceRedirector* redirector =
reinterpret_cast<ExternalReferenceRedirector*>(
isolate->external_reference_redirector());
void* address = reinterpret_cast<void*>(address_arg);
void* answer = (redirector_ == NULL) ?
void* answer = (redirector == NULL) ?
address :
(*redirector_)(address, type);
(*redirector)(address, type);
return answer;
}
@ -785,6 +835,28 @@ static inline int NumberOfBitsSet(uint32_t x) {
double power_double_int(double x, int y);
double power_double_double(double x, double y);
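A plausible portable sketch of power_double_int, using exponentiation by squaring; the shipped definition presumably lives in assembler.cc and may differ, and the INT_MIN exponent corner case is ignored here.

  double power_double_int(double x, int y) {
    double m = (y < 0) ? 1 / x : x;   // negative exponent: invert the base
    unsigned n = (y < 0) ? -y : y;    // magnitude of the exponent
    double p = 1;
    while (n != 0) {
      if ((n & 1) != 0) p *= m;       // fold in the current bit
      m *= m;                         // square for the next bit
      n >>= 1;
    }
    return p;
  }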
// Helper class for generating code or data associated with the code
// right after a call instruction. As an example this can be used to
// generate safepoint data after calls for crankshaft.
class CallWrapper {
public:
CallWrapper() { }
virtual ~CallWrapper() { }
// Called just before emitting a call. Argument is the size of the generated
// call code.
virtual void BeforeCall(int call_size) const = 0;
// Called just after emitting a call, i.e., at the return site for the call.
virtual void AfterCall() const = 0;
};
class NullCallWrapper : public CallWrapper {
public:
NullCallWrapper() { }
virtual ~NullCallWrapper() { }
virtual void BeforeCall(int call_size) const { }
virtual void AfterCall() const { }
};
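As a sketch of the intended extension point, a hypothetical wrapper that merely tallies emitted call sizes; a real client (for example optimizing-compiler codegen) would instead record a safepoint at the return address in AfterCall().

  class CountingCallWrapper : public CallWrapper {
   public:
    CountingCallWrapper() : total_call_size_(0) { }
    virtual void BeforeCall(int call_size) const { total_call_size_ += call_size; }
    virtual void AfterCall() const { }        // safepoint recording would go here
    int total_call_size() const { return total_call_size_; }
   private:
    mutable int total_call_size_;             // mutable because the hooks are const
  };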
} } // namespace v8::internal
#endif // V8_ASSEMBLER_H_

11
deps/v8/src/ast-inl.h

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -31,7 +31,7 @@
#include "v8.h"
#include "ast.h"
#include "jump-target-inl.h"
#include "scopes.h"
namespace v8 {
namespace internal {
@ -62,7 +62,7 @@ BreakableStatement::BreakableStatement(ZoneStringList* labels, Type type)
IterationStatement::IterationStatement(ZoneStringList* labels)
: BreakableStatement(labels, TARGET_FOR_ANONYMOUS),
body_(NULL),
continue_target_(JumpTarget::BIDIRECTIONAL),
continue_target_(),
osr_entry_id_(GetNextId()) {
}
@ -102,6 +102,11 @@ ForInStatement::ForInStatement(ZoneStringList* labels)
}
bool FunctionLiteral::strict_mode() const {
return scope()->is_strict_mode();
}
} } // namespace v8::internal
#endif // V8_AST_INL_H_

393
deps/v8/src/ast.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -28,21 +28,21 @@
#include "v8.h"
#include "ast.h"
#include "jump-target-inl.h"
#include "parser.h"
#include "scopes.h"
#include "string-stream.h"
#include "type-info.h"
namespace v8 {
namespace internal {
unsigned AstNode::current_id_ = 0;
unsigned AstNode::count_ = 0;
VariableProxySentinel VariableProxySentinel::this_proxy_(true);
VariableProxySentinel VariableProxySentinel::identifier_proxy_(false);
ValidLeftHandSideSentinel ValidLeftHandSideSentinel::instance_;
Property Property::this_property_(VariableProxySentinel::this_proxy(), NULL, 0);
Call Call::sentinel_(NULL, NULL, 0);
AstSentinels::AstSentinels()
: this_proxy_(true),
identifier_proxy_(false),
valid_left_hand_side_sentinel_(),
this_property_(&this_proxy_, NULL, 0),
call_sentinel_(NULL, NULL, 0) {
}
// ----------------------------------------------------------------------------
@ -77,20 +77,23 @@ VariableProxy::VariableProxy(Variable* var)
var_(NULL), // Will be set by the call to BindTo.
is_this_(var->is_this()),
inside_with_(false),
is_trivial_(false) {
is_trivial_(false),
position_(RelocInfo::kNoPosition) {
BindTo(var);
}
VariableProxy::VariableProxy(Handle<String> name,
bool is_this,
bool inside_with)
bool inside_with,
int position)
: name_(name),
var_(NULL),
is_this_(is_this),
inside_with_(inside_with),
is_trivial_(false) {
// names must be canonicalized for fast equality checks
is_trivial_(false),
position_(position) {
// Names must be canonicalized for fast equality checks.
ASSERT(name->IsSymbol());
}
@ -170,7 +173,7 @@ ObjectLiteral::Property::Property(Literal* key, Expression* value) {
key_ = key;
value_ = value;
Object* k = *key->handle();
if (k->IsSymbol() && Heap::Proto_symbol()->Equals(String::cast(k))) {
if (k->IsSymbol() && HEAP->Proto_symbol()->Equals(String::cast(k))) {
kind_ = PROTOTYPE;
} else if (value_->AsMaterializedLiteral() != NULL) {
kind_ = MATERIALIZED_LITERAL;
@ -249,10 +252,11 @@ void ObjectLiteral::CalculateEmitStore() {
uint32_t hash;
HashMap* table;
void* key;
Factory* factory = Isolate::Current()->factory();
if (handle->IsSymbol()) {
Handle<String> name(String::cast(*handle));
if (name->AsArrayIndex(&hash)) {
Handle<Object> key_handle = Factory::NewNumberFromUint(hash);
Handle<Object> key_handle = factory->NewNumberFromUint(hash);
key = key_handle.location();
table = &elements;
} else {
@ -269,7 +273,7 @@ void ObjectLiteral::CalculateEmitStore() {
char arr[100];
Vector<char> buffer(arr, ARRAY_SIZE(arr));
const char* str = DoubleToCString(num, buffer);
Handle<String> name = Factory::NewStringFromAscii(CStrVector(str));
Handle<String> name = factory->NewStringFromAscii(CStrVector(str));
key = name.location();
hash = name->Hash();
table = &properties;
@ -287,86 +291,13 @@ void ObjectLiteral::CalculateEmitStore() {
}
void TargetCollector::AddTarget(BreakTarget* target) {
void TargetCollector::AddTarget(Label* target) {
// Add the label to the collector, but discard duplicates.
int length = targets_->length();
int length = targets_.length();
for (int i = 0; i < length; i++) {
if (targets_->at(i) == target) return;
}
targets_->Add(target);
}
bool Expression::GuaranteedSmiResult() {
BinaryOperation* node = AsBinaryOperation();
if (node == NULL) return false;
Token::Value op = node->op();
switch (op) {
case Token::COMMA:
case Token::OR:
case Token::AND:
case Token::ADD:
case Token::SUB:
case Token::MUL:
case Token::DIV:
case Token::MOD:
case Token::BIT_XOR:
case Token::SHL:
return false;
break;
case Token::BIT_OR:
case Token::BIT_AND: {
Literal* left = node->left()->AsLiteral();
Literal* right = node->right()->AsLiteral();
if (left != NULL && left->handle()->IsSmi()) {
int value = Smi::cast(*left->handle())->value();
if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
// Result of bitwise or is always a negative Smi.
return true;
}
if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
// Result of bitwise and is always a positive Smi.
return true;
}
}
if (right != NULL && right->handle()->IsSmi()) {
int value = Smi::cast(*right->handle())->value();
if (op == Token::BIT_OR && ((value & 0xc0000000) == 0xc0000000)) {
// Result of bitwise or is always a negative Smi.
return true;
}
if (op == Token::BIT_AND && ((value & 0xc0000000) == 0)) {
// Result of bitwise and is always a positive Smi.
return true;
}
}
return false;
break;
}
case Token::SAR:
case Token::SHR: {
Literal* right = node->right()->AsLiteral();
if (right != NULL && right->handle()->IsSmi()) {
int value = Smi::cast(*right->handle())->value();
if ((value & 0x1F) > 1 ||
(op == Token::SAR && (value & 0x1F) == 1)) {
return true;
}
}
return false;
break;
}
default:
UNREACHABLE();
break;
if (targets_[i] == target) return;
}
return false;
}
void Expression::CopyAnalysisResultsFrom(Expression* other) {
bitfields_ = other->bitfields_;
type_ = other->type_;
targets_.Add(target);
}
@ -406,19 +337,192 @@ bool BinaryOperation::ResultOverwriteAllowed() {
}
BinaryOperation::BinaryOperation(Assignment* assignment) {
ASSERT(assignment->is_compound());
op_ = assignment->binary_op();
left_ = assignment->target();
right_ = assignment->value();
pos_ = assignment->position();
CopyAnalysisResultsFrom(assignment);
bool CompareOperation::IsLiteralCompareTypeof(Expression** expr,
Handle<String>* check) {
if (op_ != Token::EQ && op_ != Token::EQ_STRICT) return false;
UnaryOperation* left_unary = left_->AsUnaryOperation();
UnaryOperation* right_unary = right_->AsUnaryOperation();
Literal* left_literal = left_->AsLiteral();
Literal* right_literal = right_->AsLiteral();
// Check for the pattern: typeof <expression> == <string literal>.
if (left_unary != NULL && left_unary->op() == Token::TYPEOF &&
right_literal != NULL && right_literal->handle()->IsString()) {
*expr = left_unary->expression();
*check = Handle<String>::cast(right_literal->handle());
return true;
}
// Check for the pattern: <string literal> == typeof <expression>.
if (right_unary != NULL && right_unary->op() == Token::TYPEOF &&
left_literal != NULL && left_literal->handle()->IsString()) {
*expr = right_unary->expression();
*check = Handle<String>::cast(left_literal->handle());
return true;
}
return false;
}
bool CompareOperation::IsLiteralCompareUndefined(Expression** expr) {
if (op_ != Token::EQ_STRICT) return false;
UnaryOperation* left_unary = left_->AsUnaryOperation();
UnaryOperation* right_unary = right_->AsUnaryOperation();
// Check for the pattern: <expression> === void <literal>.
if (right_unary != NULL && right_unary->op() == Token::VOID &&
right_unary->expression()->AsLiteral() != NULL) {
*expr = left_;
return true;
}
// Check for the pattern: void <literal> === <expression>.
if (left_unary != NULL && left_unary->op() == Token::VOID &&
left_unary->expression()->AsLiteral() != NULL) {
*expr = right_;
return true;
}
return false;
}
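Roughly how a code generator might consume these two recognizers; EmitLiteralCompareTypeof and EmitLiteralCompareUndefined are placeholder names, not functions declared in this file.

  void VisitCompareOperation(CompareOperation* compare) {
    Expression* sub_expr = NULL;
    Handle<String> check;
    if (compare->IsLiteralCompareTypeof(&sub_expr, &check)) {
      EmitLiteralCompareTypeof(sub_expr, check);     // typeof x == "string"
    } else if (compare->IsLiteralCompareUndefined(&sub_expr)) {
      EmitLiteralCompareUndefined(sub_expr);         // x === void 0
    } else {
      // fall back to the generic comparison path
    }
  }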
// ----------------------------------------------------------------------------
// Inlining support
bool Declaration::IsInlineable() const {
return proxy()->var()->IsStackAllocated() && fun() == NULL;
}
bool TargetCollector::IsInlineable() const {
UNREACHABLE();
return false;
}
bool Slot::IsInlineable() const {
UNREACHABLE();
return false;
}
bool ForInStatement::IsInlineable() const {
return false;
}
bool EnterWithContextStatement::IsInlineable() const {
return false;
}
bool ExitContextStatement::IsInlineable() const {
return false;
}
bool SwitchStatement::IsInlineable() const {
return false;
}
bool TryStatement::IsInlineable() const {
return false;
}
bool TryCatchStatement::IsInlineable() const {
return false;
}
bool TryFinallyStatement::IsInlineable() const {
return false;
}
bool DebuggerStatement::IsInlineable() const {
return false;
}
bool Throw::IsInlineable() const {
return exception()->IsInlineable();
}
bool MaterializedLiteral::IsInlineable() const {
// TODO(1322): Allow materialized literals.
return false;
}
bool FunctionLiteral::IsInlineable() const {
// TODO(1322): Allow materialized literals.
return false;
}
bool ThisFunction::IsInlineable() const {
return false;
}
bool SharedFunctionInfoLiteral::IsInlineable() const {
return false;
}
bool ValidLeftHandSideSentinel::IsInlineable() const {
UNREACHABLE();
return false;
}
bool ForStatement::IsInlineable() const {
return (init() == NULL || init()->IsInlineable())
&& (cond() == NULL || cond()->IsInlineable())
&& (next() == NULL || next()->IsInlineable())
&& body()->IsInlineable();
}
bool WhileStatement::IsInlineable() const {
return cond()->IsInlineable()
&& body()->IsInlineable();
}
bool DoWhileStatement::IsInlineable() const {
return cond()->IsInlineable()
&& body()->IsInlineable();
}
bool ContinueStatement::IsInlineable() const {
return true;
}
bool BreakStatement::IsInlineable() const {
return true;
}
bool EmptyStatement::IsInlineable() const {
return true;
}
bool Literal::IsInlineable() const {
return true;
}
bool Block::IsInlineable() const {
const int count = statements_.length();
for (int i = 0; i < count; ++i) {
@ -434,8 +538,9 @@ bool ExpressionStatement::IsInlineable() const {
bool IfStatement::IsInlineable() const {
return condition()->IsInlineable() && then_statement()->IsInlineable() &&
else_statement()->IsInlineable();
return condition()->IsInlineable()
&& then_statement()->IsInlineable()
&& else_statement()->IsInlineable();
}
@ -486,6 +591,17 @@ bool CallNew::IsInlineable() const {
bool CallRuntime::IsInlineable() const {
// Don't try to inline JS runtime calls because we don't (currently) even
// optimize them.
if (is_jsruntime()) return false;
// Don't inline the %_ArgumentsLength or %_Arguments because their
// implementation will not work. There is no stack frame to get them
// from.
if (function()->intrinsic_type == Runtime::INLINE &&
(name()->IsEqualTo(CStrVector("_ArgumentsLength")) ||
name()->IsEqualTo(CStrVector("_Arguments")))) {
return false;
}
const int count = arguments()->length();
for (int i = 0; i < count; ++i) {
if (!arguments()->at(i)->IsInlineable()) return false;
@ -524,14 +640,14 @@ bool CountOperation::IsInlineable() const {
void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
// Record type feedback from the oracle in the AST.
is_monomorphic_ = oracle->LoadIsMonomorphic(this);
is_monomorphic_ = oracle->LoadIsMonomorphicNormal(this);
if (key()->IsPropertyName()) {
if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_ArrayLength)) {
if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_ArrayLength)) {
is_array_length_ = true;
} else if (oracle->LoadIsBuiltin(this, Builtins::LoadIC_StringLength)) {
} else if (oracle->LoadIsBuiltin(this, Builtins::kLoadIC_StringLength)) {
is_string_length_ = true;
} else if (oracle->LoadIsBuiltin(this,
Builtins::LoadIC_FunctionPrototype)) {
Builtins::kLoadIC_FunctionPrototype)) {
is_function_prototype_ = true;
} else {
Literal* lit_key = key()->AsLiteral();
@ -540,8 +656,13 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
ZoneMapList* types = oracle->LoadReceiverTypes(this, name);
receiver_types_ = types;
}
} else if (oracle->LoadIsBuiltin(this, Builtins::kKeyedLoadIC_String)) {
is_string_access_ = true;
} else if (is_monomorphic_) {
monomorphic_receiver_type_ = oracle->LoadMonomorphicReceiverType(this);
} else if (oracle->LoadIsMegamorphicWithTypeInfo(this)) {
receiver_types_ = new ZoneMapList(kMaxKeyedPolymorphism);
oracle->CollectKeyedReceiverTypes(this->id(), receiver_types_);
}
}
@ -549,7 +670,7 @@ void Property::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
Property* prop = target()->AsProperty();
ASSERT(prop != NULL);
is_monomorphic_ = oracle->StoreIsMonomorphic(this);
is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
if (prop->key()->IsPropertyName()) {
Literal* lit_key = prop->key()->AsLiteral();
ASSERT(lit_key != NULL && lit_key->handle()->IsString());
@ -557,8 +678,23 @@ void Assignment::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
ZoneMapList* types = oracle->StoreReceiverTypes(this, name);
receiver_types_ = types;
} else if (is_monomorphic_) {
// Record receiver type for monomorphic keyed loads.
// Record receiver type for monomorphic keyed stores.
monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
} else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
receiver_types_ = new ZoneMapList(kMaxKeyedPolymorphism);
oracle->CollectKeyedReceiverTypes(this->id(), receiver_types_);
}
}
void CountOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
is_monomorphic_ = oracle->StoreIsMonomorphicNormal(this);
if (is_monomorphic_) {
// Record receiver type for monomorphic keyed stores.
monomorphic_receiver_type_ = oracle->StoreMonomorphicReceiverType(this);
} else if (oracle->StoreIsMegamorphicWithTypeInfo(this)) {
receiver_types_ = new ZoneMapList(kMaxKeyedPolymorphism);
oracle->CollectKeyedReceiverTypes(this->id(), receiver_types_);
}
}
@ -613,38 +749,36 @@ bool Call::ComputeTarget(Handle<Map> type, Handle<String> name) {
bool Call::ComputeGlobalTarget(Handle<GlobalObject> global,
Handle<String> name) {
LookupResult* lookup) {
target_ = Handle<JSFunction>::null();
cell_ = Handle<JSGlobalPropertyCell>::null();
LookupResult lookup;
global->Lookup(*name, &lookup);
if (lookup.IsProperty() &&
lookup.type() == NORMAL &&
lookup.holder() == *global) {
cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(&lookup));
if (cell_->value()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
if (!Heap::InNewSpace(*candidate) &&
CanCallWithoutIC(candidate, arguments()->length())) {
target_ = candidate;
return true;
}
ASSERT(lookup->IsProperty() &&
lookup->type() == NORMAL &&
lookup->holder() == *global);
cell_ = Handle<JSGlobalPropertyCell>(global->GetPropertyCell(lookup));
if (cell_->value()->IsJSFunction()) {
Handle<JSFunction> candidate(JSFunction::cast(cell_->value()));
// If the function is in new space we assume it's more likely to
// change and thus prefer the general IC code.
if (!HEAP->InNewSpace(*candidate) &&
CanCallWithoutIC(candidate, arguments()->length())) {
target_ = candidate;
return true;
}
}
return false;
}
void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
void Call::RecordTypeFeedback(TypeFeedbackOracle* oracle,
CallKind call_kind) {
Property* property = expression()->AsProperty();
ASSERT(property != NULL);
// Specialize for the receiver types seen at runtime.
Literal* key = property->key()->AsLiteral();
ASSERT(key != NULL && key->handle()->IsString());
Handle<String> name = Handle<String>::cast(key->handle());
receiver_types_ = oracle->CallReceiverTypes(this, name);
receiver_types_ = oracle->CallReceiverTypes(this, name, call_kind);
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
if (receiver_types_ != NULL) {
@ -691,7 +825,7 @@ void CompareOperation::RecordTypeFeedback(TypeFeedbackOracle* oracle) {
bool AstVisitor::CheckStackOverflow() {
if (stack_overflow_) return true;
StackLimitCheck check;
StackLimitCheck check(isolate_);
if (!check.HasOverflowed()) return false;
return (stack_overflow_ = true);
}
@ -1062,6 +1196,9 @@ CaseClause::CaseClause(Expression* label,
: label_(label),
statements_(statements),
position_(pos),
compare_type_(NONE) {}
compare_type_(NONE),
compare_id_(AstNode::GetNextId()),
entry_id_(AstNode::GetNextId()) {
}
} } // namespace v8::internal

459
deps/v8/src/ast.h

File diff suppressed because it is too large

2
deps/v8/src/atomicops.h

@ -158,6 +158,8 @@ Atomic64 Release_Load(volatile const Atomic64* ptr);
#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
#include "atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS)
#include "atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif

169
deps/v8/src/atomicops_internals_mips_gcc.h

@ -0,0 +1,169 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is an internal atomic implementation, use atomicops.h instead.
#ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
namespace v8 {
namespace internal {
// Atomically execute:
// result = *ptr;
// if (*ptr == old_value)
// *ptr = new_value;
// return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always return the old value of "*ptr"
//
// This routine implies no memory barriers.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 prev;
__asm__ __volatile__("1:\n"
"ll %0, %1\n" // prev = *ptr
"bne %0, %3, 2f\n" // if (prev != old_value) goto 2
"nop\n" // delay slot nop
"sc %2, %1\n" // *ptr = new_value (with atomic check)
"beqz %2, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
"2:\n"
: "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
: "Ir" (old_value), "r" (new_value), "m" (*ptr)
: "memory");
return prev;
}
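An illustrative use of the contract documented above (not part of the port itself): atomically raise *cell to at least candidate with a compare-and-swap retry loop.

  inline void AtomicStoreMax(volatile Atomic32* cell, Atomic32 candidate) {
    Atomic32 current = *cell;                  // plain read; the CAS revalidates it
    while (current < candidate) {
      Atomic32 prev = NoBarrier_CompareAndSwap(cell, current, candidate);
      if (prev == current) return;             // CAS succeeded
      current = prev;                          // lost a race, retry with fresh value
    }
  }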
// Atomically store new_value into *ptr, returning the previous value held in
// *ptr. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
Atomic32 new_value) {
Atomic32 temp, old;
__asm__ __volatile__("1:\n"
"ll %1, %2\n" // old = *ptr
"move %0, %3\n" // temp = new_value
"sc %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
: "=&r" (temp), "=&r" (old), "=m" (*ptr)
: "r" (new_value), "m" (*ptr)
: "memory");
return old;
}
// Atomically increment *ptr by "increment". Returns the new value of
// *ptr with the increment applied. This routine implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 temp, temp2;
__asm__ __volatile__("1:\n"
"ll %0, %2\n" // temp = *ptr
"addu %0, %3\n" // temp = temp + increment
"move %1, %0\n" // temp2 = temp
"sc %0, %2\n" // *ptr = temp (with atomic check)
"beqz %0, 1b\n" // start again on atomic error
"nop\n" // delay slot nop
: "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
: "Ir" (increment), "m" (*ptr)
: "memory");
// temp2 now holds the final value.
return temp2;
}
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
Atomic32 increment) {
Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
ATOMICOPS_COMPILER_BARRIER();
return res;
}
// "Acquire" operations
// ensure that no later memory access can be reordered ahead of the operation.
// "Release" operations ensure that no previous memory access can be reordered
// after the operation. "Barrier" operations have both "Acquire" and "Release"
// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
// access.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
ATOMICOPS_COMPILER_BARRIER();
return x;
}
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
Atomic32 old_value,
Atomic32 new_value) {
ATOMICOPS_COMPILER_BARRIER();
return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
}
inline void MemoryBarrier() {
ATOMICOPS_COMPILER_BARRIER();
}
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
*ptr = value;
MemoryBarrier();
}
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
MemoryBarrier();
*ptr = value;
}
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
return *ptr;
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
Atomic32 value = *ptr;
MemoryBarrier();
return value;
}
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
MemoryBarrier();
return *ptr;
}
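A sketch of the intended acquire/release pairing described above (illustrative only; payload and ready stand for data shared between two threads):

  static int payload = 0;
  static Atomic32 ready = 0;

  inline void Publish(int value) {
    payload = value;                 // ordinary store to the data...
    Release_Store(&ready, 1);        // ...made visible no later than the flag
  }

  inline bool TryConsume(int* out) {
    if (Acquire_Load(&ready) == 0) return false;   // flag not yet set
    *out = payload;                  // acquire orders this after the flag read
    return true;
  }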
} } // namespace v8::internal
#undef ATOMICOPS_COMPILER_BARRIER
#endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_

13
deps/v8/src/atomicops_internals_x86_gcc.cc

@ -57,6 +57,9 @@
#if defined(cpuid) // initialize the struct only on x86
namespace v8 {
namespace internal {
// Set the flags so that code will run correctly and conservatively, so even
// if we haven't been initialized yet, we're probably single threaded, and our
// default values should hopefully be pretty safe.
@ -65,8 +68,14 @@ struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures = {
false, // no SSE2
};
} } // namespace v8::internal
namespace {
// Initialize the AtomicOps_Internalx86CPUFeatures struct.
static void AtomicOps_Internalx86CPUFeaturesInit() {
void AtomicOps_Internalx86CPUFeaturesInit() {
using v8::internal::AtomicOps_Internalx86CPUFeatures;
uint32_t eax;
uint32_t ebx;
uint32_t ecx;
@ -107,8 +116,6 @@ static void AtomicOps_Internalx86CPUFeaturesInit() {
AtomicOps_Internalx86CPUFeatures.has_sse2 = ((edx >> 26) & 1);
}
namespace {
class AtomicOpsx86Initializer {
public:
AtomicOpsx86Initializer() {

6
deps/v8/src/atomicops_internals_x86_gcc.h

@ -30,6 +30,9 @@
#ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
#define V8_ATOMICOPS_INTERNALS_X86_GCC_H_
namespace v8 {
namespace internal {
// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86. Values may not be correct before main() is run,
@ -43,9 +46,6 @@ extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
namespace v8 {
namespace internal {
// 32-bit low-level operations on any platform.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,

1210
deps/v8/src/bootstrapper.cc

File diff suppressed because it is too large

123
deps/v8/src/bootstrapper.h

@ -29,77 +29,148 @@
#ifndef V8_BOOTSTRAPPER_H_
#define V8_BOOTSTRAPPER_H_
#include "allocation.h"
namespace v8 {
namespace internal {
class BootstrapperActive BASE_EMBEDDED {
// A SourceCodeCache uses a FixedArray to store pairs of
// (AsciiString*, JSFunction*), mapping names of native code files
// (runtime.js, etc.) to precompiled functions. Instead of mapping
// names to functions it might make sense to let the JS2C tool
// generate an index for each native JS file.
class SourceCodeCache BASE_EMBEDDED {
public:
BootstrapperActive() { nesting_++; }
~BootstrapperActive() { nesting_--; }
explicit SourceCodeCache(Script::Type type): type_(type), cache_(NULL) { }
// Support for thread preemption.
static int ArchiveSpacePerThread();
static char* ArchiveState(char* to);
static char* RestoreState(char* from);
void Initialize(bool create_heap_objects) {
cache_ = create_heap_objects ? HEAP->empty_fixed_array() : NULL;
}
void Iterate(ObjectVisitor* v) {
v->VisitPointer(BitCast<Object**, FixedArray**>(&cache_));
}
bool Lookup(Vector<const char> name, Handle<SharedFunctionInfo>* handle) {
for (int i = 0; i < cache_->length(); i+=2) {
SeqAsciiString* str = SeqAsciiString::cast(cache_->get(i));
if (str->IsEqualTo(name)) {
*handle = Handle<SharedFunctionInfo>(
SharedFunctionInfo::cast(cache_->get(i + 1)));
return true;
}
}
return false;
}
void Add(Vector<const char> name, Handle<SharedFunctionInfo> shared) {
HandleScope scope;
int length = cache_->length();
Handle<FixedArray> new_array =
FACTORY->NewFixedArray(length + 2, TENURED);
cache_->CopyTo(0, *new_array, 0, cache_->length());
cache_ = *new_array;
Handle<String> str = FACTORY->NewStringFromAscii(name, TENURED);
cache_->set(length, *str);
cache_->set(length + 1, *shared);
Script::cast(shared->script())->set_type(Smi::FromInt(type_));
}
private:
static bool IsActive() { return nesting_ != 0; }
static int nesting_;
friend class Bootstrapper;
Script::Type type_;
FixedArray* cache_;
DISALLOW_COPY_AND_ASSIGN(SourceCodeCache);
};
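A hypothetical caller in the natives-loading path; CompileNative stands in for the real compilation routine and is not declared in this header.

  Handle<SharedFunctionInfo> GetOrCompileNative(SourceCodeCache* cache,
                                                Vector<const char> name,
                                                Handle<String> source) {
    Handle<SharedFunctionInfo> function_info;
    if (cache->Lookup(name, &function_info)) return function_info;  // cache hit
    function_info = CompileNative(name, source);                    // assumed helper
    cache->Add(name, function_info);
    return function_info;
  }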
// The Bootstrapper is the public interface for creating a JavaScript global
// context.
class Bootstrapper : public AllStatic {
class Bootstrapper {
public:
// Requires: Heap::Setup has been called.
static void Initialize(bool create_heap_objects);
static void TearDown();
void Initialize(bool create_heap_objects);
void TearDown();
// Creates a JavaScript Global Context with initial object graph.
// The returned value is a global handle cast to V8Environment*.
static Handle<Context> CreateEnvironment(
Handle<Context> CreateEnvironment(
Isolate* isolate,
Handle<Object> global_object,
v8::Handle<v8::ObjectTemplate> global_template,
v8::ExtensionConfiguration* extensions);
// Detach the environment from its outer global object.
static void DetachGlobal(Handle<Context> env);
void DetachGlobal(Handle<Context> env);
// Reattach an outer global object to an environment.
static void ReattachGlobal(Handle<Context> env, Handle<Object> global_object);
void ReattachGlobal(Handle<Context> env, Handle<Object> global_object);
// Traverses the pointers for memory management.
static void Iterate(ObjectVisitor* v);
void Iterate(ObjectVisitor* v);
// Accessor for the native scripts source code.
static Handle<String> NativesSourceLookup(int index);
Handle<String> NativesSourceLookup(int index);
// Tells whether bootstrapping is active.
static bool IsActive() { return BootstrapperActive::IsActive(); }
bool IsActive() const { return nesting_ != 0; }
// Support for thread preemption.
static int ArchiveSpacePerThread();
static char* ArchiveState(char* to);
static char* RestoreState(char* from);
static void FreeThreadResources();
char* ArchiveState(char* to);
char* RestoreState(char* from);
void FreeThreadResources();
// This will allocate a char array that is deleted when V8 is shut down.
// It should only be used for strictly finite allocations.
static char* AllocateAutoDeletedArray(int bytes);
char* AllocateAutoDeletedArray(int bytes);
// Used for new context creation.
static bool InstallExtensions(Handle<Context> global_context,
v8::ExtensionConfiguration* extensions);
bool InstallExtensions(Handle<Context> global_context,
v8::ExtensionConfiguration* extensions);
SourceCodeCache* extensions_cache() { return &extensions_cache_; }
private:
typedef int NestingCounterType;
NestingCounterType nesting_;
SourceCodeCache extensions_cache_;
// This is for delete, not delete[].
List<char*>* delete_these_non_arrays_on_tear_down_;
// This is for delete[]
List<char*>* delete_these_arrays_on_tear_down_;
friend class BootstrapperActive;
friend class Isolate;
friend class NativesExternalStringResource;
Bootstrapper();
DISALLOW_COPY_AND_ASSIGN(Bootstrapper);
};
class BootstrapperActive BASE_EMBEDDED {
public:
BootstrapperActive() {
++Isolate::Current()->bootstrapper()->nesting_;
}
~BootstrapperActive() {
--Isolate::Current()->bootstrapper()->nesting_;
}
private:
DISALLOW_COPY_AND_ASSIGN(BootstrapperActive);
};
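How the RAII guard above is meant to be used, in rough outline (the caller name is hypothetical): any code path that builds a context marks itself active for its dynamic extent, so IsActive() answers true inside it and false again afterwards.

  void InstallGlobalContext(Isolate* isolate) {   // hypothetical caller
    BootstrapperActive active;                    // nesting_++ for this scope
    ASSERT(isolate->bootstrapper()->IsActive());
    // ... create and initialize the global context ...
  }                                               // nesting_-- on scope exit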
class NativesExternalStringResource
: public v8::String::ExternalAsciiStringResource {
public:
explicit NativesExternalStringResource(const char* source);
NativesExternalStringResource(Bootstrapper* bootstrapper,
const char* source,
size_t length);
const char* data() const {
return data_;

580
deps/v8/src/builtins.cc

File diff suppressed because it is too large

294
deps/v8/src/builtins.h

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -58,119 +58,131 @@ enum BuiltinExtraArguments {
V(FastHandleApiCall, NO_EXTRA_ARGUMENTS) \
V(HandleApiCallConstruct, NEEDS_CALLED_FUNCTION) \
V(HandleApiCallAsFunction, NO_EXTRA_ARGUMENTS) \
V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS)
V(HandleApiCallAsConstructor, NO_EXTRA_ARGUMENTS) \
\
V(StrictModePoisonPill, NO_EXTRA_ARGUMENTS)
// Define list of builtins implemented in assembly.
#define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructCall, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LazyCompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LazyRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyOSR, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
kStrictMode) \
V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \
kStrictMode) \
V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
kStrictMode) \
V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
kStrictMode) \
V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \
kStrictMode) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
\
V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
kStrictMode) \
V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
kStrictMode) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(FunctionApply, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(ArrayCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(StringConstructCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
#define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructCall, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubCountdown, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructStubApi, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LazyCompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LazyRecompile, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyLazyDeoptimized, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(NotifyOSR, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(LoadIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedLoadIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedLoadIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedLoadIC_Slow, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Miss, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_MissForceGeneric, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Slow, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LoadIC_Initialize, LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(LoadIC_PreMonomorphic, LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Normal, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_ArrayLength, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_StringLength, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_StringWrapperLength, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_FunctionPrototype, LOAD_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(LoadIC_Megamorphic, LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
\
V(KeyedLoadIC_Initialize, KEYED_LOAD_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedLoadIC_PreMonomorphic, KEYED_LOAD_IC, PREMONOMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_Generic, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_String, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_IndexedInterceptor, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(KeyedLoadIC_NonStrictArguments, KEYED_LOAD_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
\
V(StoreIC_Initialize, STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(StoreIC_ArrayLength, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Normal, STORE_IC, MONOMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Megamorphic, STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_GlobalProxy, STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
V(StoreIC_Initialize_Strict, STORE_IC, UNINITIALIZED, \
kStrictMode) \
V(StoreIC_ArrayLength_Strict, STORE_IC, MONOMORPHIC, \
kStrictMode) \
V(StoreIC_Normal_Strict, STORE_IC, MONOMORPHIC, \
kStrictMode) \
V(StoreIC_Megamorphic_Strict, STORE_IC, MEGAMORPHIC, \
kStrictMode) \
V(StoreIC_GlobalProxy_Strict, STORE_IC, MEGAMORPHIC, \
kStrictMode) \
\
V(KeyedStoreIC_Initialize, KEYED_STORE_IC, UNINITIALIZED, \
Code::kNoExtraICState) \
V(KeyedStoreIC_Generic, KEYED_STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
\
V(KeyedStoreIC_Initialize_Strict, KEYED_STORE_IC, UNINITIALIZED, \
kStrictMode) \
V(KeyedStoreIC_Generic_Strict, KEYED_STORE_IC, MEGAMORPHIC, \
kStrictMode) \
V(KeyedStoreIC_NonStrictArguments, KEYED_STORE_IC, MEGAMORPHIC, \
Code::kNoExtraICState) \
\
/* Uses KeyedLoadIC_Initialize; must be after in list. */ \
V(FunctionCall, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(FunctionApply, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(ArrayCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
V(ArrayConstructCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(StringConstructCode, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState) \
\
V(OnStackReplacement, BUILTIN, UNINITIALIZED, \
Code::kNoExtraICState)
@ -235,25 +247,28 @@ enum BuiltinExtraArguments {
V(APPLY_OVERFLOW, 1)
class BuiltinFunctionTable;
class ObjectVisitor;
class Builtins : public AllStatic {
class Builtins {
public:
~Builtins();
// Generate all builtin code objects. Should be called once during
// VM initialization.
static void Setup(bool create_heap_objects);
static void TearDown();
// isolate initialization.
void Setup(bool create_heap_objects);
void TearDown();
// Garbage collection support.
static void IterateBuiltins(ObjectVisitor* v);
void IterateBuiltins(ObjectVisitor* v);
// Disassembler support.
static const char* Lookup(byte* pc);
const char* Lookup(byte* pc);
enum Name {
#define DEF_ENUM_C(name, ignore) name,
#define DEF_ENUM_A(name, kind, state, extra) name,
#define DEF_ENUM_C(name, ignore) k##name,
#define DEF_ENUM_A(name, kind, state, extra) k##name,
BUILTIN_LIST_C(DEF_ENUM_C)
BUILTIN_LIST_A(DEF_ENUM_A)
BUILTIN_LIST_DEBUG_A(DEF_ENUM_A)
@ -276,13 +291,22 @@ class Builtins : public AllStatic {
id_count
};
static Code* builtin(Name name) {
#define DECLARE_BUILTIN_ACCESSOR_C(name, ignore) Handle<Code> name();
#define DECLARE_BUILTIN_ACCESSOR_A(name, kind, state, extra) \
Handle<Code> name();
BUILTIN_LIST_C(DECLARE_BUILTIN_ACCESSOR_C)
BUILTIN_LIST_A(DECLARE_BUILTIN_ACCESSOR_A)
BUILTIN_LIST_DEBUG_A(DECLARE_BUILTIN_ACCESSOR_A)
#undef DECLARE_BUILTIN_ACCESSOR_C
#undef DECLARE_BUILTIN_ACCESSOR_A
Code* builtin(Name name) {
// Code::cast cannot be used here since we access builtins
// during the marking phase of mark sweep. See IC::Clear.
return reinterpret_cast<Code*>(builtins_[name]);
}
static Address builtin_address(Name name) {
Address builtin_address(Name name) {
return reinterpret_cast<Address>(&builtins_[name]);
}
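Illustrative call sites under the new per-isolate layout, assuming the isolate exposes a builtins() accessor as part of the multi-isolate refactor:

  Code* array_code = isolate->builtins()->builtin(Builtins::kArrayCode);
  Address miss = isolate->builtins()->builtin_address(Builtins::kLoadIC_Miss);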
@ -292,20 +316,24 @@ class Builtins : public AllStatic {
static const char* GetName(JavaScript id) { return javascript_names_[id]; }
static int GetArgumentsCount(JavaScript id) { return javascript_argc_[id]; }
static Handle<Code> GetCode(JavaScript id, bool* resolved);
Handle<Code> GetCode(JavaScript id, bool* resolved);
static int NumberOfJavaScriptBuiltins() { return id_count; }
bool is_initialized() const { return initialized_; }
private:
Builtins();
// The external C++ functions called from the code.
static Address c_functions_[cfunction_count];
static Address const c_functions_[cfunction_count];
// Note: These are always Code objects, but to conform with
// IterateBuiltins() above which assumes Object**'s for the callback
// function f, we use an Object* array here.
static Object* builtins_[builtin_count];
static const char* names_[builtin_count];
static const char* javascript_names_[id_count];
static int javascript_argc_[id_count];
Object* builtins_[builtin_count];
const char* names_[builtin_count];
static const char* const javascript_names_[id_count];
static int const javascript_argc_[id_count];
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
@ -330,8 +358,16 @@ class Builtins : public AllStatic {
static void Generate_ArrayConstructCode(MacroAssembler* masm);
static void Generate_StringConstructCode(MacroAssembler* masm);
static void Generate_OnStackReplacement(MacroAssembler* masm);
static void InitBuiltinFunctionTable();
bool initialized_;
friend class BuiltinFunctionTable;
friend class Isolate;
DISALLOW_COPY_AND_ASSIGN(Builtins);
};
} } // namespace v8::internal

4
deps/v8/src/char-predicates.h

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -28,6 +28,8 @@
#ifndef V8_CHAR_PREDICATES_H_
#define V8_CHAR_PREDICATES_H_
#include "unicode.h"
namespace v8 {
namespace internal {

4
deps/v8/src/checks.cc

@ -30,8 +30,8 @@
#include "v8.h"
#include "platform.h"
#include "top.h"
// TODO(isolates): is it necessary to lift this?
static int fatal_error_handler_nesting_depth = 0;
// Contains protection against recursive calls (faults while handling faults).
@ -52,7 +52,7 @@ extern "C" void V8_Fatal(const char* file, int line, const char* format, ...) {
if (fatal_error_handler_nesting_depth < 3) {
if (i::FLAG_stack_trace_on_abort) {
// Call this one twice on double fault
i::Top::PrintStack();
i::Isolate::Current()->PrintStack();
}
}
i::OS::Abort();

4
deps/v8/src/checks.h

@ -271,6 +271,8 @@ bool EnableSlowAsserts();
#define ASSERT_EQ(v1, v2) CHECK_EQ(v1, v2)
#define ASSERT_NE(v1, v2) CHECK_NE(v1, v2)
#define ASSERT_GE(v1, v2) CHECK_GE(v1, v2)
#define ASSERT_LT(v1, v2) CHECK_LT(v1, v2)
#define ASSERT_LE(v1, v2) CHECK_LE(v1, v2)
#define SLOW_ASSERT(condition) if (EnableSlowAsserts()) CHECK(condition)
#else
#define ASSERT_RESULT(expr) (expr)
@ -278,6 +280,8 @@ bool EnableSlowAsserts();
#define ASSERT_EQ(v1, v2) ((void) 0)
#define ASSERT_NE(v1, v2) ((void) 0)
#define ASSERT_GE(v1, v2) ((void) 0)
#define ASSERT_LT(v1, v2) ((void) 0)
#define ASSERT_LE(v1, v2) ((void) 0)
#define SLOW_ASSERT(condition) ((void) 0)
#endif
// Static asserts has no impact on runtime performance, so they can be

73
deps/v8/src/code-stubs.cc

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -29,6 +29,7 @@
#include "bootstrapper.h"
#include "code-stubs.h"
#include "stub-cache.h"
#include "factory.h"
#include "gdb-jit.h"
#include "macro-assembler.h"
@ -37,9 +38,10 @@ namespace v8 {
namespace internal {
bool CodeStub::FindCodeInCache(Code** code_out) {
int index = Heap::code_stubs()->FindEntry(GetKey());
Heap* heap = Isolate::Current()->heap();
int index = heap->code_stubs()->FindEntry(GetKey());
if (index != NumberDictionary::kNotFound) {
*code_out = Code::cast(Heap::code_stubs()->ValueAt(index));
*code_out = Code::cast(heap->code_stubs()->ValueAt(index));
return true;
}
return false;
@ -48,7 +50,7 @@ bool CodeStub::FindCodeInCache(Code** code_out) {
void CodeStub::GenerateCode(MacroAssembler* masm) {
// Update the static counter each time a new code stub is generated.
Counters::code_stubs.Increment();
masm->isolate()->counters()->code_stubs()->Increment();
// Nested stubs are not allowed for leafs.
AllowStubCallsScope allow_scope(masm, AllowsStubCalls());
@ -62,9 +64,11 @@ void CodeStub::GenerateCode(MacroAssembler* masm) {
void CodeStub::RecordCodeGeneration(Code* code, MacroAssembler* masm) {
code->set_major_key(MajorKey());
PROFILE(CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
Isolate* isolate = masm->isolate();
PROFILE(isolate, CodeCreateEvent(Logger::STUB_TAG, code, GetName()));
GDBJIT(AddCode(GDBJITInterface::STUB, GetName(), code));
Counters::total_stubs_code_size.Increment(code->instruction_size());
Counters* counters = isolate->counters();
counters->total_stubs_code_size()->Increment(code->instruction_size());
#ifdef ENABLE_DISASSEMBLER
if (FLAG_print_code_stubs) {
@ -84,12 +88,15 @@ int CodeStub::GetCodeKind() {
Handle<Code> CodeStub::GetCode() {
Isolate* isolate = Isolate::Current();
Factory* factory = isolate->factory();
Heap* heap = isolate->heap();
Code* code;
if (!FindCodeInCache(&code)) {
v8::HandleScope scope;
HandleScope scope(isolate);
// Generate the new code.
MacroAssembler masm(NULL, 256);
MacroAssembler masm(isolate, NULL, 256);
GenerateCode(&masm);
// Create the code object.
@ -101,22 +108,24 @@ Handle<Code> CodeStub::GetCode() {
static_cast<Code::Kind>(GetCodeKind()),
InLoop(),
GetICState());
Handle<Code> new_object = Factory::NewCode(desc, flags, masm.CodeObject());
Handle<Code> new_object = factory->NewCode(
desc, flags, masm.CodeObject(), NeedsImmovableCode());
RecordCodeGeneration(*new_object, &masm);
FinishCode(*new_object);
// Update the dictionary and the root in Heap.
Handle<NumberDictionary> dict =
Factory::DictionaryAtNumberPut(
Handle<NumberDictionary>(Heap::code_stubs()),
factory->DictionaryAtNumberPut(
Handle<NumberDictionary>(heap->code_stubs()),
GetKey(),
new_object);
Heap::public_set_code_stubs(*dict);
heap->public_set_code_stubs(*dict);
code = *new_object;
}
return Handle<Code>(code);
ASSERT(!NeedsImmovableCode() || heap->lo_space()->Contains(code));
return Handle<Code>(code, isolate);
}
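A typical (hypothetical) call site for the path above: a stub descriptor is constructed on the stack and GetCode() returns the cached or freshly generated Code object.

  StackCheckStub stub;
  Handle<Code> check_code = stub.GetCode();   // compiled on first use, cached after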
@ -124,8 +133,9 @@ MaybeObject* CodeStub::TryGetCode() {
Code* code;
if (!FindCodeInCache(&code)) {
// Generate the new code.
MacroAssembler masm(NULL, 256);
MacroAssembler masm(Isolate::Current(), NULL, 256);
GenerateCode(&masm);
Heap* heap = masm.isolate()->heap();
// Create the code object.
CodeDesc desc;
@ -138,7 +148,7 @@ MaybeObject* CodeStub::TryGetCode() {
GetICState());
Object* new_object;
{ MaybeObject* maybe_new_object =
Heap::CreateCode(desc, flags, masm.CodeObject());
heap->CreateCode(desc, flags, masm.CodeObject());
if (!maybe_new_object->ToObject(&new_object)) return maybe_new_object;
}
code = Code::cast(new_object);
@ -147,9 +157,9 @@ MaybeObject* CodeStub::TryGetCode() {
// Try to update the code cache but do not fail if unable.
MaybeObject* maybe_new_object =
Heap::code_stubs()->AtNumberPut(GetKey(), code);
heap->code_stubs()->AtNumberPut(GetKey(), code);
if (maybe_new_object->ToObject(&new_object)) {
Heap::public_set_code_stubs(NumberDictionary::cast(new_object));
heap->public_set_code_stubs(NumberDictionary::cast(new_object));
}
}
@ -188,6 +198,12 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
case CompareIC::HEAP_NUMBERS:
GenerateHeapNumbers(masm);
break;
case CompareIC::STRINGS:
GenerateStrings(masm);
break;
case CompareIC::SYMBOLS:
GenerateSymbols(masm);
break;
case CompareIC::OBJECTS:
GenerateObjects(masm);
break;
@ -200,7 +216,8 @@ void ICCompareStub::Generate(MacroAssembler* masm) {
const char* InstanceofStub::GetName() {
if (name_ != NULL) return name_;
const int kMaxNameLength = 100;
name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength);
name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
kMaxNameLength);
if (name_ == NULL) return "OOM";
const char* args = "";
@ -227,4 +244,24 @@ const char* InstanceofStub::GetName() {
}
void KeyedLoadFastElementStub::Generate(MacroAssembler* masm) {
KeyedLoadStubCompiler::GenerateLoadFastElement(masm);
}
void KeyedStoreFastElementStub::Generate(MacroAssembler* masm) {
KeyedStoreStubCompiler::GenerateStoreFastElement(masm, is_js_array_);
}
void KeyedLoadExternalArrayStub::Generate(MacroAssembler* masm) {
KeyedLoadStubCompiler::GenerateLoadExternalArray(masm, elements_kind_);
}
void KeyedStoreExternalArrayStub::Generate(MacroAssembler* masm) {
KeyedStoreStubCompiler::GenerateStoreExternalArray(masm, elements_kind_);
}
} } // namespace v8::internal
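
The GetCode() changes in this file show the commit's recurring refactor: static Factory::/Heap::/Bootstrapper:: entry points become accessors on an explicit Isolate, so several VM instances can coexist in one process. A minimal standalone sketch of that pattern follows; the Context and Factory names below are illustrative stand-ins, not V8's real classes.

#include <iostream>
#include <string>

// Hypothetical stand-ins for Isolate and Factory; illustrative only.
class Factory {
 public:
  std::string NewCode(const std::string& desc) { return "code:" + desc; }
};

class Context {  // plays the role of v8::internal::Isolate
 public:
  Factory* factory() { return &factory_; }
 private:
  Factory factory_;
};

// Before the refactor this would call a process-wide Factory::NewCode();
// afterwards the same work is routed through an explicit per-instance
// context, so two contexts never share mutable state.
std::string GetStubCode(Context* context, const std::string& desc) {
  return context->factory()->NewCode(desc);
}

int main() {
  Context a, b;  // two independent "isolates" in one process
  std::cout << GetStubCode(&a, "stub-A") << "\n";
  std::cout << GetStubCode(&b, "stub-B") << "\n";
  return 0;
}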

226
deps/v8/src/code-stubs.h

@ -28,35 +28,38 @@
#ifndef V8_CODE_STUBS_H_
#define V8_CODE_STUBS_H_
#include "allocation.h"
#include "globals.h"
namespace v8 {
namespace internal {
// List of code stubs used on all platforms. The order in this list is important
// as only the stubs up to and including Instanceof allow nested stub calls.
// List of code stubs used on all platforms.
#define CODE_STUB_LIST_ALL_PLATFORMS(V) \
V(CallFunction) \
V(GenericBinaryOp) \
V(TypeRecordingBinaryOp) \
V(UnaryOp) \
V(BinaryOp) \
V(StringAdd) \
V(StringCharAt) \
V(SubString) \
V(StringCompare) \
V(SmiOp) \
V(Compare) \
V(CompareIC) \
V(MathPow) \
V(TranscendentalCache) \
V(Instanceof) \
/* All stubs above this line only exist in a few versions, which are */ \
/* generated ahead of time. Therefore compiling a call to one of */ \
/* them can't cause a new stub to be compiled, so compiling a call to */ \
/* them is GC safe. The ones below this line exist in many variants */ \
/* so code compiling a call to one can cause a GC. This means they */ \
/* can't be called from other stubs, since stub generation code is */ \
/* not GC safe. */ \
V(ConvertToDouble) \
V(WriteInt32ToHeapNumber) \
V(IntegerMod) \
V(StackCheck) \
V(FastNewClosure) \
V(FastNewContext) \
V(FastCloneShallowArray) \
V(GenericUnaryOp) \
V(RevertToNumber) \
V(ToBoolean) \
V(ToNumber) \
@ -67,7 +70,12 @@ namespace internal {
V(NumberToString) \
V(CEntry) \
V(JSEntry) \
V(DebuggerStatement)
V(KeyedLoadFastElement) \
V(KeyedStoreFastElement) \
V(KeyedLoadExternalArray) \
V(KeyedStoreExternalArray) \
V(DebuggerStatement) \
V(StringDictionaryNegativeLookup)
// List of code stubs only used on ARM platforms.
#ifdef V8_TARGET_ARCH_ARM
@ -81,10 +89,20 @@ namespace internal {
#define CODE_STUB_LIST_ARM(V)
#endif
// List of code stubs only used on MIPS platforms.
#ifdef V8_TARGET_ARCH_MIPS
#define CODE_STUB_LIST_MIPS(V) \
V(RegExpCEntry) \
V(DirectCEntry)
#else
#define CODE_STUB_LIST_MIPS(V)
#endif
// Combined list of code stubs.
#define CODE_STUB_LIST(V) \
CODE_STUB_LIST_ALL_PLATFORMS(V) \
CODE_STUB_LIST_ARM(V)
CODE_STUB_LIST_ARM(V) \
CODE_STUB_LIST_MIPS(V)
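
The CODE_STUB_LIST macros above are an X-macro: each platform contributes a V(...) list, and a single CODE_STUB_LIST(V) invocation expands the combined list into enums, name tables, or dispatch code. A small self-contained sketch of the idiom, using made-up stub names:

#include <iostream>

// Each platform contributes a V(...) list.
#define STUB_LIST_ALL_PLATFORMS(V) \
  V(CallFunction)                  \
  V(StringAdd)

#define STUB_LIST_ARM(V) \
  V(GetProperty)

// Combined list, analogous to CODE_STUB_LIST(V).
#define STUB_LIST(V)         \
  STUB_LIST_ALL_PLATFORMS(V) \
  STUB_LIST_ARM(V)

// One expansion: build an enum from the combined list.
#define DEFINE_ENUM(name) name,
enum Major { STUB_LIST(DEFINE_ENUM) NUMBER_OF_STUBS };
#undef DEFINE_ENUM

// Another expansion: a parallel table of printable names.
#define DEFINE_NAME(name) #name,
static const char* kStubNames[] = { STUB_LIST(DEFINE_NAME) };
#undef DEFINE_NAME

int main() {
  for (int i = 0; i < NUMBER_OF_STUBS; ++i) {
    std::cout << i << ": " << kStubNames[i] << "\n";
  }
  return 0;
}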
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
@ -156,10 +174,10 @@ class CodeStub BASE_EMBEDDED {
// lazily generated function should be fully optimized or not.
virtual InLoopFlag InLoop() { return NOT_IN_LOOP; }
// GenericBinaryOpStub needs to override this.
// BinaryOpStub needs to override this.
virtual int GetCodeKind();
// GenericBinaryOpStub needs to override this.
// BinaryOpStub needs to override this.
virtual InlineCacheState GetICState() {
return UNINITIALIZED;
}
@ -167,7 +185,11 @@ class CodeStub BASE_EMBEDDED {
// Returns a name for logging/debugging purposes.
virtual const char* GetName() { return MajorName(MajorKey(), false); }
#ifdef DEBUG
// Returns whether the code generated for this stub needs to be allocated as
// a fixed (non-moveable) code object.
virtual bool NeedsImmovableCode() { return false; }
#ifdef DEBUG
virtual void Print() { PrintF("%s\n", GetName()); }
#endif
@ -178,6 +200,7 @@ class CodeStub BASE_EMBEDDED {
MajorKeyBits::encode(MajorKey());
}
// See comment above, where Instanceof is defined.
bool AllowsStubCalls() { return MajorKey() <= Instanceof; }
class MajorKeyBits: public BitField<uint32_t, 0, kMajorBits> {};
@ -251,7 +274,6 @@ class StackCheckStub : public CodeStub {
void Generate(MacroAssembler* masm);
private:
const char* GetName() { return "StackCheckStub"; }
Major MajorKey() { return StackCheck; }
@ -274,12 +296,17 @@ class ToNumberStub: public CodeStub {
class FastNewClosureStub : public CodeStub {
public:
explicit FastNewClosureStub(StrictModeFlag strict_mode)
: strict_mode_(strict_mode) { }
void Generate(MacroAssembler* masm);
private:
const char* GetName() { return "FastNewClosureStub"; }
Major MajorKey() { return FastNewClosure; }
int MinorKey() { return 0; }
int MinorKey() { return strict_mode_; }
StrictModeFlag strict_mode_;
};
@ -373,54 +400,6 @@ class InstanceofStub: public CodeStub {
};
enum NegativeZeroHandling {
kStrictNegativeZero,
kIgnoreNegativeZero
};
enum UnaryOpFlags {
NO_UNARY_FLAGS = 0,
NO_UNARY_SMI_CODE_IN_STUB = 1 << 0
};
class GenericUnaryOpStub : public CodeStub {
public:
GenericUnaryOpStub(Token::Value op,
UnaryOverwriteMode overwrite,
UnaryOpFlags flags,
NegativeZeroHandling negative_zero = kStrictNegativeZero)
: op_(op),
overwrite_(overwrite),
include_smi_code_((flags & NO_UNARY_SMI_CODE_IN_STUB) == 0),
negative_zero_(negative_zero) { }
private:
Token::Value op_;
UnaryOverwriteMode overwrite_;
bool include_smi_code_;
NegativeZeroHandling negative_zero_;
class OverwriteField: public BitField<UnaryOverwriteMode, 0, 1> {};
class IncludeSmiCodeField: public BitField<bool, 1, 1> {};
class NegativeZeroField: public BitField<NegativeZeroHandling, 2, 1> {};
class OpField: public BitField<Token::Value, 3, kMinorBits - 3> {};
Major MajorKey() { return GenericUnaryOp; }
int MinorKey() {
return OpField::encode(op_) |
OverwriteField::encode(overwrite_) |
IncludeSmiCodeField::encode(include_smi_code_) |
NegativeZeroField::encode(negative_zero_);
}
void Generate(MacroAssembler* masm);
const char* GetName();
};
class MathPowStub: public CodeStub {
public:
MathPowStub() {}
@ -434,18 +413,6 @@ class MathPowStub: public CodeStub {
};
class StringCharAtStub: public CodeStub {
public:
StringCharAtStub() {}
private:
Major MajorKey() { return StringCharAt; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
class ICCompareStub: public CodeStub {
public:
ICCompareStub(Token::Value op, CompareIC::State state)
@ -468,6 +435,8 @@ class ICCompareStub: public CodeStub {
void GenerateSmis(MacroAssembler* masm);
void GenerateHeapNumbers(MacroAssembler* masm);
void GenerateSymbols(MacroAssembler* masm);
void GenerateStrings(MacroAssembler* masm);
void GenerateObjects(MacroAssembler* masm);
void GenerateMiss(MacroAssembler* masm);
@ -623,6 +592,8 @@ class CEntryStub : public CodeStub {
Major MajorKey() { return CEntry; }
int MinorKey();
bool NeedsImmovableCode();
const char* GetName() { return "CEntryStub"; }
};
@ -661,7 +632,9 @@ class ArgumentsAccessStub: public CodeStub {
public:
enum Type {
READ_ELEMENT,
NEW_OBJECT
NEW_NON_STRICT_FAST,
NEW_NON_STRICT_SLOW,
NEW_STRICT
};
explicit ArgumentsAccessStub(Type type) : type_(type) { }
@ -674,7 +647,9 @@ class ArgumentsAccessStub: public CodeStub {
void Generate(MacroAssembler* masm);
void GenerateReadElement(MacroAssembler* masm);
void GenerateNewObject(MacroAssembler* masm);
void GenerateNewStrict(MacroAssembler* masm);
void GenerateNewNonStrictFast(MacroAssembler* masm);
void GenerateNewNonStrictSlow(MacroAssembler* masm);
const char* GetName() { return "ArgumentsAccessStub"; }
@ -765,8 +740,9 @@ class CallFunctionStub: public CodeStub {
}
InLoopFlag InLoop() { return in_loop_; }
bool ReceiverMightBeValue() {
return (flags_ & RECEIVER_MIGHT_BE_VALUE) != 0;
bool ReceiverMightBeImplicit() {
return (flags_ & RECEIVER_MIGHT_BE_IMPLICIT) != 0;
}
};
@ -945,6 +921,98 @@ class AllowStubCallsScope {
DISALLOW_COPY_AND_ASSIGN(AllowStubCallsScope);
};
#ifdef DEBUG
#define DECLARE_ARRAY_STUB_PRINT(name) void Print() { PrintF(#name); }
#else
#define DECLARE_ARRAY_STUB_PRINT(name)
#endif
class KeyedLoadFastElementStub : public CodeStub {
public:
explicit KeyedLoadFastElementStub() {
}
Major MajorKey() { return KeyedLoadFastElement; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "KeyedLoadFastElementStub"; }
DECLARE_ARRAY_STUB_PRINT(KeyedLoadFastElementStub)
};
class KeyedStoreFastElementStub : public CodeStub {
public:
explicit KeyedStoreFastElementStub(bool is_js_array)
: is_js_array_(is_js_array) { }
Major MajorKey() { return KeyedStoreFastElement; }
int MinorKey() { return is_js_array_ ? 1 : 0; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "KeyedStoreFastElementStub"; }
DECLARE_ARRAY_STUB_PRINT(KeyedStoreFastElementStub)
private:
bool is_js_array_;
};
class KeyedLoadExternalArrayStub : public CodeStub {
public:
explicit KeyedLoadExternalArrayStub(JSObject::ElementsKind elements_kind)
: elements_kind_(elements_kind) { }
Major MajorKey() { return KeyedLoadExternalArray; }
int MinorKey() { return elements_kind_; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "KeyedLoadExternalArrayStub"; }
DECLARE_ARRAY_STUB_PRINT(KeyedLoadExternalArrayStub)
protected:
JSObject::ElementsKind elements_kind_;
};
class KeyedStoreExternalArrayStub : public CodeStub {
public:
explicit KeyedStoreExternalArrayStub(JSObject::ElementsKind elements_kind)
: elements_kind_(elements_kind) { }
Major MajorKey() { return KeyedStoreExternalArray; }
int MinorKey() { return elements_kind_; }
void Generate(MacroAssembler* masm);
const char* GetName() { return "KeyedStoreExternalArrayStub"; }
DECLARE_ARRAY_STUB_PRINT(KeyedStoreExternalArrayStub)
protected:
JSObject::ElementsKind elements_kind_;
};
class ToBooleanStub: public CodeStub {
public:
explicit ToBooleanStub(Register tos) : tos_(tos) { }
void Generate(MacroAssembler* masm);
private:
Register tos_;
Major MajorKey() { return ToBoolean; }
int MinorKey() { return tos_.code(); }
};
} } // namespace v8::internal
#endif // V8_CODE_STUBS_H_
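
Several stubs in this header pack their parameters into MinorKey() with BitField<> fields (the removed GenericUnaryOpStub shows the pattern: OpField | OverwriteField | IncludeSmiCodeField | ...). A minimal sketch of that bit-packing idiom; the field layout below is invented for illustration and is not V8's.

#include <cassert>
#include <cstdint>

// Minimal BitField-style packer: each field owns `size` bits at `shift`.
template <class T, int shift, int size>
struct BitField {
  static uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << shift;
  }
  static T decode(uint32_t key) {
    return static_cast<T>((key >> shift) & ((1u << size) - 1));
  }
};

enum Overwrite { ALLOC_RESULT = 0, OVERWRITE_RESULT = 1 };

typedef BitField<Overwrite, 0, 1> OverwriteField;   // bit 0
typedef BitField<bool, 1, 1>      IncludeSmiField;  // bit 1
typedef BitField<int, 2, 6>       OpField;          // bits 2..7

int main() {
  // Pack three stub parameters into one minor key, then unpack them.
  uint32_t minor_key = OpField::encode(13) |
                       OverwriteField::encode(OVERWRITE_RESULT) |
                       IncludeSmiField::encode(true);
  assert(OpField::decode(minor_key) == 13);
  assert(OverwriteField::decode(minor_key) == OVERWRITE_RESULT);
  assert(IncludeSmiField::decode(minor_key) == true);
  return 0;
}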

2
deps/v8/src/code.h

@ -28,6 +28,8 @@
#ifndef V8_CODE_H_
#define V8_CODE_H_
#include "allocation.h"
namespace v8 {
namespace internal {

315
deps/v8/src/codegen.cc

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -28,16 +28,13 @@
#include "v8.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "codegen.h"
#include "compiler.h"
#include "debug.h"
#include "prettyprinter.h"
#include "register-allocator-inl.h"
#include "rewriter.h"
#include "runtime.h"
#include "scopeinfo.h"
#include "stub-cache.h"
#include "virtual-frame-inl.h"
namespace v8 {
namespace internal {
@ -61,67 +58,6 @@ Comment::~Comment() {
#undef __
CodeGenerator* CodeGeneratorScope::top_ = NULL;
void CodeGenerator::ProcessDeferred() {
while (!deferred_.is_empty()) {
DeferredCode* code = deferred_.RemoveLast();
ASSERT(masm_ == code->masm());
// Record position of deferred code stub.
masm_->positions_recorder()->RecordStatementPosition(
code->statement_position());
if (code->position() != RelocInfo::kNoPosition) {
masm_->positions_recorder()->RecordPosition(code->position());
}
// Generate the code.
Comment cmnt(masm_, code->comment());
masm_->bind(code->entry_label());
if (code->AutoSaveAndRestore()) {
code->SaveRegisters();
}
code->Generate();
if (code->AutoSaveAndRestore()) {
code->RestoreRegisters();
code->Exit();
}
}
}
void DeferredCode::Exit() {
masm_->jmp(exit_label());
}
void CodeGenerator::SetFrame(VirtualFrame* new_frame,
RegisterFile* non_frame_registers) {
RegisterFile saved_counts;
if (has_valid_frame()) {
frame_->DetachFromCodeGenerator();
// The remaining register reference counts are the non-frame ones.
allocator_->SaveTo(&saved_counts);
}
if (new_frame != NULL) {
// Restore the non-frame register references that go with the new frame.
allocator_->RestoreFrom(non_frame_registers);
new_frame->AttachToCodeGenerator();
}
frame_ = new_frame;
saved_counts.CopyTo(non_frame_registers);
}
void CodeGenerator::DeleteFrame() {
if (has_valid_frame()) {
frame_->DetachFromCodeGenerator();
frame_ = NULL;
}
}
void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
#ifdef DEBUG
bool print_source = false;
@ -129,7 +65,7 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
bool print_json_ast = false;
const char* ftype;
if (Bootstrapper::IsActive()) {
if (Isolate::Current()->bootstrapper()->IsActive()) {
print_source = FLAG_print_builtin_source;
print_ast = FLAG_print_builtin_ast;
print_json_ast = FLAG_print_builtin_json_ast;
@ -178,13 +114,17 @@ void CodeGenerator::MakeCodePrologue(CompilationInfo* info) {
Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
Code::Flags flags,
CompilationInfo* info) {
Isolate* isolate = info->isolate();
// Allocate and install the code.
CodeDesc desc;
masm->GetCode(&desc);
Handle<Code> code = Factory::NewCode(desc, flags, masm->CodeObject());
Handle<Code> code =
isolate->factory()->NewCode(desc, flags, masm->CodeObject());
if (!code.is_null()) {
Counters::total_compiled_code_size.Increment(code->instruction_size());
isolate->counters()->total_compiled_code_size()->Increment(
code->instruction_size());
}
return code;
}
@ -192,7 +132,7 @@ Handle<Code> CodeGenerator::MakeCodeEpilogue(MacroAssembler* masm,
void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#ifdef ENABLE_DISASSEMBLER
bool print_code = Bootstrapper::IsActive()
bool print_code = Isolate::Current()->bootstrapper()->IsActive()
? FLAG_print_builtin_code
: (FLAG_print_code || (info->IsOptimizing() && FLAG_print_opt_code));
Vector<const char> filter = CStrVector(FLAG_hydrogen_filter);
@ -229,62 +169,18 @@ void CodeGenerator::PrintCode(Handle<Code> code, CompilationInfo* info) {
#endif // ENABLE_DISASSEMBLER
}
// Generate the code. Compile the AST and assemble all the pieces into a
// Code object.
bool CodeGenerator::MakeCode(CompilationInfo* info) {
// When using Crankshaft the classic backend should never be used.
ASSERT(!V8::UseCrankshaft());
Handle<Script> script = info->script();
if (!script->IsUndefined() && !script->source()->IsUndefined()) {
int len = String::cast(script->source())->length();
Counters::total_old_codegen_source_size.Increment(len);
}
if (FLAG_trace_codegen) {
PrintF("Classic Compiler - ");
}
MakeCodePrologue(info);
// Generate code.
const int kInitialBufferSize = 4 * KB;
MacroAssembler masm(NULL, kInitialBufferSize);
#ifdef ENABLE_GDB_JIT_INTERFACE
masm.positions_recorder()->StartGDBJITLineInfoRecording();
#endif
CodeGenerator cgen(&masm);
CodeGeneratorScope scope(&cgen);
cgen.Generate(info);
if (cgen.HasStackOverflow()) {
ASSERT(!Top::has_pending_exception());
return false;
}
InLoopFlag in_loop = info->is_in_loop() ? IN_LOOP : NOT_IN_LOOP;
Code::Flags flags = Code::ComputeFlags(Code::FUNCTION, in_loop);
Handle<Code> code = MakeCodeEpilogue(cgen.masm(), flags, info);
// There is no stack check table in code generated by the classic backend.
code->SetNoStackCheckTable();
CodeGenerator::PrintCode(code, info);
info->SetCode(code); // May be an empty handle.
#ifdef ENABLE_GDB_JIT_INTERFACE
if (FLAG_gdbjit && !code.is_null()) {
GDBJITLineInfo* lineinfo =
masm.positions_recorder()->DetachGDBJITLineInfo();
GDBJIT(RegisterDetailedLineInfo(*code, lineinfo));
}
#endif
return !code.is_null();
}
#ifdef ENABLE_LOGGING_AND_PROFILING
static Vector<const char> kRegexp = CStrVector("regexp");
bool CodeGenerator::ShouldGenerateLog(Expression* type) {
ASSERT(type != NULL);
if (!Logger::is_logging() && !CpuProfiler::is_profiling()) return false;
Isolate* isolate = Isolate::Current();
if (!isolate->logger()->is_logging() && !CpuProfiler::is_profiling(isolate)) {
return false;
}
Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
if (FLAG_log_regexp) {
static Vector<const char> kRegexp = CStrVector("regexp");
if (name->IsEqualTo(kRegexp))
return true;
}
@ -294,120 +190,6 @@ bool CodeGenerator::ShouldGenerateLog(Expression* type) {
#endif
void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
int length = declarations->length();
int globals = 0;
for (int i = 0; i < length; i++) {
Declaration* node = declarations->at(i);
Variable* var = node->proxy()->var();
Slot* slot = var->AsSlot();
// If it was not possible to allocate the variable at compile
// time, we need to "declare" it at runtime to make sure it
// actually exists in the local context.
if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
VisitDeclaration(node);
} else {
// Count global variables and functions for later processing
globals++;
}
}
// Return in case of no declared global functions or variables.
if (globals == 0) return;
// Compute array of global variable and function declarations.
Handle<FixedArray> array = Factory::NewFixedArray(2 * globals, TENURED);
for (int j = 0, i = 0; i < length; i++) {
Declaration* node = declarations->at(i);
Variable* var = node->proxy()->var();
Slot* slot = var->AsSlot();
if ((slot != NULL && slot->type() == Slot::LOOKUP) || !var->is_global()) {
// Skip - already processed.
} else {
array->set(j++, *(var->name()));
if (node->fun() == NULL) {
if (var->mode() == Variable::CONST) {
// In case this is const property use the hole.
array->set_the_hole(j++);
} else {
array->set_undefined(j++);
}
} else {
Handle<SharedFunctionInfo> function =
Compiler::BuildFunctionInfo(node->fun(), script());
// Check for stack-overflow exception.
if (function.is_null()) {
SetStackOverflow();
return;
}
array->set(j++, *function);
}
}
}
// Invoke the platform-dependent code generator to do the actual
// declaration of the global variables and functions.
DeclareGlobals(array);
}
void CodeGenerator::VisitIncrementOperation(IncrementOperation* expr) {
UNREACHABLE();
}
// Lookup table for code generators for special runtime calls which are
// generated inline.
#define INLINE_FUNCTION_GENERATOR_ADDRESS(Name, argc, ressize) \
&CodeGenerator::Generate##Name,
const CodeGenerator::InlineFunctionGenerator
CodeGenerator::kInlineFunctionGenerators[] = {
INLINE_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
INLINE_RUNTIME_FUNCTION_LIST(INLINE_FUNCTION_GENERATOR_ADDRESS)
};
#undef INLINE_FUNCTION_GENERATOR_ADDRESS
bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
ZoneList<Expression*>* args = node->arguments();
Handle<String> name = node->name();
Runtime::Function* function = node->function();
if (function != NULL && function->intrinsic_type == Runtime::INLINE) {
int lookup_index = static_cast<int>(function->function_id) -
static_cast<int>(Runtime::kFirstInlineFunction);
ASSERT(lookup_index >= 0);
ASSERT(static_cast<size_t>(lookup_index) <
ARRAY_SIZE(kInlineFunctionGenerators));
InlineFunctionGenerator generator = kInlineFunctionGenerators[lookup_index];
(this->*generator)(args);
return true;
}
return false;
}
// Simple condition analysis. ALWAYS_TRUE and ALWAYS_FALSE represent a
// known result for the test expression, with no side effects.
CodeGenerator::ConditionAnalysis CodeGenerator::AnalyzeCondition(
Expression* cond) {
if (cond == NULL) return ALWAYS_TRUE;
Literal* lit = cond->AsLiteral();
if (lit == NULL) return DONT_KNOW;
if (lit->IsTrue()) {
return ALWAYS_TRUE;
} else if (lit->IsFalse()) {
return ALWAYS_FALSE;
}
return DONT_KNOW;
}
bool CodeGenerator::RecordPositions(MacroAssembler* masm,
int pos,
bool right_here) {
@ -422,61 +204,20 @@ bool CodeGenerator::RecordPositions(MacroAssembler* masm,
}
void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
if (FLAG_debug_info) RecordPositions(masm(), fun->start_position(), false);
}
void CodeGenerator::CodeForReturnPosition(FunctionLiteral* fun) {
if (FLAG_debug_info) RecordPositions(masm(), fun->end_position() - 1, false);
}
void CodeGenerator::CodeForStatementPosition(Statement* stmt) {
if (FLAG_debug_info) RecordPositions(masm(), stmt->statement_pos(), false);
}
void CodeGenerator::CodeForDoWhileConditionPosition(DoWhileStatement* stmt) {
if (FLAG_debug_info)
RecordPositions(masm(), stmt->condition_position(), false);
}
void CodeGenerator::CodeForSourcePosition(int pos) {
if (FLAG_debug_info && pos != RelocInfo::kNoPosition) {
masm()->positions_recorder()->RecordPosition(pos);
}
}
const char* GenericUnaryOpStub::GetName() {
switch (op_) {
case Token::SUB:
if (negative_zero_ == kStrictNegativeZero) {
return overwrite_ == UNARY_OVERWRITE
? "GenericUnaryOpStub_SUB_Overwrite_Strict0"
: "GenericUnaryOpStub_SUB_Alloc_Strict0";
} else {
return overwrite_ == UNARY_OVERWRITE
? "GenericUnaryOpStub_SUB_Overwrite_Ignore0"
: "GenericUnaryOpStub_SUB_Alloc_Ignore0";
}
case Token::BIT_NOT:
return overwrite_ == UNARY_OVERWRITE
? "GenericUnaryOpStub_BIT_NOT_Overwrite"
: "GenericUnaryOpStub_BIT_NOT_Alloc";
default:
UNREACHABLE();
return "<unknown>";
}
}
void ArgumentsAccessStub::Generate(MacroAssembler* masm) {
switch (type_) {
case READ_ELEMENT: GenerateReadElement(masm); break;
case NEW_OBJECT: GenerateNewObject(masm); break;
case READ_ELEMENT:
GenerateReadElement(masm);
break;
case NEW_NON_STRICT_FAST:
GenerateNewNonStrictFast(masm);
break;
case NEW_NON_STRICT_SLOW:
GenerateNewNonStrictSlow(masm);
break;
case NEW_STRICT:
GenerateNewStrict(masm);
break;
}
}
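
The ProcessDeferred() loop removed from this file drained a list of DeferredCode objects after the main code-generation pass, so uncommon slow paths were emitted out of line instead of being jumped around inside the fast path. A toy analog of that structure, using std::function in place of a real assembler; all names here are illustrative only.

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// The fast path records out-of-line work; everything deferred is emitted
// after the main pass, like CodeGenerator::ProcessDeferred() did.
class Generator {
 public:
  void Emit(const std::string& instr) { code_ += instr + "\n"; }
  void Defer(std::function<void(Generator*)> slow_path) {
    deferred_.push_back(slow_path);
  }
  void ProcessDeferred() {
    for (auto& slow_path : deferred_) slow_path(this);
    deferred_.clear();
  }
  const std::string& code() const { return code_; }

 private:
  std::string code_;
  std::vector<std::function<void(Generator*)> > deferred_;
};

int main() {
  Generator gen;
  gen.Emit("fast: add");
  gen.Defer([](Generator* g) { g->Emit("slow: call runtime"); });
  gen.Emit("fast: return");
  gen.ProcessDeferred();  // slow paths land after the fast code
  std::cout << gen.code();
  return 0;
}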

163
deps/v8/src/codegen.h

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -54,7 +54,6 @@
// shared code:
// CodeGenerator
// ~CodeGenerator
// ProcessDeferred
// Generate
// ComputeLazyCompile
// BuildFunctionInfo
@ -68,7 +67,6 @@
// CodeForDoWhileConditionPosition
// CodeForSourcePosition
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#if V8_TARGET_ARCH_IA32
@ -83,163 +81,4 @@ enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
#error Unsupported target architecture.
#endif
#include "register-allocator.h"
namespace v8 {
namespace internal {
// Code generation can be nested. Code generation scopes form a stack
// of active code generators.
class CodeGeneratorScope BASE_EMBEDDED {
public:
explicit CodeGeneratorScope(CodeGenerator* cgen) {
previous_ = top_;
top_ = cgen;
}
~CodeGeneratorScope() {
top_ = previous_;
}
static CodeGenerator* Current() {
ASSERT(top_ != NULL);
return top_;
}
private:
static CodeGenerator* top_;
CodeGenerator* previous_;
};
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64
// State of used registers in a virtual frame.
class FrameRegisterState {
public:
// Captures the current state of the given frame.
explicit FrameRegisterState(VirtualFrame* frame);
// Saves the state in the stack.
void Save(MacroAssembler* masm) const;
// Restores the state from the stack.
void Restore(MacroAssembler* masm) const;
private:
// Constants indicating special actions. They should not be multiples
// of kPointerSize so they will not collide with valid offsets from
// the frame pointer.
static const int kIgnore = -1;
static const int kPush = 1;
// This flag is ored with a valid offset from the frame pointer, so
// it should fit in the low zero bits of a valid offset.
static const int kSyncedFlag = 2;
int registers_[RegisterAllocator::kNumRegisters];
};
#elif V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_MIPS
class FrameRegisterState {
public:
inline FrameRegisterState(VirtualFrame frame) : frame_(frame) { }
inline const VirtualFrame* frame() const { return &frame_; }
private:
VirtualFrame frame_;
};
#else
#error Unsupported target architecture.
#endif
// RuntimeCallHelper implementation that saves/restores state of a
// virtual frame.
class VirtualFrameRuntimeCallHelper : public RuntimeCallHelper {
public:
// Does not take ownership of |frame_state|.
explicit VirtualFrameRuntimeCallHelper(const FrameRegisterState* frame_state)
: frame_state_(frame_state) {}
virtual void BeforeCall(MacroAssembler* masm) const;
virtual void AfterCall(MacroAssembler* masm) const;
private:
const FrameRegisterState* frame_state_;
};
// Deferred code objects are small pieces of code that are compiled
// out of line. They are used to defer the compilation of uncommon
// paths thereby avoiding expensive jumps around uncommon code parts.
class DeferredCode: public ZoneObject {
public:
DeferredCode();
virtual ~DeferredCode() { }
virtual void Generate() = 0;
MacroAssembler* masm() { return masm_; }
int statement_position() const { return statement_position_; }
int position() const { return position_; }
Label* entry_label() { return &entry_label_; }
Label* exit_label() { return &exit_label_; }
#ifdef DEBUG
void set_comment(const char* comment) { comment_ = comment; }
const char* comment() const { return comment_; }
#else
void set_comment(const char* comment) { }
const char* comment() const { return ""; }
#endif
inline void Jump();
inline void Branch(Condition cc);
void BindExit() { masm_->bind(&exit_label_); }
const FrameRegisterState* frame_state() const { return &frame_state_; }
void SaveRegisters();
void RestoreRegisters();
void Exit();
// If this returns true then all registers will be saved for the duration
// of the Generate() call. Otherwise the registers are not saved and the
// Generate() call must bracket any runtime calls with calls to
// SaveRegisters() and RestoreRegisters(). In this case the Generate
// method must also call Exit() in order to return to the non-deferred
// code.
virtual bool AutoSaveAndRestore() { return true; }
protected:
MacroAssembler* masm_;
private:
int statement_position_;
int position_;
Label entry_label_;
Label exit_label_;
FrameRegisterState frame_state_;
#ifdef DEBUG
const char* comment_;
#endif
DISALLOW_COPY_AND_ASSIGN(DeferredCode);
};
} } // namespace v8::internal
#endif // V8_CODEGEN_H_
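
CodeGeneratorScope, kept in this header, forms a stack of active code generators through a static top_ pointer that each destructor restores; that is what allows code generation to nest. A self-contained sketch of the same RAII scope-stack idiom; the Generator type is a placeholder, not V8's CodeGenerator.

#include <cassert>

struct Generator { int id; };

// Construction pushes, destruction pops, Current() returns the innermost.
class GeneratorScope {
 public:
  explicit GeneratorScope(Generator* gen) : previous_(top_) { top_ = gen; }
  ~GeneratorScope() { top_ = previous_; }
  static Generator* Current() {
    assert(top_ != nullptr);
    return top_;
  }

 private:
  static Generator* top_;
  Generator* previous_;
};

Generator* GeneratorScope::top_ = nullptr;

int main() {
  Generator outer{1}, inner{2};
  GeneratorScope a(&outer);
  assert(GeneratorScope::Current()->id == 1);
  {
    GeneratorScope b(&inner);           // nested code generation
    assert(GeneratorScope::Current()->id == 2);
  }                                     // leaving the scope restores outer
  assert(GeneratorScope::Current()->id == 1);
  return 0;
}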

298
deps/v8/src/compilation-cache.cc

@ -33,8 +33,6 @@
namespace v8 {
namespace internal {
// The number of sub caches covering the different types to cache.
static const int kSubCacheCount = 4;
// The number of generations for each sub cache.
// The number of ScriptGenerations is carefully chosen based on histograms.
@ -47,162 +45,32 @@ static const int kRegExpGenerations = 2;
// Initial size of each compilation cache table allocated.
static const int kInitialCacheSize = 64;
// Index for the first generation in the cache.
static const int kFirstGeneration = 0;
// The compilation cache consists of several generational sub-caches which use
// this class as a base class. A sub-cache contains a compilation cache table
// for each generation of the sub-cache. Since the same source code string has
// different compiled code for scripts and evals, we use separate sub-caches
// for different compilation modes, to avoid retrieving the wrong result.
class CompilationSubCache {
public:
explicit CompilationSubCache(int generations): generations_(generations) {
tables_ = NewArray<Object*>(generations);
}
~CompilationSubCache() { DeleteArray(tables_); }
// Get the compilation cache tables for a specific generation.
Handle<CompilationCacheTable> GetTable(int generation);
// Accessors for first generation.
Handle<CompilationCacheTable> GetFirstTable() {
return GetTable(kFirstGeneration);
CompilationCache::CompilationCache(Isolate* isolate)
: isolate_(isolate),
script_(isolate, kScriptGenerations),
eval_global_(isolate, kEvalGlobalGenerations),
eval_contextual_(isolate, kEvalContextualGenerations),
reg_exp_(isolate, kRegExpGenerations),
enabled_(true),
eager_optimizing_set_(NULL) {
CompilationSubCache* subcaches[kSubCacheCount] =
{&script_, &eval_global_, &eval_contextual_, &reg_exp_};
for (int i = 0; i < kSubCacheCount; ++i) {
subcaches_[i] = subcaches[i];
}
void SetFirstTable(Handle<CompilationCacheTable> value) {
ASSERT(kFirstGeneration < generations_);
tables_[kFirstGeneration] = *value;
}
// Age the sub-cache by evicting the oldest generation and creating a new
// young generation.
void Age();
// GC support.
void Iterate(ObjectVisitor* v);
void IterateFunctions(ObjectVisitor* v);
// Clear this sub-cache evicting all its content.
void Clear();
// Remove given shared function info from sub-cache.
void Remove(Handle<SharedFunctionInfo> function_info);
// Number of generations in this sub-cache.
inline int generations() { return generations_; }
private:
int generations_; // Number of generations.
Object** tables_; // Compilation cache tables - one for each generation.
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
};
// Sub-cache for scripts.
class CompilationCacheScript : public CompilationSubCache {
public:
explicit CompilationCacheScript(int generations)
: CompilationSubCache(generations) { }
Handle<SharedFunctionInfo> Lookup(Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset);
void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
private:
MUST_USE_RESULT MaybeObject* TryTablePut(
Handle<String> source, Handle<SharedFunctionInfo> function_info);
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(
Handle<String> source, Handle<SharedFunctionInfo> function_info);
bool HasOrigin(Handle<SharedFunctionInfo> function_info,
Handle<Object> name,
int line_offset,
int column_offset);
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
};
// Sub-cache for eval scripts.
class CompilationCacheEval: public CompilationSubCache {
public:
explicit CompilationCacheEval(int generations)
: CompilationSubCache(generations) { }
Handle<SharedFunctionInfo> Lookup(Handle<String> source,
Handle<Context> context,
StrictModeFlag strict_mode);
void Put(Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info);
private:
MUST_USE_RESULT MaybeObject* TryTablePut(
Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info);
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(
Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info);
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
};
// Sub-cache for regular expressions.
class CompilationCacheRegExp: public CompilationSubCache {
public:
explicit CompilationCacheRegExp(int generations)
: CompilationSubCache(generations) { }
Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
void Put(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data);
private:
MUST_USE_RESULT MaybeObject* TryTablePut(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data);
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data);
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
};
// Statically allocate all the sub-caches.
static CompilationCacheScript script(kScriptGenerations);
static CompilationCacheEval eval_global(kEvalGlobalGenerations);
static CompilationCacheEval eval_contextual(kEvalContextualGenerations);
static CompilationCacheRegExp reg_exp(kRegExpGenerations);
static CompilationSubCache* subcaches[kSubCacheCount] =
{&script, &eval_global, &eval_contextual, &reg_exp};
}
// Current enable state of the compilation cache.
static bool enabled = true;
static inline bool IsEnabled() {
return FLAG_compilation_cache && enabled;
CompilationCache::~CompilationCache() {
delete eager_optimizing_set_;
eager_optimizing_set_ = NULL;
}
static Handle<CompilationCacheTable> AllocateTable(int size) {
CALL_HEAP_FUNCTION(CompilationCacheTable::Allocate(size),
static Handle<CompilationCacheTable> AllocateTable(Isolate* isolate, int size) {
CALL_HEAP_FUNCTION(isolate,
CompilationCacheTable::Allocate(size),
CompilationCacheTable);
}
@ -211,17 +79,16 @@ Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
ASSERT(generation < generations_);
Handle<CompilationCacheTable> result;
if (tables_[generation]->IsUndefined()) {
result = AllocateTable(kInitialCacheSize);
result = AllocateTable(isolate(), kInitialCacheSize);
tables_[generation] = *result;
} else {
CompilationCacheTable* table =
CompilationCacheTable::cast(tables_[generation]);
result = Handle<CompilationCacheTable>(table);
result = Handle<CompilationCacheTable>(table, isolate());
}
return result;
}
void CompilationSubCache::Age() {
// Age the generations implicitly killing off the oldest.
for (int i = generations_ - 1; i > 0; i--) {
@ -229,12 +96,12 @@ void CompilationSubCache::Age() {
}
// Set the first generation as unborn.
tables_[0] = Heap::undefined_value();
tables_[0] = isolate()->heap()->undefined_value();
}
void CompilationSubCache::IterateFunctions(ObjectVisitor* v) {
Object* undefined = Heap::raw_unchecked_undefined_value();
Object* undefined = isolate()->heap()->raw_unchecked_undefined_value();
for (int i = 0; i < generations_; i++) {
if (tables_[i] != undefined) {
reinterpret_cast<CompilationCacheTable*>(tables_[i])->IterateElements(v);
@ -249,14 +116,14 @@ void CompilationSubCache::Iterate(ObjectVisitor* v) {
void CompilationSubCache::Clear() {
MemsetPointer(tables_, Heap::undefined_value(), generations_);
MemsetPointer(tables_, isolate()->heap()->undefined_value(), generations_);
}
void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
// Probe the script generation tables. Make sure not to leak handles
// into the caller's handle scope.
{ HandleScope scope;
{ HandleScope scope(isolate());
for (int generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
table->Remove(*function_info);
@ -265,6 +132,13 @@ void CompilationSubCache::Remove(Handle<SharedFunctionInfo> function_info) {
}
CompilationCacheScript::CompilationCacheScript(Isolate* isolate,
int generations)
: CompilationSubCache(isolate, generations),
script_histogram_(NULL),
script_histogram_initialized_(false) { }
// We only re-use a cached function for some script source code if the
// script originates from the same place. This is to avoid issues
// when reporting errors, etc.
@ -274,7 +148,7 @@ bool CompilationCacheScript::HasOrigin(
int line_offset,
int column_offset) {
Handle<Script> script =
Handle<Script>(Script::cast(function_info->script()));
Handle<Script>(Script::cast(function_info->script()), isolate());
// If the script name isn't set, the boilerplate script should have
// an undefined name to have the same origin.
if (name.is_null()) {
@ -303,10 +177,10 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
// Probe the script generation tables. Make sure not to leak handles
// into the caller's handle scope.
{ HandleScope scope;
{ HandleScope scope(isolate());
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
Handle<Object> probe(table->Lookup(*source));
Handle<Object> probe(table->Lookup(*source), isolate());
if (probe->IsSharedFunctionInfo()) {
Handle<SharedFunctionInfo> function_info =
Handle<SharedFunctionInfo>::cast(probe);
@ -320,30 +194,34 @@ Handle<SharedFunctionInfo> CompilationCacheScript::Lookup(Handle<String> source,
}
}
static void* script_histogram = StatsTable::CreateHistogram(
"V8.ScriptCache",
0,
kScriptGenerations,
kScriptGenerations + 1);
if (!script_histogram_initialized_) {
script_histogram_ = isolate()->stats_table()->CreateHistogram(
"V8.ScriptCache",
0,
kScriptGenerations,
kScriptGenerations + 1);
script_histogram_initialized_ = true;
}
if (script_histogram != NULL) {
if (script_histogram_ != NULL) {
// The level NUMBER_OF_SCRIPT_GENERATIONS is equivalent to a cache miss.
StatsTable::AddHistogramSample(script_histogram, generation);
isolate()->stats_table()->AddHistogramSample(script_histogram_, generation);
}
// Once outside the manacles of the handle scope, we need to recheck
// to see if we actually found a cached script. If so, we return a
// handle created in the caller's handle scope.
if (result != NULL) {
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result));
Handle<SharedFunctionInfo> shared(SharedFunctionInfo::cast(result),
isolate());
ASSERT(HasOrigin(shared, name, line_offset, column_offset));
// If the script was found in a later generation, we promote it to
// the first generation to let it survive longer in the cache.
if (generation != 0) Put(source, shared);
Counters::compilation_cache_hits.Increment();
isolate()->counters()->compilation_cache_hits()->Increment();
return shared;
} else {
Counters::compilation_cache_misses.Increment();
isolate()->counters()->compilation_cache_misses()->Increment();
return Handle<SharedFunctionInfo>::null();
}
}
@ -360,13 +238,15 @@ MaybeObject* CompilationCacheScript::TryTablePut(
Handle<CompilationCacheTable> CompilationCacheScript::TablePut(
Handle<String> source,
Handle<SharedFunctionInfo> function_info) {
CALL_HEAP_FUNCTION(TryTablePut(source, function_info), CompilationCacheTable);
CALL_HEAP_FUNCTION(isolate(),
TryTablePut(source, function_info),
CompilationCacheTable);
}
void CompilationCacheScript::Put(Handle<String> source,
Handle<SharedFunctionInfo> function_info) {
HandleScope scope;
HandleScope scope(isolate());
SetFirstTable(TablePut(source, function_info));
}
@ -380,7 +260,7 @@ Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
// having cleared the cache.
Object* result = NULL;
int generation;
{ HandleScope scope;
{ HandleScope scope(isolate());
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
result = table->LookupEval(*source, *context, strict_mode);
@ -391,14 +271,14 @@ Handle<SharedFunctionInfo> CompilationCacheEval::Lookup(
}
if (result->IsSharedFunctionInfo()) {
Handle<SharedFunctionInfo>
function_info(SharedFunctionInfo::cast(result));
function_info(SharedFunctionInfo::cast(result), isolate());
if (generation != 0) {
Put(source, context, function_info);
}
Counters::compilation_cache_hits.Increment();
isolate()->counters()->compilation_cache_hits()->Increment();
return function_info;
} else {
Counters::compilation_cache_misses.Increment();
isolate()->counters()->compilation_cache_misses()->Increment();
return Handle<SharedFunctionInfo>::null();
}
}
@ -417,7 +297,8 @@ Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
CALL_HEAP_FUNCTION(TryTablePut(source, context, function_info),
CALL_HEAP_FUNCTION(isolate(),
TryTablePut(source, context, function_info),
CompilationCacheTable);
}
@ -425,7 +306,7 @@ Handle<CompilationCacheTable> CompilationCacheEval::TablePut(
void CompilationCacheEval::Put(Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info) {
HandleScope scope;
HandleScope scope(isolate());
SetFirstTable(TablePut(source, context, function_info));
}
@ -437,7 +318,7 @@ Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
// having cleared the cache.
Object* result = NULL;
int generation;
{ HandleScope scope;
{ HandleScope scope(isolate());
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
result = table->LookupRegExp(*source, flags);
@ -447,14 +328,14 @@ Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
}
}
if (result->IsFixedArray()) {
Handle<FixedArray> data(FixedArray::cast(result));
Handle<FixedArray> data(FixedArray::cast(result), isolate());
if (generation != 0) {
Put(source, flags, data);
}
Counters::compilation_cache_hits.Increment();
isolate()->counters()->compilation_cache_hits()->Increment();
return data;
} else {
Counters::compilation_cache_misses.Increment();
isolate()->counters()->compilation_cache_misses()->Increment();
return Handle<FixedArray>::null();
}
}
@ -473,14 +354,16 @@ Handle<CompilationCacheTable> CompilationCacheRegExp::TablePut(
Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
CALL_HEAP_FUNCTION(TryTablePut(source, flags, data), CompilationCacheTable);
CALL_HEAP_FUNCTION(isolate(),
TryTablePut(source, flags, data),
CompilationCacheTable);
}
void CompilationCacheRegExp::Put(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
HandleScope scope;
HandleScope scope(isolate());
SetFirstTable(TablePut(source, flags, data));
}
@ -488,9 +371,9 @@ void CompilationCacheRegExp::Put(Handle<String> source,
void CompilationCache::Remove(Handle<SharedFunctionInfo> function_info) {
if (!IsEnabled()) return;
eval_global.Remove(function_info);
eval_contextual.Remove(function_info);
script.Remove(function_info);
eval_global_.Remove(function_info);
eval_contextual_.Remove(function_info);
script_.Remove(function_info);
}
@ -502,7 +385,7 @@ Handle<SharedFunctionInfo> CompilationCache::LookupScript(Handle<String> source,
return Handle<SharedFunctionInfo>::null();
}
return script.Lookup(source, name, line_offset, column_offset);
return script_.Lookup(source, name, line_offset, column_offset);
}
@ -517,9 +400,9 @@ Handle<SharedFunctionInfo> CompilationCache::LookupEval(
Handle<SharedFunctionInfo> result;
if (is_global) {
result = eval_global.Lookup(source, context, strict_mode);
result = eval_global_.Lookup(source, context, strict_mode);
} else {
result = eval_contextual.Lookup(source, context, strict_mode);
result = eval_contextual_.Lookup(source, context, strict_mode);
}
return result;
}
@ -531,7 +414,7 @@ Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
return Handle<FixedArray>::null();
}
return reg_exp.Lookup(source, flags);
return reg_exp_.Lookup(source, flags);
}
@ -541,7 +424,7 @@ void CompilationCache::PutScript(Handle<String> source,
return;
}
script.Put(source, function_info);
script_.Put(source, function_info);
}
@ -553,11 +436,11 @@ void CompilationCache::PutEval(Handle<String> source,
return;
}
HandleScope scope;
HandleScope scope(isolate());
if (is_global) {
eval_global.Put(source, context, function_info);
eval_global_.Put(source, context, function_info);
} else {
eval_contextual.Put(source, context, function_info);
eval_contextual_.Put(source, context, function_info);
}
}
@ -570,7 +453,7 @@ void CompilationCache::PutRegExp(Handle<String> source,
return;
}
reg_exp.Put(source, flags, data);
reg_exp_.Put(source, flags, data);
}
@ -579,9 +462,11 @@ static bool SourceHashCompare(void* key1, void* key2) {
}
static HashMap* EagerOptimizingSet() {
static HashMap map(&SourceHashCompare);
return &map;
HashMap* CompilationCache::EagerOptimizingSet() {
if (eager_optimizing_set_ == NULL) {
eager_optimizing_set_ = new HashMap(&SourceHashCompare);
}
return eager_optimizing_set_;
}
@ -615,38 +500,39 @@ void CompilationCache::ResetEagerOptimizingData() {
void CompilationCache::Clear() {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches[i]->Clear();
subcaches_[i]->Clear();
}
}
void CompilationCache::Iterate(ObjectVisitor* v) {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches[i]->Iterate(v);
subcaches_[i]->Iterate(v);
}
}
void CompilationCache::IterateFunctions(ObjectVisitor* v) {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches[i]->IterateFunctions(v);
subcaches_[i]->IterateFunctions(v);
}
}
void CompilationCache::MarkCompactPrologue() {
for (int i = 0; i < kSubCacheCount; i++) {
subcaches[i]->Age();
subcaches_[i]->Age();
}
}
void CompilationCache::Enable() {
enabled = true;
enabled_ = true;
}
void CompilationCache::Disable() {
enabled = false;
enabled_ = false;
Clear();
}

235
deps/v8/src/compilation-cache.h

@ -31,6 +31,152 @@
namespace v8 {
namespace internal {
class HashMap;
// The compilation cache consists of several generational sub-caches which use
// this class as a base class. A sub-cache contains a compilation cache table
// for each generation of the sub-cache. Since the same source code string has
// different compiled code for scripts and evals, we use separate sub-caches
// for different compilation modes, to avoid retrieving the wrong result.
class CompilationSubCache {
public:
CompilationSubCache(Isolate* isolate, int generations)
: isolate_(isolate),
generations_(generations) {
tables_ = NewArray<Object*>(generations);
}
~CompilationSubCache() { DeleteArray(tables_); }
// Index for the first generation in the cache.
static const int kFirstGeneration = 0;
// Get the compilation cache tables for a specific generation.
Handle<CompilationCacheTable> GetTable(int generation);
// Accessors for first generation.
Handle<CompilationCacheTable> GetFirstTable() {
return GetTable(kFirstGeneration);
}
void SetFirstTable(Handle<CompilationCacheTable> value) {
ASSERT(kFirstGeneration < generations_);
tables_[kFirstGeneration] = *value;
}
// Age the sub-cache by evicting the oldest generation and creating a new
// young generation.
void Age();
// GC support.
void Iterate(ObjectVisitor* v);
void IterateFunctions(ObjectVisitor* v);
// Clear this sub-cache evicting all its content.
void Clear();
// Remove given shared function info from sub-cache.
void Remove(Handle<SharedFunctionInfo> function_info);
// Number of generations in this sub-cache.
inline int generations() { return generations_; }
protected:
Isolate* isolate() { return isolate_; }
private:
Isolate* isolate_;
int generations_; // Number of generations.
Object** tables_; // Compilation cache tables - one for each generation.
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
};
// Sub-cache for scripts.
class CompilationCacheScript : public CompilationSubCache {
public:
CompilationCacheScript(Isolate* isolate, int generations);
Handle<SharedFunctionInfo> Lookup(Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset);
void Put(Handle<String> source, Handle<SharedFunctionInfo> function_info);
private:
MUST_USE_RESULT MaybeObject* TryTablePut(
Handle<String> source, Handle<SharedFunctionInfo> function_info);
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(
Handle<String> source, Handle<SharedFunctionInfo> function_info);
bool HasOrigin(Handle<SharedFunctionInfo> function_info,
Handle<Object> name,
int line_offset,
int column_offset);
void* script_histogram_;
bool script_histogram_initialized_;
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
};
// Sub-cache for eval scripts.
class CompilationCacheEval: public CompilationSubCache {
public:
CompilationCacheEval(Isolate* isolate, int generations)
: CompilationSubCache(isolate, generations) { }
Handle<SharedFunctionInfo> Lookup(Handle<String> source,
Handle<Context> context,
StrictModeFlag strict_mode);
void Put(Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info);
private:
MUST_USE_RESULT MaybeObject* TryTablePut(
Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info);
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(
Handle<String> source,
Handle<Context> context,
Handle<SharedFunctionInfo> function_info);
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
};
// Sub-cache for regular expressions.
class CompilationCacheRegExp: public CompilationSubCache {
public:
CompilationCacheRegExp(Isolate* isolate, int generations)
: CompilationSubCache(isolate, generations) { }
Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
void Put(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data);
private:
MUST_USE_RESULT MaybeObject* TryTablePut(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data);
// Note: Returns a new hash table if operation results in expansion.
Handle<CompilationCacheTable> TablePut(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data);
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
};
// The compilation cache keeps shared function infos for compiled
// scripts and evals. The shared function infos are looked up using
@ -41,69 +187,98 @@ class CompilationCache {
// Finds the script shared function info for a source
// string. Returns an empty handle if the cache doesn't contain a
// script for the given source string with the right origin.
static Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset);
Handle<SharedFunctionInfo> LookupScript(Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset);
// Finds the shared function info for a source string for eval in a
// given context. Returns an empty handle if the cache doesn't
// contain a script for the given source string.
static Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
Handle<Context> context,
bool is_global,
StrictModeFlag strict_mode);
Handle<SharedFunctionInfo> LookupEval(Handle<String> source,
Handle<Context> context,
bool is_global,
StrictModeFlag strict_mode);
// Returns the regexp data associated with the given regexp if it
// is in cache, otherwise an empty handle.
static Handle<FixedArray> LookupRegExp(Handle<String> source,
JSRegExp::Flags flags);
Handle<FixedArray> LookupRegExp(Handle<String> source,
JSRegExp::Flags flags);
// Associate the (source, kind) pair to the shared function
// info. This may overwrite an existing mapping.
static void PutScript(Handle<String> source,
Handle<SharedFunctionInfo> function_info);
void PutScript(Handle<String> source,
Handle<SharedFunctionInfo> function_info);
// Associate the (source, context->closure()->shared(), kind) triple
// with the shared function info. This may overwrite an existing mapping.
static void PutEval(Handle<String> source,
Handle<Context> context,
bool is_global,
Handle<SharedFunctionInfo> function_info);
void PutEval(Handle<String> source,
Handle<Context> context,
bool is_global,
Handle<SharedFunctionInfo> function_info);
// Associate the (source, flags) pair to the given regexp data.
// This may overwrite an existing mapping.
static void PutRegExp(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data);
void PutRegExp(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data);
// Support for eager optimization tracking.
static bool ShouldOptimizeEagerly(Handle<JSFunction> function);
static void MarkForEagerOptimizing(Handle<JSFunction> function);
static void MarkForLazyOptimizing(Handle<JSFunction> function);
bool ShouldOptimizeEagerly(Handle<JSFunction> function);
void MarkForEagerOptimizing(Handle<JSFunction> function);
void MarkForLazyOptimizing(Handle<JSFunction> function);
// Reset the eager optimization tracking data.
static void ResetEagerOptimizingData();
void ResetEagerOptimizingData();
// Clear the cache - also used to initialize the cache at startup.
static void Clear();
void Clear();
// Remove given shared function info from all caches.
static void Remove(Handle<SharedFunctionInfo> function_info);
void Remove(Handle<SharedFunctionInfo> function_info);
// GC support.
static void Iterate(ObjectVisitor* v);
static void IterateFunctions(ObjectVisitor* v);
void Iterate(ObjectVisitor* v);
void IterateFunctions(ObjectVisitor* v);
// Notify the cache that a mark-sweep garbage collection is about to
// take place. This is used to retire entries from the cache to
// avoid keeping them alive too long without using them.
static void MarkCompactPrologue();
void MarkCompactPrologue();
// Enable/disable compilation cache. Used by debugger to disable compilation
// cache during debugging to make sure new scripts are always compiled.
static void Enable();
static void Disable();
void Enable();
void Disable();
private:
explicit CompilationCache(Isolate* isolate);
~CompilationCache();
HashMap* EagerOptimizingSet();
// The number of sub caches covering the different types to cache.
static const int kSubCacheCount = 4;
bool IsEnabled() { return FLAG_compilation_cache && enabled_; }
Isolate* isolate() { return isolate_; }
Isolate* isolate_;
CompilationCacheScript script_;
CompilationCacheEval eval_global_;
CompilationCacheEval eval_contextual_;
CompilationCacheRegExp reg_exp_;
CompilationSubCache* subcaches_[kSubCacheCount];
// Current enable state of the compilation cache.
bool enabled_;
HashMap* eager_optimizing_set_;
friend class Isolate;
DISALLOW_COPY_AND_ASSIGN(CompilationCache);
};
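
The sub-caches above keep one table per generation: Age() shifts every table toward the old end and clears the youngest, while Lookup() re-Puts a hit found in an older generation back into generation 0 so frequently used entries survive longer. A toy generational cache illustrating that policy; std::map stands in for CompilationCacheTable and the names are illustrative only.

#include <cstddef>
#include <iostream>
#include <map>
#include <string>
#include <vector>

class GenerationalCache {
 public:
  explicit GenerationalCache(int generations) : tables_(generations) {}

  void Put(const std::string& key, const std::string& value) {
    tables_[0][key] = value;             // always insert into generation 0
  }

  bool Lookup(const std::string& key, std::string* value) {
    for (size_t g = 0; g < tables_.size(); ++g) {
      std::map<std::string, std::string>::iterator it = tables_[g].find(key);
      if (it == tables_[g].end()) continue;
      *value = it->second;
      if (g != 0) Put(key, it->second);  // promote on hit, as Lookup() does
      return true;
    }
    return false;
  }

  void Age() {                            // like CompilationSubCache::Age()
    for (size_t g = tables_.size() - 1; g > 0; --g) {
      tables_[g] = tables_[g - 1];
    }
    tables_[0].clear();                   // youngest generation starts empty
  }

 private:
  std::vector<std::map<std::string, std::string> > tables_;
};

int main() {
  GenerationalCache cache(3);
  cache.Put("source", "compiled");
  cache.Age();                            // entry now lives in generation 1
  std::string out;
  if (cache.Lookup("source", &out)) std::cout << out << "\n";  // promoted back
  return 0;
}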

340
deps/v8/src/compiler.cc

@ -30,9 +30,8 @@
#include "compiler.h"
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "codegen.h"
#include "compilation-cache.h"
#include "data-flow.h"
#include "debug.h"
#include "full-codegen.h"
#include "gdb-jit.h"
@ -51,7 +50,8 @@ namespace internal {
CompilationInfo::CompilationInfo(Handle<Script> script)
: flags_(0),
: isolate_(script->GetIsolate()),
flags_(0),
function_(NULL),
scope_(NULL),
script_(script),
@ -64,7 +64,8 @@ CompilationInfo::CompilationInfo(Handle<Script> script)
CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
: flags_(IsLazy::encode(true)),
: isolate_(shared_info->GetIsolate()),
flags_(IsLazy::encode(true)),
function_(NULL),
scope_(NULL),
shared_info_(shared_info),
@ -78,7 +79,8 @@ CompilationInfo::CompilationInfo(Handle<SharedFunctionInfo> shared_info)
CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
: flags_(IsLazy::encode(true)),
: isolate_(closure->GetIsolate()),
flags_(IsLazy::encode(true)),
function_(NULL),
scope_(NULL),
closure_(closure),
@ -92,22 +94,23 @@ CompilationInfo::CompilationInfo(Handle<JSFunction> closure)
}
// Disable optimization for the rest of the compilation pipeline.
void CompilationInfo::DisableOptimization() {
if (FLAG_optimize_closures) {
// If we allow closure optimizations and it's an optimizable closure,
// mark it correspondingly.
bool is_closure = closure_.is_null() && !scope_->HasTrivialOuterContext();
if (is_closure) {
bool is_optimizable_closure =
!scope_->outer_scope_calls_eval() && !scope_->inside_with();
if (is_optimizable_closure) {
SetMode(BASE);
return;
}
}
}
bool is_optimizable_closure =
FLAG_optimize_closures &&
closure_.is_null() &&
!scope_->HasTrivialOuterContext() &&
!scope_->outer_scope_calls_non_strict_eval() &&
!scope_->inside_with();
SetMode(is_optimizable_closure ? BASE : NONOPT);
}
SetMode(NONOPT);
void CompilationInfo::AbortOptimization() {
Handle<Code> code(shared_info()->code());
SetCode(code);
Isolate* isolate = code->GetIsolate();
isolate->compilation_cache()->MarkForLazyOptimizing(closure());
}
@ -119,19 +122,23 @@ void CompilationInfo::DisableOptimization() {
// all. However, crankshaft supports recompilation of functions, so in this case
// the full compiler need not be used if a debugger is attached, but only if
// break points have actually been set.
static bool AlwaysFullCompiler() {
static bool is_debugging_active() {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (V8::UseCrankshaft()) {
return FLAG_always_full_compiler || Debug::has_break_points();
} else {
return FLAG_always_full_compiler || Debugger::IsDebuggerActive();
}
Isolate* isolate = Isolate::Current();
return V8::UseCrankshaft() ?
isolate->debug()->has_break_points() :
isolate->debugger()->IsDebuggerActive();
#else
return FLAG_always_full_compiler;
return false;
#endif
}
static bool AlwaysFullCompiler() {
return FLAG_always_full_compiler || is_debugging_active();
}
static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
int opt_count = function->shared()->opt_count();
function->shared()->set_opt_count(opt_count + 1);
@ -158,29 +165,6 @@ static void FinishOptimization(Handle<JSFunction> function, int64_t start) {
}
static void AbortAndDisable(CompilationInfo* info) {
// Disable optimization for the shared function info and mark the
// code as non-optimizable. The marker on the shared function info
// is there because we flush non-optimized code thereby losing the
// non-optimizable information for the code. When the code is
// regenerated and set on the shared function info it is marked as
// non-optimizable if optimization is disabled for the shared
// function info.
Handle<SharedFunctionInfo> shared = info->shared_info();
shared->set_optimization_disabled(true);
Handle<Code> code = Handle<Code>(shared->code());
ASSERT(code->kind() == Code::FUNCTION);
code->set_optimizable(false);
info->SetCode(code);
if (FLAG_trace_opt) {
PrintF("[disabled optimization for: ");
info->closure()->PrintName();
PrintF(" / %" V8PRIxPTR "]\n",
reinterpret_cast<intptr_t>(*info->closure()));
}
}
static bool MakeCrankshaftCode(CompilationInfo* info) {
// Test if we can optimize this function when asked to. We can only
// do this after the scopes are computed.
@ -197,6 +181,10 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
Handle<Code> code(info->shared_info()->code());
ASSERT(code->kind() == Code::FUNCTION);
// We should never arrive here if optimization has been disabled on the
// shared function info.
ASSERT(!info->shared_info()->optimization_disabled());
// Fall back to using the full code generator if it's not possible
// to use the Hydrogen-based optimizing compiler. We already have
// generated code for this from the shared function object.
@ -210,7 +198,9 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
const int kMaxOptCount =
FLAG_deopt_every_n_times == 0 ? Compiler::kDefaultMaxOptCount : 1000;
if (info->shared_info()->opt_count() > kMaxOptCount) {
AbortAndDisable(info);
info->AbortOptimization();
Handle<JSFunction> closure = info->closure();
info->shared_info()->DisableOptimization(*closure);
// True indicates the compilation pipeline is still going, not
// necessarily that we optimized the code.
return true;
@ -229,7 +219,9 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
if ((scope->num_parameters() + 1) > parameter_limit ||
(info->osr_ast_id() != AstNode::kNoNumber &&
scope->num_parameters() + 1 + scope->num_stack_slots() > locals_limit)) {
AbortAndDisable(info);
info->AbortOptimization();
Handle<JSFunction> closure = info->closure();
info->shared_info()->DisableOptimization(*closure);
// True indicates the compilation pipeline is still going, not
// necessarily that we optimized the code.
return true;
@ -250,7 +242,7 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
// performance of the hydrogen-based compiler.
int64_t start = OS::Ticks();
bool should_recompile = !info->shared_info()->has_deoptimization_support();
if (should_recompile || FLAG_time_hydrogen) {
if (should_recompile || FLAG_hydrogen_stats) {
HPhase phase(HPhase::kFullCodeGen);
CompilationInfo unoptimized(info->shared_info());
// Note that we use the same AST that we will use for generating the
@ -283,18 +275,18 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
HTracer::Instance()->TraceCompilation(info->function());
}
TypeFeedbackOracle oracle(
code, Handle<Context>(info->closure()->context()->global_context()));
HGraphBuilder builder(&oracle);
Handle<Context> global_context(info->closure()->context()->global_context());
TypeFeedbackOracle oracle(code, global_context);
HGraphBuilder builder(info, &oracle);
HPhase phase(HPhase::kTotal);
HGraph* graph = builder.CreateGraph(info);
if (Top::has_pending_exception()) {
HGraph* graph = builder.CreateGraph();
if (info->isolate()->has_pending_exception()) {
info->SetCode(Handle<Code>::null());
return false;
}
if (graph != NULL && FLAG_build_lithium) {
Handle<Code> optimized_code = graph->Compile();
Handle<Code> optimized_code = graph->Compile(info);
if (!optimized_code.is_null()) {
info->SetCode(optimized_code);
FinishOptimization(info->closure(), start);
@ -302,49 +294,32 @@ static bool MakeCrankshaftCode(CompilationInfo* info) {
}
}
// Compilation with the Hydrogen compiler failed. Keep using the
// shared code but mark it as unoptimizable.
AbortAndDisable(info);
// Keep using the shared code.
info->AbortOptimization();
if (!builder.inline_bailout()) {
// Mark the shared code as unoptimizable unless it was an inlined
// function that bailed out.
Handle<JSFunction> closure = info->closure();
info->shared_info()->DisableOptimization(*closure);
}
// True indicates the compilation pipeline is still going, not necessarily
// that we optimized the code.
return true;
}
static bool GenerateCode(CompilationInfo* info) {
return V8::UseCrankshaft() ?
MakeCrankshaftCode(info) :
FullCodeGenerator::MakeCode(info);
}
static bool MakeCode(CompilationInfo* info) {
// Precondition: code has been parsed. Postcondition: the code field in
// the compilation info is set if compilation succeeded.
ASSERT(info->function() != NULL);
if (Rewriter::Rewrite(info) && Scope::Analyze(info)) {
if (V8::UseCrankshaft()) return MakeCrankshaftCode(info);
// Generate code and return it. Code generator selection is governed by
// which backends are enabled and whether the function is considered
// run-once code or not.
//
// --full-compiler enables the dedicated backend for code we expect to
// be run once.
//
// The normal choice of backend can be overridden with the flag
// --always-full-compiler.
if (Rewriter::Analyze(info)) {
Handle<SharedFunctionInfo> shared = info->shared_info();
bool is_run_once = (shared.is_null())
? info->scope()->is_global_scope()
: (shared->is_toplevel() || shared->try_full_codegen());
bool can_use_full =
FLAG_full_compiler && !info->function()->contains_loops();
if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
return FullCodeGenerator::MakeCode(info);
} else {
return AssignedVariablesAnalyzer::Analyze(info) &&
CodeGenerator::MakeCode(info);
}
}
}
return false;
return Rewriter::Rewrite(info) && Scope::Analyze(info) && GenerateCode(info);
}
@ -364,13 +339,13 @@ bool Compiler::MakeCodeForLiveEdit(CompilationInfo* info) {
static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
PostponeInterruptsScope postpone;
Isolate* isolate = info->isolate();
ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
PostponeInterruptsScope postpone(isolate);
ASSERT(!i::Top::global_context().is_null());
ASSERT(!isolate->global_context().is_null());
Handle<Script> script = info->script();
script->set_context_data((*i::Top::global_context())->data());
script->set_context_data((*isolate->global_context())->data());
#ifdef ENABLE_DEBUGGER_SUPPORT
if (info->is_eval()) {
@ -379,19 +354,20 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
// For eval scripts add information on the function from which eval was
// called.
if (info->is_eval()) {
StackTraceFrameIterator it;
StackTraceFrameIterator it(isolate);
if (!it.done()) {
script->set_eval_from_shared(
JSFunction::cast(it.frame()->function())->shared());
Code* code = it.frame()->LookupCode();
int offset = static_cast<int>(
it.frame()->pc() - it.frame()->code()->instruction_start());
it.frame()->pc() - code->instruction_start());
script->set_eval_from_instructions_offset(Smi::FromInt(offset));
}
}
}
// Notify debugger
Debugger::OnBeforeCompile(script);
isolate->debugger()->OnBeforeCompile(script);
#endif
// Only allow non-global compiles for eval.
@ -403,22 +379,22 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
// rest of the function into account to avoid overlap with the
// parsing statistics.
HistogramTimer* rate = info->is_eval()
? &Counters::compile_eval
: &Counters::compile;
? info->isolate()->counters()->compile_eval()
: info->isolate()->counters()->compile();
HistogramTimerScope timer(rate);
// Compile the code.
FunctionLiteral* lit = info->function();
LiveEditFunctionTracker live_edit_tracker(lit);
LiveEditFunctionTracker live_edit_tracker(isolate, lit);
if (!MakeCode(info)) {
Top::StackOverflow();
isolate->StackOverflow();
return Handle<SharedFunctionInfo>::null();
}
// Allocate function.
ASSERT(!info->code().is_null());
Handle<SharedFunctionInfo> result =
Factory::NewSharedFunctionInfo(
isolate->factory()->NewSharedFunctionInfo(
lit->name(),
lit->materialized_literal_count(),
info->code(),
@ -428,7 +404,7 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
Compiler::SetFunctionInfo(result, lit, true, script);
if (script->name()->IsString()) {
PROFILE(CodeCreateEvent(
PROFILE(isolate, CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
@ -439,13 +415,13 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
script,
info->code()));
} else {
PROFILE(CodeCreateEvent(
PROFILE(isolate, CodeCreateEvent(
info->is_eval()
? Logger::EVAL_TAG
: Logger::ToNativeByScript(Logger::SCRIPT_TAG, *script),
*info->code(),
*result,
Heap::empty_string()));
isolate->heap()->empty_string()));
GDBJIT(AddCode(Handle<String>(), script, info->code()));
}
@ -456,7 +432,8 @@ static Handle<SharedFunctionInfo> MakeFunctionInfo(CompilationInfo* info) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger
Debugger::OnAfterCompile(script, Debugger::NO_AFTER_COMPILE_FLAGS);
isolate->debugger()->OnAfterCompile(
script, Debugger::NO_AFTER_COMPILE_FLAGS);
#endif
live_edit_tracker.RecordFunctionInfo(result, lit);
@ -473,20 +450,23 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
ScriptDataImpl* input_pre_data,
Handle<Object> script_data,
NativesFlag natives) {
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
Counters::total_load_size.Increment(source_length);
Counters::total_compile_size.Increment(source_length);
isolate->counters()->total_load_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
// The VM is in the COMPILER state until exiting this function.
VMState state(COMPILER);
VMState state(isolate, COMPILER);
CompilationCache* compilation_cache = isolate->compilation_cache();
// Do a lookup in the compilation cache but not for extensions.
Handle<SharedFunctionInfo> result;
if (extension == NULL) {
result = CompilationCache::LookupScript(source,
script_name,
line_offset,
column_offset);
result = compilation_cache->LookupScript(source,
script_name,
line_offset,
column_offset);
}
if (result.is_null()) {
@ -512,7 +492,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
}
// Create a script object describing the script to be compiled.
Handle<Script> script = Factory::NewScript(source);
Handle<Script> script = FACTORY->NewScript(source);
if (natives == NATIVES_CODE) {
script->set_type(Smi::FromInt(Script::TYPE_NATIVE));
}
@ -522,7 +502,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
script->set_column_offset(Smi::FromInt(column_offset));
}
script->set_data(script_data.is_null() ? Heap::undefined_value()
script->set_data(script_data.is_null() ? HEAP->undefined_value()
: *script_data);
// Compile the function and add it to the cache.
@ -530,9 +510,13 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
info.MarkAsGlobal();
info.SetExtension(extension);
info.SetPreParseData(pre_data);
if (natives == NATIVES_CODE) {
info.MarkAsAllowingNativesSyntax();
info.MarkAsNative();
}
result = MakeFunctionInfo(&info);
if (extension == NULL && !result.is_null()) {
CompilationCache::PutScript(source, result);
compilation_cache->PutScript(source, result);
}
// Get rid of the pre-parsing data (if necessary).
@ -541,7 +525,7 @@ Handle<SharedFunctionInfo> Compiler::Compile(Handle<String> source,
}
}
if (result.is_null()) Top::ReportPendingMessages();
if (result.is_null()) isolate->ReportPendingMessages();
return result;
}
@ -550,36 +534,39 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
Handle<Context> context,
bool is_global,
StrictModeFlag strict_mode) {
Isolate* isolate = source->GetIsolate();
int source_length = source->length();
Counters::total_eval_size.Increment(source_length);
Counters::total_compile_size.Increment(source_length);
isolate->counters()->total_eval_size()->Increment(source_length);
isolate->counters()->total_compile_size()->Increment(source_length);
// The VM is in the COMPILER state until exiting this function.
VMState state(COMPILER);
VMState state(isolate, COMPILER);
// Do a lookup in the compilation cache; if the entry is not there, invoke
// the compiler and add the result to the cache.
Handle<SharedFunctionInfo> result;
result = CompilationCache::LookupEval(source,
context,
is_global,
strict_mode);
CompilationCache* compilation_cache = isolate->compilation_cache();
result = compilation_cache->LookupEval(source,
context,
is_global,
strict_mode);
if (result.is_null()) {
// Create a script object describing the script to be compiled.
Handle<Script> script = Factory::NewScript(source);
Handle<Script> script = isolate->factory()->NewScript(source);
CompilationInfo info(script);
info.MarkAsEval();
if (is_global) info.MarkAsGlobal();
if (strict_mode == kStrictMode) info.MarkAsStrict();
if (strict_mode == kStrictMode) info.MarkAsStrictMode();
info.SetCallingContext(context);
result = MakeFunctionInfo(&info);
if (!result.is_null()) {
CompilationCache* compilation_cache = isolate->compilation_cache();
// If caller is strict mode, the result must be strict as well,
// but not the other way around. Consider:
// eval("'use strict'; ...");
ASSERT(strict_mode == kNonStrictMode || result->strict_mode());
CompilationCache::PutEval(source, context, is_global, result);
compilation_cache->PutEval(source, context, is_global, result);
}
}
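The CompileEval path above follows the same lookup-or-compile discipline as Compiler::Compile: consult the isolate's compilation cache, compile only on a miss, then publish the result back into the cache. A minimal standalone sketch of that pattern, using hypothetical names (CompileSketch, g_compilation_cache) that merely stand in for V8's handles and per-isolate cache:
#include <assert.h>
#include <map>
#include <string>
// Hypothetical stand-in for a compiled-code handle.
typedef std::string SharedFunctionInfoSketch;
// Hypothetical stand-in for the per-isolate compilation cache.
static std::map<std::string, SharedFunctionInfoSketch> g_compilation_cache;
static SharedFunctionInfoSketch CompileSketch(const std::string& source) {
  std::map<std::string, SharedFunctionInfoSketch>::iterator it =
      g_compilation_cache.find(source);
  if (it != g_compilation_cache.end()) return it->second;  // cache hit
  SharedFunctionInfoSketch compiled = "code:" + source;    // "compile" on miss
  g_compilation_cache[source] = compiled;                  // publish result
  return compiled;
}
int main() {
  assert(CompileSketch("1 + 1") == "code:1 + 1");
  assert(g_compilation_cache.size() == 1);
  CompileSketch("1 + 1");  // second call is served from the cache
  assert(g_compilation_cache.size() == 1);
  return 0;
}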
@ -588,32 +575,45 @@ Handle<SharedFunctionInfo> Compiler::CompileEval(Handle<String> source,
bool Compiler::CompileLazy(CompilationInfo* info) {
CompilationZoneScope zone_scope(DELETE_ON_EXIT);
Isolate* isolate = info->isolate();
ZoneScope zone_scope(isolate, DELETE_ON_EXIT);
// The VM is in the COMPILER state until exiting this function.
VMState state(COMPILER);
VMState state(isolate, COMPILER);
PostponeInterruptsScope postpone;
PostponeInterruptsScope postpone(isolate);
Handle<SharedFunctionInfo> shared = info->shared_info();
int compiled_size = shared->end_position() - shared->start_position();
Counters::total_compile_size.Increment(compiled_size);
isolate->counters()->total_compile_size()->Increment(compiled_size);
// Generate the AST for the lazily compiled function.
if (ParserApi::Parse(info)) {
// Measure how long it takes to do the lazy compilation; only take the
// rest of the function into account to avoid overlap with the lazy
// parsing statistics.
HistogramTimerScope timer(&Counters::compile_lazy);
HistogramTimerScope timer(isolate->counters()->compile_lazy());
// After parsing we know the function's strict mode. Remember it.
if (info->function()->strict_mode()) {
shared->set_strict_mode(true);
info->MarkAsStrictMode();
}
// Compile the code.
if (!MakeCode(info)) {
if (!Top::has_pending_exception()) {
Top::StackOverflow();
if (!isolate->has_pending_exception()) {
isolate->StackOverflow();
}
} else {
ASSERT(!info->code().is_null());
Handle<Code> code = info->code();
// Set optimizable to false if this is disallowed by the shared
// function info, e.g., we might have flushed the code and must
// reset this bit when lazy compiling the code again.
if (shared->optimization_disabled()) code->set_optimizable(false);
Handle<JSFunction> function = info->closure();
RecordFunctionCompilation(Logger::LAZY_COMPILE_TAG, info, shared);
@ -650,16 +650,18 @@ bool Compiler::CompileLazy(CompilationInfo* info) {
ASSERT(shared->is_compiled());
shared->set_code_age(0);
if (V8::UseCrankshaft() && info->AllowOptimize()) {
if (info->AllowOptimize() && !shared->optimization_disabled()) {
// If we're asked to always optimize, we compile the optimized
// version of the function right away - unless the debugger is
// active as it makes no sense to compile optimized code then.
if (FLAG_always_opt && !Debug::has_break_points()) {
if (FLAG_always_opt &&
!Isolate::Current()->DebuggerHasBreakPoints()) {
CompilationInfo optimized(function);
optimized.SetOptimizing(AstNode::kNoNumber);
return CompileLazy(&optimized);
} else if (CompilationCache::ShouldOptimizeEagerly(function)) {
RuntimeProfiler::OptimizeSoon(*function);
} else if (isolate->compilation_cache()->ShouldOptimizeEagerly(
function)) {
isolate->runtime_profiler()->OptimizeSoon(*function);
}
}
}
@ -679,56 +681,35 @@ Handle<SharedFunctionInfo> Compiler::BuildFunctionInfo(FunctionLiteral* literal,
CompilationInfo info(script);
info.SetFunction(literal);
info.SetScope(literal->scope());
if (literal->scope()->is_strict_mode()) info.MarkAsStrictMode();
if (script->type()->value() == Script::TYPE_NATIVE) info.MarkAsNative();
LiveEditFunctionTracker live_edit_tracker(literal);
LiveEditFunctionTracker live_edit_tracker(info.isolate(), literal);
// Determine if the function can be lazily compiled. This is necessary to
// allow some of our builtin JS files to be lazily compiled. These
// builtins cannot be handled lazily by the parser, since we have to know
// if a function uses the special natives syntax, which is something the
// parser records.
bool allow_lazy = literal->AllowsLazyCompilation() &&
!LiveEditFunctionTracker::IsActive();
!LiveEditFunctionTracker::IsActive(info.isolate());
Handle<SerializedScopeInfo> scope_info(SerializedScopeInfo::Empty());
// Generate code
if (FLAG_lazy && allow_lazy) {
Handle<Code> code(Builtins::builtin(Builtins::LazyCompile));
Handle<Code> code = info.isolate()->builtins()->LazyCompile();
info.SetCode(code);
} else {
if (V8::UseCrankshaft()) {
if (!MakeCrankshaftCode(&info)) {
return Handle<SharedFunctionInfo>::null();
}
} else {
// The bodies of function literals have not yet been visited by the
// AST optimizer/analyzer.
if (!Rewriter::Analyze(&info)) return Handle<SharedFunctionInfo>::null();
bool is_run_once = literal->try_full_codegen();
bool can_use_full = FLAG_full_compiler && !literal->contains_loops();
if (AlwaysFullCompiler() || (is_run_once && can_use_full)) {
if (!FullCodeGenerator::MakeCode(&info)) {
return Handle<SharedFunctionInfo>::null();
}
} else {
// We fall back to the classic V8 code generator.
if (!AssignedVariablesAnalyzer::Analyze(&info) ||
!CodeGenerator::MakeCode(&info)) {
return Handle<SharedFunctionInfo>::null();
}
}
}
} else if ((V8::UseCrankshaft() && MakeCrankshaftCode(&info)) ||
(!V8::UseCrankshaft() && FullCodeGenerator::MakeCode(&info))) {
ASSERT(!info.code().is_null());
// Function compilation complete.
scope_info = SerializedScopeInfo::Create(info.scope());
} else {
return Handle<SharedFunctionInfo>::null();
}
// Create a shared function info object.
Handle<SharedFunctionInfo> result =
Factory::NewSharedFunctionInfo(literal->name(),
FACTORY->NewSharedFunctionInfo(literal->name(),
literal->materialized_literal_count(),
info.code(),
scope_info);
@ -765,9 +746,10 @@ void Compiler::SetFunctionInfo(Handle<SharedFunctionInfo> function_info,
function_info->SetThisPropertyAssignmentsInfo(
lit->has_only_simple_this_property_assignments(),
*lit->this_property_assignments());
function_info->set_try_full_codegen(lit->try_full_codegen());
function_info->set_allows_lazy_compilation(lit->AllowsLazyCompilation());
function_info->set_strict_mode(lit->strict_mode());
function_info->set_uses_arguments(lit->scope()->arguments() != NULL);
function_info->set_has_duplicate_parameters(lit->has_duplicate_parameters());
}
@ -780,27 +762,31 @@ void Compiler::RecordFunctionCompilation(Logger::LogEventsAndTags tag,
// Log the code generation. If source information is available, include
// script name and line number. Check explicitly whether logging is
// enabled, as finding the line number is not free.
if (Logger::is_logging() || CpuProfiler::is_profiling()) {
if (info->isolate()->logger()->is_logging() ||
CpuProfiler::is_profiling(info->isolate())) {
Handle<Script> script = info->script();
Handle<Code> code = info->code();
if (*code == Builtins::builtin(Builtins::LazyCompile)) return;
if (*code == info->isolate()->builtins()->builtin(Builtins::kLazyCompile))
return;
if (script->name()->IsString()) {
int line_num = GetScriptLineNumber(script, shared->start_position()) + 1;
USE(line_num);
PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
PROFILE(info->isolate(),
CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*shared,
String::cast(script->name()),
line_num));
} else {
PROFILE(CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
PROFILE(info->isolate(),
CodeCreateEvent(Logger::ToNativeByScript(tag, *script),
*code,
*shared,
shared->DebugName()));
}
}
GDBJIT(AddCode(name,
GDBJIT(AddCode(Handle<String>(shared->DebugName()),
Handle<Script>(info->script()),
Handle<Code>(info->code())));
}

64
deps/v8/src/compiler.h

@ -1,4 +1,4 @@
// Copyright 2010 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -28,9 +28,8 @@
#ifndef V8_COMPILER_H_
#define V8_COMPILER_H_
#include "allocation.h"
#include "ast.h"
#include "frame-element.h"
#include "register-allocator.h"
#include "zone.h"
namespace v8 {
@ -46,10 +45,14 @@ class CompilationInfo BASE_EMBEDDED {
explicit CompilationInfo(Handle<SharedFunctionInfo> shared_info);
explicit CompilationInfo(Handle<JSFunction> closure);
Isolate* isolate() {
ASSERT(Isolate::Current() == isolate_);
return isolate_;
}
bool is_lazy() const { return (flags_ & IsLazy::mask()) != 0; }
bool is_eval() const { return (flags_ & IsEval::mask()) != 0; }
bool is_global() const { return (flags_ & IsGlobal::mask()) != 0; }
bool is_strict() const { return (flags_ & IsStrict::mask()) != 0; }
bool is_strict_mode() const { return (flags_ & IsStrictMode::mask()) != 0; }
bool is_in_loop() const { return (flags_ & IsInLoop::mask()) != 0; }
FunctionLiteral* function() const { return function_; }
Scope* scope() const { return scope_; }
@ -70,16 +73,28 @@ class CompilationInfo BASE_EMBEDDED {
ASSERT(!is_lazy());
flags_ |= IsGlobal::encode(true);
}
void MarkAsStrict() {
flags_ |= IsStrict::encode(true);
void MarkAsStrictMode() {
flags_ |= IsStrictMode::encode(true);
}
StrictModeFlag StrictMode() {
return is_strict() ? kStrictMode : kNonStrictMode;
return is_strict_mode() ? kStrictMode : kNonStrictMode;
}
void MarkAsInLoop() {
ASSERT(is_lazy());
flags_ |= IsInLoop::encode(true);
}
void MarkAsAllowingNativesSyntax() {
flags_ |= IsNativesSyntaxAllowed::encode(true);
}
bool allows_natives_syntax() const {
return IsNativesSyntaxAllowed::decode(flags_);
}
void MarkAsNative() {
flags_ |= IsNative::encode(true);
}
bool is_native() const {
return IsNative::decode(flags_);
}
void SetFunction(FunctionLiteral* literal) {
ASSERT(function_ == NULL);
function_ = literal;
@ -135,7 +150,13 @@ class CompilationInfo BASE_EMBEDDED {
return V8::UseCrankshaft() && !closure_.is_null();
}
// Disable all optimization attempts of this info for the rest of the
// current compilation pipeline.
void AbortOptimization();
private:
Isolate* isolate_;
// Compilation mode.
// BASE is generated by the full codegen, optionally prepared for bailouts.
// OPTIMIZE is optimized code generated by the Hydrogen-based backend.
@ -152,8 +173,9 @@ class CompilationInfo BASE_EMBEDDED {
void Initialize(Mode mode) {
mode_ = V8::UseCrankshaft() ? mode : NONOPT;
if (!shared_info_.is_null() && shared_info_->strict_mode()) {
MarkAsStrict();
if (!shared_info_.is_null()) {
if (shared_info_->strict_mode()) MarkAsStrictMode();
if (shared_info_->native()) MarkAsNative();
}
}
@ -173,7 +195,12 @@ class CompilationInfo BASE_EMBEDDED {
// Flags that can be set for lazy compilation.
class IsInLoop: public BitField<bool, 3, 1> {};
// Strict mode - used in eager compilation.
class IsStrict: public BitField<bool, 4, 1> {};
class IsStrictMode: public BitField<bool, 4, 1> {};
// Native syntax (%-stuff) allowed?
class IsNativesSyntaxAllowed: public BitField<bool, 5, 1> {};
// Is this a function from our natives.
class IsNative: public BitField<bool, 6, 1> {};
unsigned flags_;
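The accessors above (MarkAsStrictMode, is_native, and friends) pack their booleans into flags_ through V8's BitField template. A self-contained sketch of that encoding pattern follows; the simplified BitFieldSketch here only mirrors the shape of the real template and is illustrative, not V8's actual implementation.
#include <assert.h>
// Simplified BitField: packs a bool (or small enum) into `size` bits
// starting at `shift` within an unsigned word.
template <class T, int shift, int size>
class BitFieldSketch {
 public:
  static unsigned mask() { return ((1u << size) - 1) << shift; }
  static unsigned encode(T value) {
    return static_cast<unsigned>(value) << shift;
  }
  static T decode(unsigned flags) {
    return static_cast<T>((flags & mask()) >> shift);
  }
};
// Mirrors the flag declarations in CompilationInfo.
class IsStrictModeBit : public BitFieldSketch<bool, 4, 1> {};
class IsNativeBit : public BitFieldSketch<bool, 6, 1> {};
int main() {
  unsigned flags = 0;
  flags |= IsStrictModeBit::encode(true);  // like MarkAsStrictMode()
  assert(IsStrictModeBit::decode(flags));  // like is_strict_mode()
  assert(!IsNativeBit::decode(flags));     // like is_native()
  return 0;
}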
@ -225,6 +252,8 @@ class Compiler : public AllStatic {
// give up.
static const int kDefaultMaxOptCount = 10;
static const int kMaxInliningLevels = 3;
// All routines return a SharedFunctionInfo.
// If an error occurs an exception is raised and the return handle
// contains NULL.
@ -270,21 +299,6 @@ class Compiler : public AllStatic {
};
// During compilation we need a global list of handles to constants
// for frame elements. When the zone gets deleted, we make sure to
// clear this list of handles as well.
class CompilationZoneScope : public ZoneScope {
public:
explicit CompilationZoneScope(ZoneScopeMode mode) : ZoneScope(mode) { }
virtual ~CompilationZoneScope() {
if (ShouldDeleteOnExit()) {
FrameElement::ClearConstantList();
Result::ClearConstantList();
}
}
};
} } // namespace v8::internal
#endif // V8_COMPILER_H_

172
deps/v8/src/contexts.cc

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -34,6 +34,16 @@
namespace v8 {
namespace internal {
Context* Context::declaration_context() {
Context* current = this;
while (!current->IsFunctionContext() && !current->IsGlobalContext()) {
current = current->previous();
ASSERT(current->closure() == closure());
}
return current;
}
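declaration_context walks the previous() chain until it reaches a function or global context, i.e. the context where var declarations are hoisted. A standalone sketch of that chain walk over a hypothetical linked context structure:
#include <assert.h>
#include <stddef.h>
// Hypothetical flattened context node: only the fields needed for the walk.
struct ContextSketch {
  bool is_function_or_global;
  ContextSketch* previous;
};
// Mirrors Context::declaration_context: hop to previous until a context that
// can hold var declarations is found.
static ContextSketch* DeclarationContextSketch(ContextSketch* current) {
  while (!current->is_function_or_global) {
    current = current->previous;
  }
  return current;
}
int main() {
  ContextSketch function_ctx = { true, NULL };
  ContextSketch with_ctx = { false, &function_ctx };  // e.g. a 'with' context
  ContextSketch catch_ctx = { false, &with_ctx };     // e.g. a catch context
  assert(DeclarationContextSketch(&catch_ctx) == &function_ctx);
  return 0;
}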
JSBuiltinsObject* Context::builtins() {
GlobalObject* object = global();
if (object->IsJSGlobalObject()) {
@ -55,7 +65,7 @@ Context* Context::global_context() {
// During bootstrapping, the global object might not be set and we
// have to search the context chain to find the global context.
ASSERT(Bootstrapper::IsActive());
ASSERT(Isolate::Current()->bootstrapper()->IsActive());
Context* current = this;
while (!current->IsGlobalContext()) {
JSFunction* closure = JSFunction::cast(current->closure());
@ -74,9 +84,12 @@ void Context::set_global_proxy(JSObject* object) {
}
Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
int* index_, PropertyAttributes* attributes) {
Handle<Context> context(this);
Handle<Object> Context::Lookup(Handle<String> name,
ContextLookupFlags flags,
int* index_,
PropertyAttributes* attributes) {
Isolate* isolate = GetIsolate();
Handle<Context> context(this, isolate);
bool follow_context_chain = (flags & FOLLOW_CONTEXT_CHAIN) != 0;
*index_ = -1;
@ -95,39 +108,52 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
PrintF("\n");
}
// check extension/with object
// Check extension/with/global object.
if (context->has_extension()) {
Handle<JSObject> extension = Handle<JSObject>(context->extension());
// Context extension objects need to behave as if they have no
// prototype. So even if we want to follow prototype chains, we
// need to only do a local lookup for context extension objects.
if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
extension->IsJSContextExtensionObject()) {
*attributes = extension->GetLocalPropertyAttribute(*name);
if (context->IsCatchContext()) {
// Catch contexts have the variable name in the extension slot.
if (name->Equals(String::cast(context->extension()))) {
if (FLAG_trace_contexts) {
PrintF("=> found in catch context\n");
}
*index_ = Context::THROWN_OBJECT_INDEX;
*attributes = NONE;
return context;
}
} else {
*attributes = extension->GetPropertyAttribute(*name);
}
if (*attributes != ABSENT) {
// property found
if (FLAG_trace_contexts) {
PrintF("=> found property in context object %p\n",
reinterpret_cast<void*>(*extension));
// Global, function, and with contexts may have an object in the
// extension slot.
Handle<JSObject> extension(JSObject::cast(context->extension()),
isolate);
// Context extension objects need to behave as if they have no
// prototype. So even if we want to follow prototype chains, we
// need to only do a local lookup for context extension objects.
if ((flags & FOLLOW_PROTOTYPE_CHAIN) == 0 ||
extension->IsJSContextExtensionObject()) {
*attributes = extension->GetLocalPropertyAttribute(*name);
} else {
*attributes = extension->GetPropertyAttribute(*name);
}
if (*attributes != ABSENT) {
// property found
if (FLAG_trace_contexts) {
PrintF("=> found property in context object %p\n",
reinterpret_cast<void*>(*extension));
}
return extension;
}
return extension;
}
}
if (context->is_function_context()) {
// we have context-local slots
// check non-parameter locals in context
// Only functions can have locals, parameters, and a function name.
if (context->IsFunctionContext()) {
// We may have context-local slots. Check locals in the context.
Handle<SerializedScopeInfo> scope_info(
context->closure()->shared()->scope_info());
context->closure()->shared()->scope_info(), isolate);
Variable::Mode mode;
int index = scope_info->ContextSlotIndex(*name, &mode);
ASSERT(index < 0 || index >= MIN_CONTEXT_SLOTS);
if (index >= 0) {
// slot found
if (FLAG_trace_contexts) {
PrintF("=> found local in context slot %d (mode = %d)\n",
index, mode);
@ -140,39 +166,28 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
// declared variables that were introduced through declaration nodes)
// must not appear here.
switch (mode) {
case Variable::INTERNAL: // fall through
case Variable::VAR: *attributes = NONE; break;
case Variable::CONST: *attributes = READ_ONLY; break;
case Variable::DYNAMIC: UNREACHABLE(); break;
case Variable::DYNAMIC_GLOBAL: UNREACHABLE(); break;
case Variable::DYNAMIC_LOCAL: UNREACHABLE(); break;
case Variable::TEMPORARY: UNREACHABLE(); break;
case Variable::INTERNAL: // Fall through.
case Variable::VAR:
*attributes = NONE;
break;
case Variable::CONST:
*attributes = READ_ONLY;
break;
case Variable::DYNAMIC:
case Variable::DYNAMIC_GLOBAL:
case Variable::DYNAMIC_LOCAL:
case Variable::TEMPORARY:
UNREACHABLE();
break;
}
return context;
}
// check parameter locals in context
int param_index = scope_info->ParameterIndex(*name);
if (param_index >= 0) {
// slot found.
int index =
scope_info->ContextSlotIndex(Heap::arguments_shadow_symbol(), NULL);
ASSERT(index >= 0); // arguments must exist and be in the heap context
Handle<JSObject> arguments(JSObject::cast(context->get(index)));
ASSERT(arguments->HasLocalProperty(Heap::length_symbol()));
if (FLAG_trace_contexts) {
PrintF("=> found parameter %d in arguments object\n", param_index);
}
*index_ = param_index;
*attributes = NONE;
return arguments;
}
// check intermediate context (holding only the function name variable)
// Check the slot corresponding to the intermediate context holding
// only the function name variable.
if (follow_context_chain) {
int index = scope_info->FunctionContextSlotIndex(*name);
if (index >= 0) {
// slot found
if (FLAG_trace_contexts) {
PrintF("=> found intermediate function in context slot %d\n",
index);
@ -184,17 +199,14 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
}
}
// proceed with enclosing context
// Proceed with the previous context.
if (context->IsGlobalContext()) {
follow_context_chain = false;
} else if (context->is_function_context()) {
context = Handle<Context>(Context::cast(context->closure()->context()));
} else {
context = Handle<Context>(context->previous());
context = Handle<Context>(context->previous(), isolate);
}
} while (follow_context_chain);
// slot not found
if (FLAG_trace_contexts) {
PrintF("=> no property/slot found\n");
}
@ -209,11 +221,12 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
// before the global context and check that there are no context
// extension objects (conservative check for with statements).
while (!context->IsGlobalContext()) {
// Check if the context is potentially a with context.
// Check if the context is a catch or with context, or has introduced
// bindings by calling non-strict eval.
if (context->has_extension()) return false;
// Not a with context so it must be a function context.
ASSERT(context->is_function_context());
ASSERT(context->IsFunctionContext());
// Check non-parameter locals.
Handle<SerializedScopeInfo> scope_info(
@ -230,7 +243,7 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
// Check context only holding the function name variable.
index = scope_info->FunctionContextSlotIndex(*name);
if (index >= 0) return false;
context = Context::cast(context->closure()->context());
context = context->previous();
}
// No local or potential with statement found so the variable is
@ -239,6 +252,30 @@ bool Context::GlobalIfNotShadowedByEval(Handle<String> name) {
}
void Context::ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
bool* outer_scope_calls_non_strict_eval) {
// Walk up the context chain, checking all the function contexts to see
// whether they call eval.
Context* context = this;
while (!context->IsGlobalContext()) {
if (context->IsFunctionContext()) {
Handle<SerializedScopeInfo> scope_info(
context->closure()->shared()->scope_info());
if (scope_info->CallsEval()) {
*outer_scope_calls_eval = true;
if (!scope_info->IsStrictMode()) {
// No need to go further since the answers will not change from
// here.
*outer_scope_calls_non_strict_eval = true;
return;
}
}
}
context = context->previous();
}
}
void Context::AddOptimizedFunction(JSFunction* function) {
ASSERT(IsGlobalContext());
#ifdef DEBUG
@ -252,7 +289,7 @@ void Context::AddOptimizedFunction(JSFunction* function) {
// Check that the context belongs to the weak global contexts list.
bool found = false;
Object* context = Heap::global_contexts_list();
Object* context = GetHeap()->global_contexts_list();
while (!context->IsUndefined()) {
if (context == this) {
found = true;
@ -281,7 +318,7 @@ void Context::RemoveOptimizedFunction(JSFunction* function) {
} else {
prev->set_next_function_link(element_function->next_function_link());
}
element_function->set_next_function_link(Heap::undefined_value());
element_function->set_next_function_link(GetHeap()->undefined_value());
return;
}
prev = element_function;
@ -298,7 +335,7 @@ Object* Context::OptimizedFunctionsListHead() {
void Context::ClearOptimizedFunctions() {
set(OPTIMIZED_FUNCTIONS_LIST, Heap::undefined_value());
set(OPTIMIZED_FUNCTIONS_LIST, GetHeap()->undefined_value());
}
@ -306,14 +343,17 @@ void Context::ClearOptimizedFunctions() {
bool Context::IsBootstrappingOrContext(Object* object) {
// During bootstrapping we allow all objects to pass as
// contexts. This is necessary to fix circular dependencies.
return Bootstrapper::IsActive() || object->IsContext();
return Isolate::Current()->bootstrapper()->IsActive() || object->IsContext();
}
bool Context::IsBootstrappingOrGlobalObject(Object* object) {
// During bootstrapping we allow all objects to pass as global
// objects. This is necessary to fix circular dependencies.
return Bootstrapper::IsActive() || object->IsGlobalObject();
Isolate* isolate = Isolate::Current();
return isolate->heap()->gc_state() != Heap::NOT_IN_GC ||
isolate->bootstrapper()->IsActive() ||
object->IsGlobalObject();
}
#endif

98
deps/v8/src/contexts.h

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -78,11 +78,20 @@ enum ContextLookupFlags {
V(INSTANTIATE_FUN_INDEX, JSFunction, instantiate_fun) \
V(CONFIGURE_INSTANCE_FUN_INDEX, JSFunction, configure_instance_fun) \
V(FUNCTION_MAP_INDEX, Map, function_map) \
V(STRICT_MODE_FUNCTION_MAP_INDEX, Map, strict_mode_function_map) \
V(FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, function_without_prototype_map) \
V(STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX, Map, \
strict_mode_function_without_prototype_map) \
V(FUNCTION_INSTANCE_MAP_INDEX, Map, function_instance_map) \
V(STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX, Map, \
strict_mode_function_instance_map) \
V(JS_ARRAY_MAP_INDEX, Map, js_array_map)\
V(REGEXP_RESULT_MAP_INDEX, Map, regexp_result_map)\
V(ARGUMENTS_BOILERPLATE_INDEX, JSObject, arguments_boilerplate) \
V(ALIASED_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
aliased_arguments_boilerplate) \
V(STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX, JSObject, \
strict_mode_arguments_boilerplate) \
V(MESSAGE_LISTENERS_INDEX, JSObject, message_listeners) \
V(MAKE_MESSAGE_FUN_INDEX, JSFunction, make_message_fun) \
V(GET_STACK_TRACE_LINE_INDEX, JSFunction, get_stack_trace_line_fun) \
@ -99,7 +108,10 @@ enum ContextLookupFlags {
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
V(MAP_CACHE_INDEX, Object, map_cache) \
V(CONTEXT_DATA_INDEX, Object, data)
V(CONTEXT_DATA_INDEX, Object, data) \
V(ALLOW_CODE_GEN_FROM_STRINGS_INDEX, Object, allow_code_gen_from_strings) \
V(DERIVED_GET_TRAP_INDEX, JSFunction, derived_get_trap) \
V(DERIVED_SET_TRAP_INDEX, JSFunction, derived_set_trap)
// JSFunctions are pairs (context, function code), sometimes also called
// closures. A Context object is used to represent function contexts and
@ -118,13 +130,6 @@ enum ContextLookupFlags {
// statically allocated context slots. The names are needed
// for dynamic lookups in the presence of 'with' or 'eval'.
//
// [ fcontext ] A pointer to the innermost enclosing function context.
// It is the same for all contexts *allocated* inside a
// function, and the function context's fcontext points
// to itself. It is only needed for fast access of the
// function context (used for declarations, and static
// context slot access).
//
// [ previous ] A pointer to the previous context. It is NULL for
// function contexts, and non-NULL for 'with' contexts.
// Used to implement the 'with' statement.
@ -146,19 +151,6 @@ enum ContextLookupFlags {
// (via static context addresses) or through 'eval' (dynamic context lookups).
// Finally, the global context contains additional slots for fast access to
// global properties.
//
// We may be able to simplify the implementation:
//
// - We may be able to get rid of 'fcontext': We can always use the fact that
// previous == NULL for function contexts and so we can search for them. They
// are only needed when doing dynamic declarations, and the context chains
// tend to be very very short (depth of nesting of 'with' statements). At
// the moment we also use it in generated code for context slot accesses -
// and there we don't want a loop because of code bloat - but we may not
// need it there after all (see comment in codegen_*.cc).
//
// - If we cannot get rid of fcontext, consider making 'previous' never NULL
// except for the global context. This could simplify Context::Lookup.
class Context: public FixedArray {
public:
@ -172,21 +164,31 @@ class Context: public FixedArray {
enum {
// These slots are in all contexts.
CLOSURE_INDEX,
FCONTEXT_INDEX,
PREVIOUS_INDEX,
// The extension slot is used for either the global object (in global
// contexts), eval extension object (function contexts), subject of with
// (with contexts), or the variable name (catch contexts).
EXTENSION_INDEX,
GLOBAL_INDEX,
MIN_CONTEXT_SLOTS,
// This slot holds the thrown value in catch contexts.
THROWN_OBJECT_INDEX = MIN_CONTEXT_SLOTS,
// These slots are only in global contexts.
GLOBAL_PROXY_INDEX = MIN_CONTEXT_SLOTS,
SECURITY_TOKEN_INDEX,
ARGUMENTS_BOILERPLATE_INDEX,
ALIASED_ARGUMENTS_BOILERPLATE_INDEX,
STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX,
JS_ARRAY_MAP_INDEX,
REGEXP_RESULT_MAP_INDEX,
FUNCTION_MAP_INDEX,
STRICT_MODE_FUNCTION_MAP_INDEX,
FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
STRICT_MODE_FUNCTION_WITHOUT_PROTOTYPE_MAP_INDEX,
FUNCTION_INSTANCE_MAP_INDEX,
STRICT_MODE_FUNCTION_INSTANCE_MAP_INDEX,
INITIAL_OBJECT_PROTOTYPE_INDEX,
BOOLEAN_FUNCTION_INDEX,
NUMBER_FUNCTION_INDEX,
@ -225,6 +227,9 @@ class Context: public FixedArray {
OUT_OF_MEMORY_INDEX,
MAP_CACHE_INDEX,
CONTEXT_DATA_INDEX,
ALLOW_CODE_GEN_FROM_STRINGS_INDEX,
DERIVED_GET_TRAP_INDEX,
DERIVED_SET_TRAP_INDEX,
// Properties from here are treated as weak references by the full GC.
// Scavenge treats them as strong references.
@ -241,9 +246,6 @@ class Context: public FixedArray {
JSFunction* closure() { return JSFunction::cast(get(CLOSURE_INDEX)); }
void set_closure(JSFunction* closure) { set(CLOSURE_INDEX, closure); }
Context* fcontext() { return Context::cast(get(FCONTEXT_INDEX)); }
void set_fcontext(Context* context) { set(FCONTEXT_INDEX, context); }
Context* previous() {
Object* result = unchecked_previous();
ASSERT(IsBootstrappingOrContext(result));
@ -251,14 +253,17 @@ class Context: public FixedArray {
}
void set_previous(Context* context) { set(PREVIOUS_INDEX, context); }
bool has_extension() { return unchecked_extension() != NULL; }
JSObject* extension() { return JSObject::cast(unchecked_extension()); }
void set_extension(JSObject* object) { set(EXTENSION_INDEX, object); }
bool has_extension() { return extension() != NULL; }
Object* extension() { return get(EXTENSION_INDEX); }
void set_extension(Object* object) { set(EXTENSION_INDEX, object); }
// Get the context where var declarations will be hoisted to, which
// may be the context itself.
Context* declaration_context();
GlobalObject* global() {
Object* result = get(GLOBAL_INDEX);
ASSERT(Heap::gc_state() != Heap::NOT_IN_GC ||
IsBootstrappingOrGlobalObject(result));
ASSERT(IsBootstrappingOrGlobalObject(result));
return reinterpret_cast<GlobalObject*>(result);
}
void set_global(GlobalObject* global) { set(GLOBAL_INDEX, global); }
@ -273,18 +278,27 @@ class Context: public FixedArray {
// Compute the global context by traversing the context chain.
Context* global_context();
// Tells if this is a function context (as opposed to a 'with' context).
bool is_function_context() { return unchecked_previous() == NULL; }
// Predicates for context types. IsGlobalContext is defined on Object
// because we frequently have to know if arbitrary objects are global
// contexts.
bool IsFunctionContext() {
Map* map = this->map();
return map == map->GetHeap()->function_context_map();
}
bool IsCatchContext() {
Map* map = this->map();
return map == map->GetHeap()->catch_context_map();
}
bool IsWithContext() {
Map* map = this->map();
return map == map->GetHeap()->with_context_map();
}
// Tells whether the global context is marked with out of memory.
bool has_out_of_memory() {
return global_context()->out_of_memory() == Heap::true_value();
}
inline bool has_out_of_memory();
// Mark the global context with out of memory.
void mark_out_of_memory() {
global_context()->set_out_of_memory(Heap::true_value());
}
inline void mark_out_of_memory();
// The exception holder is the object used as a with object in
// the implementation of a catch block.
@ -343,6 +357,11 @@ class Context: public FixedArray {
// eval.
bool GlobalIfNotShadowedByEval(Handle<String> name);
// Determine if any function scope in the context chain calls eval, and if
// any of those calls are in non-strict mode.
void ComputeEvalScopeInfo(bool* outer_scope_calls_eval,
bool* outer_scope_calls_non_strict_eval);
// Code generation support.
static int SlotOffset(int index) {
return kHeaderSize + index * kPointerSize - kHeapObjectTag;
@ -362,7 +381,6 @@ class Context: public FixedArray {
private:
// Unchecked access to the slots.
Object* unchecked_previous() { return get(PREVIOUS_INDEX); }
Object* unchecked_extension() { return get(EXTENSION_INDEX); }
#ifdef DEBUG
// Bootstrapping-aware type checks.

4
deps/v8/src/conversions-inl.h

@ -60,11 +60,7 @@ static inline unsigned int FastD2UI(double x) {
if (x < k2Pow52) {
x += k2Pow52;
uint32_t result;
#ifdef BIG_ENDIAN_FLOATING_POINT
Address mantissa_ptr = reinterpret_cast<Address>(&x) + kIntSize;
#else
Address mantissa_ptr = reinterpret_cast<Address>(&x);
#endif
// Copy least significant 32 bits of mantissa.
memcpy(&result, mantissa_ptr, sizeof(result));
return negative ? ~result + 1 : result;
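For reference, a standalone sketch of the 2^52 trick used by FastD2UI above: adding 2^52 forces the integer value of x into the low bits of the double's mantissa, so the least significant 32 bits of the stored representation are the result (little-endian only; the negative-value and range handling of the real function is omitted here).
#include <assert.h>
#include <stdint.h>
#include <string.h>
// Simplified FastD2UI: assumes x is a non-negative integer value below 2^32,
// and a little-endian double representation.
static inline uint32_t FastD2UISketch(double x) {
  static const double k2Pow52 = 4503599627370496.0;  // 2^52
  x += k2Pow52;
  uint32_t result;
  memcpy(&result, &x, sizeof(result));  // low 32 bits of the mantissa
  return result;
}
int main() {
  assert(FastD2UISketch(12345.0) == 12345u);
  assert(FastD2UISketch(0.0) == 0u);
  return 0;
}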

132
deps/v8/src/conversions.cc

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -109,9 +109,11 @@ static const double JUNK_STRING_VALUE = OS::nan_value();
// Returns true if a nonspace character was found and false if the end was reached.
template <class Iterator, class EndMark>
static inline bool AdvanceToNonspace(Iterator* current, EndMark end) {
static inline bool AdvanceToNonspace(UnicodeCache* unicode_cache,
Iterator* current,
EndMark end) {
while (*current != end) {
if (!ScannerConstants::kIsWhiteSpace.get(**current)) return true;
if (!unicode_cache->IsWhiteSpace(**current)) return true;
++*current;
}
return false;
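The change above threads a per-isolate UnicodeCache through the number-parsing helpers instead of consulting the process-wide ScannerConstants table. A minimal standalone sketch of the same shape, with UnicodeCacheSketch as a hypothetical stand-in for the real cache:
#include <assert.h>
#include <string.h>
// Hypothetical stand-in for the per-isolate UnicodeCache; only the
// whitespace predicate matters for this sketch.
class UnicodeCacheSketch {
 public:
  bool IsWhiteSpace(int c) const { return c == ' ' || c == '\t' || c == '\n'; }
};
// Same contract as AdvanceToNonspace above: skip whitespace, report whether
// a non-space character remains before `end`.
template <class Iterator, class EndMark>
static bool AdvanceToNonspaceSketch(const UnicodeCacheSketch* unicode_cache,
                                    Iterator* current, EndMark end) {
  while (*current != end) {
    if (!unicode_cache->IsWhiteSpace(**current)) return true;
    ++*current;
  }
  return false;
}
int main() {
  UnicodeCacheSketch cache;
  const char* text = "   42";
  const char* current = text;
  assert(AdvanceToNonspaceSketch(&cache, &current, text + strlen(text)));
  assert(*current == '4');
  return 0;
}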
@ -132,7 +134,8 @@ static double SignedZero(bool negative) {
// Parsing integers with radix 2, 4, 8, 16, 32. Assumes current != end.
template <int radix_log_2, class Iterator, class EndMark>
static double InternalStringToIntDouble(Iterator current,
static double InternalStringToIntDouble(UnicodeCache* unicode_cache,
Iterator current,
EndMark end,
bool negative,
bool allow_trailing_junk) {
@ -157,7 +160,8 @@ static double InternalStringToIntDouble(Iterator current,
} else if (radix > 10 && *current >= 'A' && *current < 'A' + radix - 10) {
digit = static_cast<char>(*current) - 'A' + 10;
} else {
if (allow_trailing_junk || !AdvanceToNonspace(&current, end)) {
if (allow_trailing_junk ||
!AdvanceToNonspace(unicode_cache, &current, end)) {
break;
} else {
return JUNK_STRING_VALUE;
@ -188,7 +192,8 @@ static double InternalStringToIntDouble(Iterator current,
exponent += radix_log_2;
}
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
if (!allow_trailing_junk &&
AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE;
}
@ -232,11 +237,16 @@ static double InternalStringToIntDouble(Iterator current,
template <class Iterator, class EndMark>
static double InternalStringToInt(Iterator current, EndMark end, int radix) {
static double InternalStringToInt(UnicodeCache* unicode_cache,
Iterator current,
EndMark end,
int radix) {
const bool allow_trailing_junk = true;
const double empty_string_val = JUNK_STRING_VALUE;
if (!AdvanceToNonspace(&current, end)) return empty_string_val;
if (!AdvanceToNonspace(unicode_cache, &current, end)) {
return empty_string_val;
}
bool negative = false;
bool leading_zero = false;
@ -244,10 +254,14 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
if (*current == '+') {
// Ignore leading sign; skip following spaces.
++current;
if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
if (current == end) {
return JUNK_STRING_VALUE;
}
} else if (*current == '-') {
++current;
if (!AdvanceToNonspace(&current, end)) return JUNK_STRING_VALUE;
if (current == end) {
return JUNK_STRING_VALUE;
}
negative = true;
}
@ -298,21 +312,21 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
switch (radix) {
case 2:
return InternalStringToIntDouble<1>(
current, end, negative, allow_trailing_junk);
unicode_cache, current, end, negative, allow_trailing_junk);
case 4:
return InternalStringToIntDouble<2>(
current, end, negative, allow_trailing_junk);
unicode_cache, current, end, negative, allow_trailing_junk);
case 8:
return InternalStringToIntDouble<3>(
current, end, negative, allow_trailing_junk);
unicode_cache, current, end, negative, allow_trailing_junk);
case 16:
return InternalStringToIntDouble<4>(
current, end, negative, allow_trailing_junk);
unicode_cache, current, end, negative, allow_trailing_junk);
case 32:
return InternalStringToIntDouble<5>(
current, end, negative, allow_trailing_junk);
unicode_cache, current, end, negative, allow_trailing_junk);
default:
UNREACHABLE();
}
@ -337,7 +351,8 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
if (current == end) break;
}
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
if (!allow_trailing_junk &&
AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE;
}
@ -402,7 +417,8 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
v = v * multiplier + part;
} while (!done);
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
if (!allow_trailing_junk &&
AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE;
}
@ -416,7 +432,8 @@ static double InternalStringToInt(Iterator current, EndMark end, int radix) {
// 2. *current - gets the current character in the sequence.
// 3. ++current (advances the position).
template <class Iterator, class EndMark>
static double InternalStringToDouble(Iterator current,
static double InternalStringToDouble(UnicodeCache* unicode_cache,
Iterator current,
EndMark end,
int flags,
double empty_string_val) {
@ -428,7 +445,9 @@ static double InternalStringToDouble(Iterator current,
// 'parsing_done'.
// 4. 'current' is not dereferenced after the 'parsing_done' label.
// 5. Code before 'parsing_done' may rely on 'current != end'.
if (!AdvanceToNonspace(&current, end)) return empty_string_val;
if (!AdvanceToNonspace(unicode_cache, &current, end)) {
return empty_string_val;
}
const bool allow_trailing_junk = (flags & ALLOW_TRAILING_JUNK) != 0;
@ -463,7 +482,8 @@ static double InternalStringToDouble(Iterator current,
return JUNK_STRING_VALUE;
}
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
if (!allow_trailing_junk &&
AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE;
}
@ -485,7 +505,8 @@ static double InternalStringToDouble(Iterator current,
return JUNK_STRING_VALUE; // "0x".
}
return InternalStringToIntDouble<4>(current,
return InternalStringToIntDouble<4>(unicode_cache,
current,
end,
negative,
allow_trailing_junk);
@ -621,7 +642,8 @@ static double InternalStringToDouble(Iterator current,
exponent += (sign == '-' ? -num : num);
}
if (!allow_trailing_junk && AdvanceToNonspace(&current, end)) {
if (!allow_trailing_junk &&
AdvanceToNonspace(unicode_cache, &current, end)) {
return JUNK_STRING_VALUE;
}
@ -629,7 +651,8 @@ static double InternalStringToDouble(Iterator current,
exponent += insignificant_digits;
if (octal) {
return InternalStringToIntDouble<3>(buffer,
return InternalStringToIntDouble<3>(unicode_cache,
buffer,
buffer + buffer_pos,
negative,
allow_trailing_junk);
@ -648,19 +671,23 @@ static double InternalStringToDouble(Iterator current,
}
double StringToDouble(String* str, int flags, double empty_string_val) {
double StringToDouble(UnicodeCache* unicode_cache,
String* str, int flags, double empty_string_val) {
StringShape shape(str);
if (shape.IsSequentialAscii()) {
const char* begin = SeqAsciiString::cast(str)->GetChars();
const char* end = begin + str->length();
return InternalStringToDouble(begin, end, flags, empty_string_val);
return InternalStringToDouble(unicode_cache, begin, end, flags,
empty_string_val);
} else if (shape.IsSequentialTwoByte()) {
const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
const uc16* end = begin + str->length();
return InternalStringToDouble(begin, end, flags, empty_string_val);
return InternalStringToDouble(unicode_cache, begin, end, flags,
empty_string_val);
} else {
StringInputBuffer buffer(str);
return InternalStringToDouble(StringInputBufferIterator(&buffer),
return InternalStringToDouble(unicode_cache,
StringInputBufferIterator(&buffer),
StringInputBufferIterator::EndMarker(),
flags,
empty_string_val);
@ -668,36 +695,52 @@ double StringToDouble(String* str, int flags, double empty_string_val) {
}
double StringToInt(String* str, int radix) {
double StringToInt(UnicodeCache* unicode_cache,
String* str,
int radix) {
StringShape shape(str);
if (shape.IsSequentialAscii()) {
const char* begin = SeqAsciiString::cast(str)->GetChars();
const char* end = begin + str->length();
return InternalStringToInt(begin, end, radix);
return InternalStringToInt(unicode_cache, begin, end, radix);
} else if (shape.IsSequentialTwoByte()) {
const uc16* begin = SeqTwoByteString::cast(str)->GetChars();
const uc16* end = begin + str->length();
return InternalStringToInt(begin, end, radix);
return InternalStringToInt(unicode_cache, begin, end, radix);
} else {
StringInputBuffer buffer(str);
return InternalStringToInt(StringInputBufferIterator(&buffer),
return InternalStringToInt(unicode_cache,
StringInputBufferIterator(&buffer),
StringInputBufferIterator::EndMarker(),
radix);
}
}
double StringToDouble(const char* str, int flags, double empty_string_val) {
double StringToDouble(UnicodeCache* unicode_cache,
const char* str, int flags, double empty_string_val) {
const char* end = str + StrLength(str);
return InternalStringToDouble(str, end, flags, empty_string_val);
return InternalStringToDouble(unicode_cache, str, end, flags,
empty_string_val);
}
double StringToDouble(Vector<const char> str,
double StringToDouble(UnicodeCache* unicode_cache,
Vector<const char> str,
int flags,
double empty_string_val) {
const char* end = str.start() + str.length();
return InternalStringToDouble(str.start(), end, flags, empty_string_val);
return InternalStringToDouble(unicode_cache, str.start(), end, flags,
empty_string_val);
}
double StringToDouble(UnicodeCache* unicode_cache,
Vector<const uc16> str,
int flags,
double empty_string_val) {
const uc16* end = str.start() + str.length();
return InternalStringToDouble(unicode_cache, str.start(), end, flags,
empty_string_val);
}
@ -1066,4 +1109,23 @@ char* DoubleToRadixCString(double value, int radix) {
}
static Mutex* dtoa_lock_one = OS::CreateMutex();
static Mutex* dtoa_lock_zero = OS::CreateMutex();
} } // namespace v8::internal
extern "C" {
void ACQUIRE_DTOA_LOCK(int n) {
ASSERT(n == 0 || n == 1);
(n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->Lock();
}
void FREE_DTOA_LOCK(int n) {
ASSERT(n == 0 || n == 1);
(n == 0 ? v8::internal::dtoa_lock_zero : v8::internal::dtoa_lock_one)->
Unlock();
}
}

23
deps/v8/src/conversions.h

@ -1,4 +1,4 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -28,6 +28,8 @@
#ifndef V8_CONVERSIONS_H_
#define V8_CONVERSIONS_H_
#include "scanner-base.h"
namespace v8 {
namespace internal {
@ -91,15 +93,26 @@ static inline uint32_t NumberToUint32(Object* number);
// Converts a string into a double value according to ECMA-262 9.3.1
double StringToDouble(String* str, int flags, double empty_string_val = 0);
double StringToDouble(Vector<const char> str,
double StringToDouble(UnicodeCache* unicode_cache,
String* str,
int flags,
double empty_string_val = 0);
double StringToDouble(UnicodeCache* unicode_cache,
Vector<const char> str,
int flags,
double empty_string_val = 0);
double StringToDouble(UnicodeCache* unicode_cache,
Vector<const uc16> str,
int flags,
double empty_string_val = 0);
// This version expects a zero-terminated character array.
double StringToDouble(const char* str, int flags, double empty_string_val = 0);
double StringToDouble(UnicodeCache* unicode_cache,
const char* str,
int flags,
double empty_string_val = 0);
// Converts a string into an integer.
double StringToInt(String* str, int radix);
double StringToInt(UnicodeCache* unicode_cache, String* str, int radix);
// Converts a double to a string value according to ECMA-262 9.8.1.
// The buffer should be large enough for any floating point number.

23
deps/v8/src/counters.cc

@ -28,14 +28,22 @@
#include "v8.h"
#include "counters.h"
#include "isolate.h"
#include "platform.h"
namespace v8 {
namespace internal {
CounterLookupCallback StatsTable::lookup_function_ = NULL;
CreateHistogramCallback StatsTable::create_histogram_function_ = NULL;
AddHistogramSampleCallback StatsTable::add_histogram_sample_function_ = NULL;
StatsTable::StatsTable()
: lookup_function_(NULL),
create_histogram_function_(NULL),
add_histogram_sample_function_(NULL) {}
int* StatsCounter::FindLocationInStatsTable() const {
return Isolate::Current()->stats_table()->FindLocation(name_);
}
// Start the timer.
void StatsCounterTimer::Start() {
@ -71,8 +79,15 @@ void HistogramTimer::Stop() {
// Compute the delta between start and stop, in milliseconds.
int milliseconds = static_cast<int>(stop_time_ - start_time_) / 1000;
StatsTable::AddHistogramSample(histogram_, milliseconds);
Isolate::Current()->stats_table()->
AddHistogramSample(histogram_, milliseconds);
}
}
void* HistogramTimer::CreateHistogram() const {
return Isolate::Current()->stats_table()->
CreateHistogram(name_, 0, 10000, 50);
}
} } // namespace v8::internal

44
deps/v8/src/counters.h

@ -38,27 +38,27 @@ namespace internal {
// counters for monitoring. Counters can be looked up and
// manipulated by name.
class StatsTable : public AllStatic {
class StatsTable {
public:
// Register an application-defined function where
// counters can be looked up.
static void SetCounterFunction(CounterLookupCallback f) {
void SetCounterFunction(CounterLookupCallback f) {
lookup_function_ = f;
}
// Register an application-defined function to create
// a histogram for passing to the AddHistogramSample function
static void SetCreateHistogramFunction(CreateHistogramCallback f) {
void SetCreateHistogramFunction(CreateHistogramCallback f) {
create_histogram_function_ = f;
}
// Register an application-defined function to add a sample
// to a histogram created with the CreateHistogram function
static void SetAddHistogramSampleFunction(AddHistogramSampleCallback f) {
void SetAddHistogramSampleFunction(AddHistogramSampleCallback f) {
add_histogram_sample_function_ = f;
}
static bool HasCounterFunction() {
bool HasCounterFunction() const {
return lookup_function_ != NULL;
}
@ -68,7 +68,7 @@ class StatsTable : public AllStatic {
// may receive a different location to store it's counter.
// The return value must not be cached and re-used across
// threads, although a single thread is free to cache it.
static int* FindLocation(const char* name) {
int* FindLocation(const char* name) {
if (!lookup_function_) return NULL;
return lookup_function_(name);
}
@ -78,25 +78,31 @@ class StatsTable : public AllStatic {
// function. min and max define the expected minimum and maximum
// sample values. buckets is the maximum number of buckets
// that the samples will be grouped into.
static void* CreateHistogram(const char* name,
int min,
int max,
size_t buckets) {
void* CreateHistogram(const char* name,
int min,
int max,
size_t buckets) {
if (!create_histogram_function_) return NULL;
return create_histogram_function_(name, min, max, buckets);
}
// Add a sample to a histogram created with the CreateHistogram
// function.
static void AddHistogramSample(void* histogram, int sample) {
void AddHistogramSample(void* histogram, int sample) {
if (!add_histogram_sample_function_) return;
return add_histogram_sample_function_(histogram, sample);
}
private:
static CounterLookupCallback lookup_function_;
static CreateHistogramCallback create_histogram_function_;
static AddHistogramSampleCallback add_histogram_sample_function_;
StatsTable();
CounterLookupCallback lookup_function_;
CreateHistogramCallback create_histogram_function_;
AddHistogramSampleCallback add_histogram_sample_function_;
friend class Isolate;
DISALLOW_COPY_AND_ASSIGN(StatsTable);
};
// StatsCounters are dynamically created values which can be tracked in
@ -166,9 +172,12 @@ struct StatsCounter {
if (lookup_done_)
return ptr_;
lookup_done_ = true;
ptr_ = StatsTable::FindLocation(name_);
ptr_ = FindLocationInStatsTable();
return ptr_;
}
private:
int* FindLocationInStatsTable() const;
};
// StatsCounterTimer t = { { L"t:foo", NULL, false }, 0, 0 };
@ -216,10 +225,13 @@ struct HistogramTimer {
void* GetHistogram() {
if (!lookup_done_) {
lookup_done_ = true;
histogram_ = StatsTable::CreateHistogram(name_, 0, 10000, 50);
histogram_ = CreateHistogram();
}
return histogram_;
}
private:
void* CreateHistogram() const;
};
// Helper class for scoping a HistogramTimer.
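The StatsTable that backs these counters stops being an AllStatic class and becomes per-isolate state, but embedders still feed it through the public callbacks on v8::V8 (the same calls d8 makes later in this diff). A minimal sketch of that embedder-side hookup, assuming a toy std::map-backed store; LookupCounter, CreateHistogram, AddHistogramSample and InstallCounters are illustrative names, not part of this diff:

#include <map>
#include <string>
#include <v8.h>

static std::map<std::string, int> g_counters;

// Matches v8::CounterLookupCallback: return a stable int* per counter name.
static int* LookupCounter(const char* name) {
  return &g_counters[std::string(name)];
}

// Toy histogram callbacks matching v8::CreateHistogramCallback and
// v8::AddHistogramSampleCallback; a real embedder would bucket samples.
static void* CreateHistogram(const char* name, int min, int max,
                             size_t buckets) {
  return &g_counters[std::string(name)];
}

static void AddHistogramSample(void* histogram, int sample) {
  *static_cast<int*>(histogram) += sample;
}

void InstallCounters() {
  v8::V8::SetCounterFunction(LookupCounter);
  v8::V8::SetCreateHistogramFunction(CreateHistogram);
  v8::V8::SetAddHistogramSampleFunction(AddHistogramSample);
}

A node-based container such as std::map keeps the returned int* stable as new counters appear, which matters because FindLocation hands that pointer straight to generated code.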

22
deps/v8/src/cpu-profiler-inl.h

@ -32,6 +32,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
#include <new>
#include "circular-queue-inl.h"
#include "profile-generator-inl.h"
#include "unbound-queue-inl.h"
@ -41,8 +42,8 @@ namespace internal {
void CodeCreateEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->AddCode(start, entry, size);
if (sfi_address != NULL) {
entry->set_shared_id(code_map->GetSFITag(sfi_address));
if (shared != NULL) {
entry->set_shared_id(code_map->GetSharedId(shared));
}
}
@ -57,28 +58,15 @@ void CodeDeleteEventRecord::UpdateCodeMap(CodeMap* code_map) {
}
void SFIMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
void SharedFunctionInfoMoveEventRecord::UpdateCodeMap(CodeMap* code_map) {
code_map->MoveCode(from, to);
}
TickSampleEventRecord* TickSampleEventRecord::init(void* value) {
TickSampleEventRecord* result =
reinterpret_cast<TickSampleEventRecord*>(value);
result->filler = 1;
ASSERT(result->filler != SamplingCircularQueue::kClear);
// Init the required fields only.
result->sample.pc = NULL;
result->sample.frames_count = 0;
return result;
}
TickSample* ProfilerEventsProcessor::TickSampleEvent() {
generator_->Tick();
TickSampleEventRecord* evt =
TickSampleEventRecord::init(ticks_buffer_.Enqueue());
evt->order = enqueue_order_; // No increment!
new(ticks_buffer_.Enqueue()) TickSampleEventRecord(enqueue_order_);
return &evt->sample;
}
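The TickSampleEventRecord::init() helper above is replaced by a real constructor plus placement new into the slot returned by ticks_buffer_.Enqueue(). A generic sketch of that pattern, with a hypothetical Record/RingBuffer pair standing in for TickSampleEventRecord and SamplingCircularQueue:

#include <new>       // placement new
#include <stddef.h>

struct Record {
  explicit Record(unsigned order) : order_(order) {}
  unsigned order_;
};

class RingBuffer {
 public:
  RingBuffer() : pos_(0), storage_(operator new(kSlots * sizeof(Record))) {}
  ~RingBuffer() { operator delete(storage_); }
  // Hands out raw storage for the next slot; the caller constructs into it.
  void* Enqueue() {
    return static_cast<char*>(storage_) + (pos_++ % kSlots) * sizeof(Record);
  }
 private:
  static const size_t kSlots = 64;
  size_t pos_;
  void* storage_;
};

Record* Produce(RingBuffer* buffer, unsigned order) {
  // Same shape as the new code above:
  //   new (ticks_buffer_.Enqueue()) TickSampleEventRecord(enqueue_order_)
  return new (buffer->Enqueue()) Record(order);
}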

202
deps/v8/src/cpu-profiler.cc

@ -69,7 +69,7 @@ void ProfilerEventsProcessor::CallbackCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, prefix, name);
rec->size = 1;
rec->sfi_address = NULL;
rec->shared = NULL;
events_buffer_.Enqueue(evt_rec);
}
@ -80,7 +80,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
int line_number,
Address start,
unsigned size,
Address sfi_address) {
Address shared) {
if (FilterOutCodeCreateEvent(tag)) return;
CodeEventsContainer evt_rec;
CodeCreateEventRecord* rec = &evt_rec.CodeCreateEventRecord_;
@ -89,7 +89,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, name, resource_name, line_number);
rec->size = size;
rec->sfi_address = sfi_address;
rec->shared = shared;
events_buffer_.Enqueue(evt_rec);
}
@ -106,7 +106,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, name);
rec->size = size;
rec->sfi_address = NULL;
rec->shared = NULL;
events_buffer_.Enqueue(evt_rec);
}
@ -123,7 +123,7 @@ void ProfilerEventsProcessor::CodeCreateEvent(Logger::LogEventsAndTags tag,
rec->start = start;
rec->entry = generator_->NewCodeEntry(tag, args_count);
rec->size = size;
rec->sfi_address = NULL;
rec->shared = NULL;
events_buffer_.Enqueue(evt_rec);
}
@ -149,10 +149,12 @@ void ProfilerEventsProcessor::CodeDeleteEvent(Address from) {
}
void ProfilerEventsProcessor::SFIMoveEvent(Address from, Address to) {
void ProfilerEventsProcessor::SharedFunctionInfoMoveEvent(Address from,
Address to) {
CodeEventsContainer evt_rec;
SFIMoveEventRecord* rec = &evt_rec.SFIMoveEventRecord_;
rec->type = CodeEventRecord::SFI_MOVE;
SharedFunctionInfoMoveEventRecord* rec =
&evt_rec.SharedFunctionInfoMoveEventRecord_;
rec->type = CodeEventRecord::SHARED_FUNC_MOVE;
rec->order = ++enqueue_order_;
rec->from = from;
rec->to = to;
@ -179,18 +181,16 @@ void ProfilerEventsProcessor::RegExpCodeCreateEvent(
void ProfilerEventsProcessor::AddCurrentStack() {
TickSampleEventRecord record;
TickSampleEventRecord record(enqueue_order_);
TickSample* sample = &record.sample;
sample->state = Top::current_vm_state();
Isolate* isolate = Isolate::Current();
sample->state = isolate->current_vm_state();
sample->pc = reinterpret_cast<Address>(sample); // Not NULL.
sample->tos = NULL;
sample->frames_count = 0;
for (StackTraceFrameIterator it;
for (StackTraceFrameIterator it(isolate);
!it.done() && sample->frames_count < TickSample::kMaxFramesCount;
it.Advance()) {
sample->stack[sample->frames_count++] = it.frame()->pc();
}
record.order = enqueue_order_;
ticks_from_vm_buffer_.Enqueue(record);
}
@ -239,7 +239,7 @@ bool ProfilerEventsProcessor::ProcessTicks(unsigned dequeue_order) {
// A paranoid check to make sure that we don't get a memory overrun
// in case of frames_count having a wild value.
if (record.sample.frames_count < 0
|| record.sample.frames_count >= TickSample::kMaxFramesCount)
|| record.sample.frames_count > TickSample::kMaxFramesCount)
record.sample.frames_count = 0;
generator_->RecordTickSample(record.sample);
ticks_buffer_.FinishDequeue();
@ -270,82 +270,109 @@ void ProfilerEventsProcessor::Run() {
}
CpuProfiler* CpuProfiler::singleton_ = NULL;
Atomic32 CpuProfiler::is_profiling_ = false;
void CpuProfiler::StartProfiling(const char* title) {
ASSERT(singleton_ != NULL);
singleton_->StartCollectingProfile(title);
ASSERT(Isolate::Current()->cpu_profiler() != NULL);
Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
}
void CpuProfiler::StartProfiling(String* title) {
ASSERT(singleton_ != NULL);
singleton_->StartCollectingProfile(title);
ASSERT(Isolate::Current()->cpu_profiler() != NULL);
Isolate::Current()->cpu_profiler()->StartCollectingProfile(title);
}
CpuProfile* CpuProfiler::StopProfiling(const char* title) {
return is_profiling() ? singleton_->StopCollectingProfile(title) : NULL;
Isolate* isolate = Isolate::Current();
return is_profiling(isolate) ?
isolate->cpu_profiler()->StopCollectingProfile(title) : NULL;
}
CpuProfile* CpuProfiler::StopProfiling(Object* security_token, String* title) {
return is_profiling() ?
singleton_->StopCollectingProfile(security_token, title) : NULL;
Isolate* isolate = Isolate::Current();
return is_profiling(isolate) ?
isolate->cpu_profiler()->StopCollectingProfile(
security_token, title) : NULL;
}
int CpuProfiler::GetProfilesCount() {
ASSERT(singleton_ != NULL);
ASSERT(Isolate::Current()->cpu_profiler() != NULL);
// The count of profiles doesn't depend on a security token.
return singleton_->profiles_->Profiles(
return Isolate::Current()->cpu_profiler()->profiles_->Profiles(
TokenEnumerator::kNoSecurityToken)->length();
}
CpuProfile* CpuProfiler::GetProfile(Object* security_token, int index) {
ASSERT(singleton_ != NULL);
const int token = singleton_->token_enumerator_->GetTokenId(security_token);
return singleton_->profiles_->Profiles(token)->at(index);
ASSERT(Isolate::Current()->cpu_profiler() != NULL);
CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
const int token = profiler->token_enumerator_->GetTokenId(security_token);
return profiler->profiles_->Profiles(token)->at(index);
}
CpuProfile* CpuProfiler::FindProfile(Object* security_token, unsigned uid) {
ASSERT(singleton_ != NULL);
const int token = singleton_->token_enumerator_->GetTokenId(security_token);
return singleton_->profiles_->GetProfile(token, uid);
ASSERT(Isolate::Current()->cpu_profiler() != NULL);
CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
const int token = profiler->token_enumerator_->GetTokenId(security_token);
return profiler->profiles_->GetProfile(token, uid);
}
TickSample* CpuProfiler::TickSampleEvent() {
if (CpuProfiler::is_profiling()) {
return singleton_->processor_->TickSampleEvent();
TickSample* CpuProfiler::TickSampleEvent(Isolate* isolate) {
if (CpuProfiler::is_profiling(isolate)) {
return isolate->cpu_profiler()->processor_->TickSampleEvent();
} else {
return NULL;
}
}
void CpuProfiler::DeleteAllProfiles() {
Isolate* isolate = Isolate::Current();
ASSERT(isolate->cpu_profiler() != NULL);
if (is_profiling(isolate)) {
isolate->cpu_profiler()->StopProcessor();
}
isolate->cpu_profiler()->ResetProfiles();
}
void CpuProfiler::DeleteProfile(CpuProfile* profile) {
ASSERT(Isolate::Current()->cpu_profiler() != NULL);
Isolate::Current()->cpu_profiler()->profiles_->RemoveProfile(profile);
delete profile;
}
bool CpuProfiler::HasDetachedProfiles() {
ASSERT(Isolate::Current()->cpu_profiler() != NULL);
return Isolate::Current()->cpu_profiler()->profiles_->HasDetachedProfiles();
}
void CpuProfiler::CallbackEvent(String* name, Address entry_point) {
singleton_->processor_->CallbackCreateEvent(
Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
Logger::CALLBACK_TAG, CodeEntry::kEmptyNamePrefix, name, entry_point);
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, const char* comment) {
singleton_->processor_->CodeCreateEvent(
Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
tag, comment, code->address(), code->ExecutableSize());
}
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, String* name) {
singleton_->processor_->CodeCreateEvent(
Isolate* isolate = Isolate::Current();
isolate->cpu_profiler()->processor_->CodeCreateEvent(
tag,
name,
Heap::empty_string(),
isolate->heap()->empty_string(),
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize(),
@ -357,10 +384,11 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* name) {
singleton_->processor_->CodeCreateEvent(
Isolate* isolate = Isolate::Current();
isolate->cpu_profiler()->processor_->CodeCreateEvent(
tag,
name,
Heap::empty_string(),
isolate->heap()->empty_string(),
v8::CpuProfileNode::kNoLineNumberInfo,
code->address(),
code->ExecutableSize(),
@ -372,7 +400,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code,
SharedFunctionInfo* shared,
String* source, int line) {
singleton_->processor_->CodeCreateEvent(
Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
tag,
shared->DebugName(),
source,
@ -385,7 +413,7 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
Code* code, int args_count) {
singleton_->processor_->CodeCreateEvent(
Isolate::Current()->cpu_profiler()->processor_->CodeCreateEvent(
tag,
args_count,
code->address(),
@ -394,28 +422,29 @@ void CpuProfiler::CodeCreateEvent(Logger::LogEventsAndTags tag,
void CpuProfiler::CodeMoveEvent(Address from, Address to) {
singleton_->processor_->CodeMoveEvent(from, to);
Isolate::Current()->cpu_profiler()->processor_->CodeMoveEvent(from, to);
}
void CpuProfiler::CodeDeleteEvent(Address from) {
singleton_->processor_->CodeDeleteEvent(from);
Isolate::Current()->cpu_profiler()->processor_->CodeDeleteEvent(from);
}
void CpuProfiler::SFIMoveEvent(Address from, Address to) {
singleton_->processor_->SFIMoveEvent(from, to);
void CpuProfiler::SharedFunctionInfoMoveEvent(Address from, Address to) {
CpuProfiler* profiler = Isolate::Current()->cpu_profiler();
profiler->processor_->SharedFunctionInfoMoveEvent(from, to);
}
void CpuProfiler::GetterCallbackEvent(String* name, Address entry_point) {
singleton_->processor_->CallbackCreateEvent(
Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
Logger::CALLBACK_TAG, "get ", name, entry_point);
}
void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
singleton_->processor_->RegExpCodeCreateEvent(
Isolate::Current()->cpu_profiler()->processor_->RegExpCodeCreateEvent(
Logger::REG_EXP_TAG,
"RegExp: ",
source,
@ -425,7 +454,7 @@ void CpuProfiler::RegExpCodeCreateEvent(Code* code, String* source) {
void CpuProfiler::SetterCallbackEvent(String* name, Address entry_point) {
singleton_->processor_->CallbackCreateEvent(
Isolate::Current()->cpu_profiler()->processor_->CallbackCreateEvent(
Logger::CALLBACK_TAG, "set ", name, entry_point);
}
@ -435,7 +464,9 @@ CpuProfiler::CpuProfiler()
next_profile_uid_(1),
token_enumerator_(new TokenEnumerator()),
generator_(NULL),
processor_(NULL) {
processor_(NULL),
need_to_stop_sampler_(false),
is_profiling_(false) {
}
@ -445,6 +476,11 @@ CpuProfiler::~CpuProfiler() {
}
void CpuProfiler::ResetProfiles() {
delete profiles_;
profiles_ = new CpuProfilesCollection();
}
void CpuProfiler::StartCollectingProfile(const char* title) {
if (profiles_->StartProfiling(title, next_profile_uid_++)) {
StartProcessorIfNotStarted();
@ -460,27 +496,32 @@ void CpuProfiler::StartCollectingProfile(String* title) {
void CpuProfiler::StartProcessorIfNotStarted() {
if (processor_ == NULL) {
Isolate* isolate = Isolate::Current();
// Disable logging when using the new implementation.
saved_logging_nesting_ = Logger::logging_nesting_;
Logger::logging_nesting_ = 0;
saved_logging_nesting_ = isolate->logger()->logging_nesting_;
isolate->logger()->logging_nesting_ = 0;
generator_ = new ProfileGenerator(profiles_);
processor_ = new ProfilerEventsProcessor(generator_);
NoBarrier_Store(&is_profiling_, true);
processor_->Start();
// Enumerate stuff we already have in the heap.
if (Heap::HasBeenSetup()) {
if (isolate->heap()->HasBeenSetup()) {
if (!FLAG_prof_browser_mode) {
bool saved_log_code_flag = FLAG_log_code;
FLAG_log_code = true;
Logger::LogCodeObjects();
isolate->logger()->LogCodeObjects();
FLAG_log_code = saved_log_code_flag;
}
Logger::LogCompiledFunctions();
Logger::LogAccessorCallbacks();
isolate->logger()->LogCompiledFunctions();
isolate->logger()->LogAccessorCallbacks();
}
// Enable stack sampling.
Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
if (!sampler->IsActive()) sampler->Start();
Sampler* sampler = reinterpret_cast<Sampler*>(isolate->logger()->ticker_);
if (!sampler->IsActive()) {
sampler->Start();
need_to_stop_sampler_ = true;
}
sampler->IncreaseProfilingDepth();
}
}
@ -511,19 +552,26 @@ CpuProfile* CpuProfiler::StopCollectingProfile(Object* security_token,
void CpuProfiler::StopProcessorIfLastProfile(const char* title) {
if (profiles_->IsLastProfile(title)) {
Sampler* sampler = reinterpret_cast<Sampler*>(Logger::ticker_);
sampler->DecreaseProfilingDepth();
if (profiles_->IsLastProfile(title)) StopProcessor();
}
void CpuProfiler::StopProcessor() {
Logger* logger = Isolate::Current()->logger();
Sampler* sampler = reinterpret_cast<Sampler*>(logger->ticker_);
sampler->DecreaseProfilingDepth();
if (need_to_stop_sampler_) {
sampler->Stop();
processor_->Stop();
processor_->Join();
delete processor_;
delete generator_;
processor_ = NULL;
NoBarrier_Store(&is_profiling_, false);
generator_ = NULL;
Logger::logging_nesting_ = saved_logging_nesting_;
need_to_stop_sampler_ = false;
}
processor_->Stop();
processor_->Join();
delete processor_;
delete generator_;
processor_ = NULL;
NoBarrier_Store(&is_profiling_, false);
generator_ = NULL;
logger->logging_nesting_ = saved_logging_nesting_;
}
} } // namespace v8::internal
@ -535,8 +583,9 @@ namespace internal {
void CpuProfiler::Setup() {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (singleton_ == NULL) {
singleton_ = new CpuProfiler();
Isolate* isolate = Isolate::Current();
if (isolate->cpu_profiler() == NULL) {
isolate->set_cpu_profiler(new CpuProfiler());
}
#endif
}
@ -544,10 +593,11 @@ void CpuProfiler::Setup() {
void CpuProfiler::TearDown() {
#ifdef ENABLE_LOGGING_AND_PROFILING
if (singleton_ != NULL) {
delete singleton_;
Isolate* isolate = Isolate::Current();
if (isolate->cpu_profiler() != NULL) {
delete isolate->cpu_profiler();
}
singleton_ = NULL;
isolate->set_cpu_profiler(NULL);
#endif
}
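Setup() and TearDown() now create and destroy a per-isolate profiler through Isolate::Current() instead of managing the old singleton_. A stripped-down sketch of that static-facade-over-per-isolate-state shape, using hypothetical Isolate and Profiler classes rather than V8's real ones:

#include <stddef.h>

class Profiler;

class Isolate {
 public:
  Isolate() : profiler_(NULL) {}
  // Stand-in for V8's thread-local Isolate::Current(); one isolate only here.
  static Isolate* Current() {
    static Isolate current;
    return &current;
  }
  Profiler* profiler() const { return profiler_; }
  void set_profiler(Profiler* profiler) { profiler_ = profiler; }
 private:
  Profiler* profiler_;
};

class Profiler {
 public:
  // The static facade keeps existing call sites compiling while the state
  // itself now hangs off the isolate instead of a process-wide singleton_.
  static void Setup() {
    Isolate* isolate = Isolate::Current();
    if (isolate->profiler() == NULL) isolate->set_profiler(new Profiler());
  }
  static void TearDown() {
    Isolate* isolate = Isolate::Current();
    delete isolate->profiler();
    isolate->set_profiler(NULL);
  }
};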

70
deps/v8/src/cpu-profiler.h

@ -30,6 +30,7 @@
#ifdef ENABLE_LOGGING_AND_PROFILING
#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
#include "unbound-queue.h"
@ -46,11 +47,11 @@ class HashMap;
class ProfileGenerator;
class TokenEnumerator;
#define CODE_EVENTS_TYPE_LIST(V) \
V(CODE_CREATION, CodeCreateEventRecord) \
V(CODE_MOVE, CodeMoveEventRecord) \
V(CODE_DELETE, CodeDeleteEventRecord) \
V(SFI_MOVE, SFIMoveEventRecord)
#define CODE_EVENTS_TYPE_LIST(V) \
V(CODE_CREATION, CodeCreateEventRecord) \
V(CODE_MOVE, CodeMoveEventRecord) \
V(CODE_DELETE, CodeDeleteEventRecord) \
V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)
class CodeEventRecord {
@ -73,7 +74,7 @@ class CodeCreateEventRecord : public CodeEventRecord {
Address start;
CodeEntry* entry;
unsigned size;
Address sfi_address;
Address shared;
INLINE(void UpdateCodeMap(CodeMap* code_map));
};
@ -96,7 +97,7 @@ class CodeDeleteEventRecord : public CodeEventRecord {
};
class SFIMoveEventRecord : public CodeEventRecord {
class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
public:
Address from;
Address to;
@ -105,10 +106,14 @@ class SFIMoveEventRecord : public CodeEventRecord {
};
class TickSampleEventRecord BASE_EMBEDDED {
class TickSampleEventRecord {
public:
TickSampleEventRecord()
: filler(1) {
// The parameterless constructor is used when we dequeue data from
// the ticks buffer.
TickSampleEventRecord() { }
explicit TickSampleEventRecord(unsigned order)
: filler(1),
order(order) {
ASSERT(filler != SamplingCircularQueue::kClear);
}
@ -124,8 +129,6 @@ class TickSampleEventRecord BASE_EMBEDDED {
static TickSampleEventRecord* cast(void* value) {
return reinterpret_cast<TickSampleEventRecord*>(value);
}
INLINE(static TickSampleEventRecord* init(void* value));
};
@ -149,7 +152,7 @@ class ProfilerEventsProcessor : public Thread {
String* name,
String* resource_name, int line_number,
Address start, unsigned size,
Address sfi_address);
Address shared);
void CodeCreateEvent(Logger::LogEventsAndTags tag,
const char* name,
Address start, unsigned size);
@ -158,7 +161,7 @@ class ProfilerEventsProcessor : public Thread {
Address start, unsigned size);
void CodeMoveEvent(Address from, Address to);
void CodeDeleteEvent(Address from);
void SFIMoveEvent(Address from, Address to);
void SharedFunctionInfoMoveEvent(Address from, Address to);
void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
const char* prefix, String* name,
Address start, unsigned size);
@ -196,21 +199,23 @@ class ProfilerEventsProcessor : public Thread {
} } // namespace v8::internal
#define PROFILE(Call) \
LOG(Call); \
do { \
if (v8::internal::CpuProfiler::is_profiling()) { \
v8::internal::CpuProfiler::Call; \
} \
#define PROFILE(isolate, Call) \
LOG(isolate, Call); \
do { \
if (v8::internal::CpuProfiler::is_profiling(isolate)) { \
v8::internal::CpuProfiler::Call; \
} \
} while (false)
#else
#define PROFILE(Call) LOG(Call)
#define PROFILE(isolate, Call) LOG(isolate, Call)
#endif // ENABLE_LOGGING_AND_PROFILING
namespace v8 {
namespace internal {
// TODO(isolates): isolatify this class.
class CpuProfiler {
public:
static void Setup();
@ -224,9 +229,12 @@ class CpuProfiler {
static int GetProfilesCount();
static CpuProfile* GetProfile(Object* security_token, int index);
static CpuProfile* FindProfile(Object* security_token, unsigned uid);
static void DeleteAllProfiles();
static void DeleteProfile(CpuProfile* profile);
static bool HasDetachedProfiles();
// Invoked from stack sampler (thread or signal handler).
static TickSample* TickSampleEvent();
static TickSample* TickSampleEvent(Isolate* isolate);
// Must be called via PROFILE macro, otherwise will crash when
// profiling is not enabled.
@ -251,10 +259,13 @@ class CpuProfiler {
static void GetterCallbackEvent(String* name, Address entry_point);
static void RegExpCodeCreateEvent(Code* code, String* source);
static void SetterCallbackEvent(String* name, Address entry_point);
static void SFIMoveEvent(Address from, Address to);
static void SharedFunctionInfoMoveEvent(Address from, Address to);
static INLINE(bool is_profiling()) {
return NoBarrier_Load(&is_profiling_);
// TODO(isolates): this doesn't have to use atomics anymore.
static INLINE(bool is_profiling(Isolate* isolate)) {
CpuProfiler* profiler = isolate->cpu_profiler();
return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
}
private:
@ -266,6 +277,8 @@ class CpuProfiler {
CpuProfile* StopCollectingProfile(const char* title);
CpuProfile* StopCollectingProfile(Object* security_token, String* title);
void StopProcessorIfLastProfile(const char* title);
void StopProcessor();
void ResetProfiles();
CpuProfilesCollection* profiles_;
unsigned next_profile_uid_;
@ -273,12 +286,11 @@ class CpuProfiler {
ProfileGenerator* generator_;
ProfilerEventsProcessor* processor_;
int saved_logging_nesting_;
static CpuProfiler* singleton_;
static Atomic32 is_profiling_;
bool need_to_stop_sampler_;
Atomic32 is_profiling_;
#else
static INLINE(bool is_profiling()) { return false; }
static INLINE(bool is_profiling(Isolate* isolate)) { return false; }
#endif // ENABLE_LOGGING_AND_PROFILING
private:
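For reference, the PROFILE macro change above means every call site gains an isolate argument; the call sites themselves live in logger.cc, which is not shown in this excerpt. Illustratively:

// Before this change:
//   PROFILE(CodeMoveEvent(from, to));
// After this change:
//   PROFILE(isolate, CodeMoveEvent(from, to));
//
// With profiling compiled out, the macro still degenerates to plain logging:
//   #define PROFILE(isolate, Call) LOG(isolate, Call)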

4
deps/v8/src/cpu.h

@ -36,6 +36,8 @@
#ifndef V8_CPU_H_
#define V8_CPU_H_
#include "allocation.h"
namespace v8 {
namespace internal {
@ -53,6 +55,8 @@ class CPU : public AllStatic {
// Initializes the cpu architecture support. Called once at VM startup.
static void Setup();
static bool SupportsCrankshaft();
// Flush instruction cache.
static void FlushICache(void* start, size_t size);

2
deps/v8/src/d8-debug.cc

@ -272,6 +272,7 @@ RemoteDebuggerEvent* RemoteDebugger::GetEvent() {
void RemoteDebugger::HandleMessageReceived(char* message) {
Locker lock;
HandleScope scope;
// Print the event details.
@ -300,6 +301,7 @@ void RemoteDebugger::HandleMessageReceived(char* message) {
void RemoteDebugger::HandleKeyboardCommand(char* command) {
Locker lock;
HandleScope scope;
// Convert the debugger command to a JSON debugger request.
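Both handlers above now take the v8::Locker before opening a HandleScope, because they run on the remote debugger's own thread rather than the main shell thread. A minimal sketch of that pattern (OnDebuggerThreadMessage is a hypothetical stand-in for HandleMessageReceived):

#include <v8.h>

void OnDebuggerThreadMessage(const char* message) {
  v8::Locker lock;        // acquire the global V8 lock for this thread
  v8::HandleScope scope;  // only now is it safe to create handles
  v8::Handle<v8::String> text = v8::String::New(message);
  // ... hand `text` to the debugger front end ...
  (void) text;
}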

13
deps/v8/src/d8-posix.cc

@ -311,10 +311,6 @@ static Handle<Value> GetStdout(int child_fd,
int read_timeout,
int total_timeout) {
Handle<String> accumulator = String::Empty();
const char* source = "(function(a, b) { return a + b; })";
Handle<Value> cons_as_obj(Script::Compile(String::New(source))->Run());
Handle<Function> cons_function(Function::Cast(*cons_as_obj));
Handle<Value> cons_args[2];
int fullness = 0;
static const int kStdoutReadBufferSize = 4096;
@ -350,12 +346,7 @@ static Handle<Value> GetStdout(int child_fd,
bytes_read + fullness :
LengthWithoutIncompleteUtf8(buffer, bytes_read + fullness);
Handle<String> addition = String::New(buffer, length);
cons_args[0] = accumulator;
cons_args[1] = addition;
accumulator = Handle<String>::Cast(cons_function->Call(
Shell::utility_context()->Global(),
2,
cons_args));
accumulator = String::Concat(accumulator, addition);
fullness = bytes_read + fullness - length;
memcpy(buffer, buffer + length, fullness);
}
@ -375,8 +366,10 @@ static Handle<Value> GetStdout(int child_fd,
// a parent process hangs on waiting while a child process is already a zombie.
// See http://code.google.com/p/v8/issues/detail?id=401.
#if defined(WNOWAIT) && !defined(ANDROID) && !defined(__APPLE__)
#if !defined(__FreeBSD__)
#define HAS_WAITID 1
#endif
#endif
// Get exit status of child.
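GetStdout previously compiled a small JavaScript function just to concatenate chunks of child output; it now calls v8::String::Concat directly. A minimal sketch of that accumulation pattern against the 3.x-era API (JoinChunks is an illustrative helper, not part of d8):

#include <v8.h>

v8::Handle<v8::String> JoinChunks(const char** chunks, int count) {
  v8::HandleScope scope;
  v8::Handle<v8::String> accumulator = v8::String::Empty();
  for (int i = 0; i < count; i++) {
    v8::Handle<v8::String> addition = v8::String::New(chunks[i]);
    // Concat builds a cons string, so repeated appends stay cheap.
    accumulator = v8::String::Concat(accumulator, addition);
  }
  return scope.Close(accumulator);
}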

2
deps/v8/src/d8-readline.cc

@ -30,6 +30,8 @@
#include <readline/readline.h> // NOLINT
#include <readline/history.h> // NOLINT
// The readline includes leave RETURN defined, which breaks V8 compilation.
#undef RETURN
#include "d8.h"

443
deps/v8/src/d8.cc

@ -1,4 +1,4 @@
// Copyright 2009 the V8 project authors. All rights reserved.
// Copyright 2011 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
@ -26,8 +26,13 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#include <stdlib.h>
#ifdef COMPRESS_STARTUP_DATA_BZ2
#include <bzlib.h>
#endif
#include <errno.h>
#include <stdlib.h>
#include "v8.h"
#include "d8.h"
#include "d8-debug.h"
@ -216,6 +221,103 @@ Handle<Value> Shell::Load(const Arguments& args) {
}
Handle<Value> Shell::CreateExternalArray(const Arguments& args,
ExternalArrayType type,
size_t element_size) {
ASSERT(element_size == 1 || element_size == 2 || element_size == 4 ||
element_size == 8);
if (args.Length() != 1) {
return ThrowException(
String::New("Array constructor needs one parameter."));
}
size_t length = 0;
if (args[0]->IsUint32()) {
length = args[0]->Uint32Value();
} else if (args[0]->IsNumber()) {
double raw_length = args[0]->NumberValue();
if (raw_length < 0) {
return ThrowException(String::New("Array length must not be negative."));
}
if (raw_length > v8::internal::ExternalArray::kMaxLength) {
return ThrowException(
String::New("Array length exceeds maximum length."));
}
length = static_cast<size_t>(raw_length);
} else {
return ThrowException(String::New("Array length must be a number."));
}
if (length > static_cast<size_t>(internal::ExternalArray::kMaxLength)) {
return ThrowException(String::New("Array length exceeds maximum length."));
}
void* data = calloc(length, element_size);
if (data == NULL) {
return ThrowException(String::New("Memory allocation failed."));
}
Handle<Object> array = Object::New();
Persistent<Object> persistent_array = Persistent<Object>::New(array);
persistent_array.MakeWeak(data, ExternalArrayWeakCallback);
persistent_array.MarkIndependent();
array->SetIndexedPropertiesToExternalArrayData(data, type, length);
array->Set(String::New("length"), Int32::New(length), ReadOnly);
array->Set(String::New("BYTES_PER_ELEMENT"), Int32::New(element_size));
return array;
}
void Shell::ExternalArrayWeakCallback(Persistent<Value> object, void* data) {
free(data);
object.Dispose();
}
Handle<Value> Shell::Int8Array(const Arguments& args) {
return CreateExternalArray(args, v8::kExternalByteArray, sizeof(int8_t));
}
Handle<Value> Shell::Uint8Array(const Arguments& args) {
return CreateExternalArray(args, kExternalUnsignedByteArray, sizeof(uint8_t));
}
Handle<Value> Shell::Int16Array(const Arguments& args) {
return CreateExternalArray(args, kExternalShortArray, sizeof(int16_t));
}
Handle<Value> Shell::Uint16Array(const Arguments& args) {
return CreateExternalArray(args, kExternalUnsignedShortArray,
sizeof(uint16_t));
}
Handle<Value> Shell::Int32Array(const Arguments& args) {
return CreateExternalArray(args, kExternalIntArray, sizeof(int32_t));
}
Handle<Value> Shell::Uint32Array(const Arguments& args) {
return CreateExternalArray(args, kExternalUnsignedIntArray, sizeof(uint32_t));
}
Handle<Value> Shell::Float32Array(const Arguments& args) {
return CreateExternalArray(args, kExternalFloatArray,
sizeof(float)); // NOLINT
}
Handle<Value> Shell::Float64Array(const Arguments& args) {
return CreateExternalArray(args, kExternalDoubleArray,
sizeof(double)); // NOLINT
}
Handle<Value> Shell::PixelArray(const Arguments& args) {
return CreateExternalArray(args, kExternalPixelArray, sizeof(uint8_t));
}
Handle<Value> Shell::Yield(const Arguments& args) {
v8::Unlocker unlocker;
return Undefined();
@ -264,6 +366,11 @@ void Shell::ReportException(v8::TryCatch* try_catch) {
printf("^");
}
printf("\n");
v8::String::Utf8Value stack_trace(try_catch->StackTrace());
if (stack_trace.length() > 0) {
const char* stack_trace_string = ToCString(stack_trace);
printf("%s\n", stack_trace_string);
}
}
}
@ -401,20 +508,79 @@ void Shell::AddHistogramSample(void* histogram, int sample) {
counter->AddSample(sample);
}
void Shell::InstallUtilityScript() {
Locker lock;
HandleScope scope;
// If we use the utility context, we have to set the security tokens so that
// utility, evaluation and debug context can all access each other.
utility_context_->SetSecurityToken(Undefined());
evaluation_context_->SetSecurityToken(Undefined());
Context::Scope utility_scope(utility_context_);
void Shell::Initialize() {
Shell::counter_map_ = new CounterMap();
// Set up counters
if (i::StrLength(i::FLAG_map_counters) != 0)
MapCounters(i::FLAG_map_counters);
if (i::FLAG_dump_counters) {
V8::SetCounterFunction(LookupCounter);
V8::SetCreateHistogramFunction(CreateHistogram);
V8::SetAddHistogramSampleFunction(AddHistogramSample);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Install the debugger object in the utility scope
i::Debug* debug = i::Isolate::Current()->debug();
debug->Load();
i::Handle<i::JSObject> js_debug
= i::Handle<i::JSObject>(debug->debug_context()->global());
utility_context_->Global()->Set(String::New("$debug"),
Utils::ToLocal(js_debug));
debug->debug_context()->set_security_token(HEAP->undefined_value());
#endif
// Run the d8 shell utility script in the utility context
int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
i::Vector<const char> shell_source =
i::NativesCollection<i::D8>::GetRawScriptSource(source_index);
i::Vector<const char> shell_source_name =
i::NativesCollection<i::D8>::GetScriptName(source_index);
Handle<String> source = String::New(shell_source.start(),
shell_source.length());
Handle<String> name = String::New(shell_source_name.start(),
shell_source_name.length());
Handle<Script> script = Script::Compile(source, name);
script->Run();
// Mark the d8 shell script as native to avoid it showing up as normal source
// in the debugger.
i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script);
i::Handle<i::Script> script_object = compiled_script->IsJSFunction()
? i::Handle<i::Script>(i::Script::cast(
i::JSFunction::cast(*compiled_script)->shared()->script()))
: i::Handle<i::Script>(i::Script::cast(
i::SharedFunctionInfo::cast(*compiled_script)->script()));
script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
}
#ifdef COMPRESS_STARTUP_DATA_BZ2
class BZip2Decompressor : public v8::StartupDataDecompressor {
public:
virtual ~BZip2Decompressor() { }
protected:
virtual int DecompressData(char* raw_data,
int* raw_data_size,
const char* compressed_data,
int compressed_data_size) {
ASSERT_EQ(v8::StartupData::kBZip2,
v8::V8::GetCompressedStartupDataAlgorithm());
unsigned int decompressed_size = *raw_data_size;
int result =
BZ2_bzBuffToBuffDecompress(raw_data,
&decompressed_size,
const_cast<char*>(compressed_data),
compressed_data_size,
0, 1);
if (result == BZ_OK) {
*raw_data_size = decompressed_size;
}
return result;
}
};
#endif
// Initialize the global objects
HandleScope scope;
Handle<ObjectTemplate> Shell::CreateGlobalTemplate() {
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("write"), FunctionTemplate::New(Write));
@ -425,6 +591,26 @@ void Shell::Initialize() {
global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
global_template->Set(String::New("version"), FunctionTemplate::New(Version));
// Bind the handlers for external arrays.
global_template->Set(String::New("Int8Array"),
FunctionTemplate::New(Int8Array));
global_template->Set(String::New("Uint8Array"),
FunctionTemplate::New(Uint8Array));
global_template->Set(String::New("Int16Array"),
FunctionTemplate::New(Int16Array));
global_template->Set(String::New("Uint16Array"),
FunctionTemplate::New(Uint16Array));
global_template->Set(String::New("Int32Array"),
FunctionTemplate::New(Int32Array));
global_template->Set(String::New("Uint32Array"),
FunctionTemplate::New(Uint32Array));
global_template->Set(String::New("Float32Array"),
FunctionTemplate::New(Float32Array));
global_template->Set(String::New("Float64Array"),
FunctionTemplate::New(Float64Array));
global_template->Set(String::New("PixelArray"),
FunctionTemplate::New(PixelArray));
#ifdef LIVE_OBJECT_LIST
global_template->Set(String::New("lol_is_enabled"), Boolean::New(true));
#else
@ -435,63 +621,38 @@ void Shell::Initialize() {
AddOSMethods(os_templ);
global_template->Set(String::New("os"), os_templ);
utility_context_ = Context::New(NULL, global_template);
utility_context_->SetSecurityToken(Undefined());
Context::Scope utility_scope(utility_context_);
return global_template;
}
i::JSArguments js_args = i::FLAG_js_arguments;
i::Handle<i::FixedArray> arguments_array =
i::Factory::NewFixedArray(js_args.argc());
for (int j = 0; j < js_args.argc(); j++) {
i::Handle<i::String> arg =
i::Factory::NewStringFromUtf8(i::CStrVector(js_args[j]));
arguments_array->set(j, *arg);
}
i::Handle<i::JSArray> arguments_jsarray =
i::Factory::NewJSArrayWithElements(arguments_array);
global_template->Set(String::New("arguments"),
Utils::ToLocal(arguments_jsarray));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Install the debugger object in the utility scope
i::Debug::Load();
i::Handle<i::JSObject> debug
= i::Handle<i::JSObject>(i::Debug::debug_context()->global());
utility_context_->Global()->Set(String::New("$debug"),
Utils::ToLocal(debug));
void Shell::Initialize(bool test_shell) {
#ifdef COMPRESS_STARTUP_DATA_BZ2
BZip2Decompressor startup_data_decompressor;
int bz2_result = startup_data_decompressor.Decompress();
if (bz2_result != BZ_OK) {
fprintf(stderr, "bzip error code: %d\n", bz2_result);
exit(1);
}
#endif
// Run the d8 shell utility script in the utility context
int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
i::Vector<const char> shell_source
= i::NativesCollection<i::D8>::GetScriptSource(source_index);
i::Vector<const char> shell_source_name
= i::NativesCollection<i::D8>::GetScriptName(source_index);
Handle<String> source = String::New(shell_source.start(),
shell_source.length());
Handle<String> name = String::New(shell_source_name.start(),
shell_source_name.length());
Handle<Script> script = Script::Compile(source, name);
script->Run();
Shell::counter_map_ = new CounterMap();
// Set up counters
if (i::StrLength(i::FLAG_map_counters) != 0)
MapCounters(i::FLAG_map_counters);
if (i::FLAG_dump_counters) {
V8::SetCounterFunction(LookupCounter);
V8::SetCreateHistogramFunction(CreateHistogram);
V8::SetAddHistogramSampleFunction(AddHistogramSample);
}
// Mark the d8 shell script as native to avoid it showing up as normal source
// in the debugger.
i::Handle<i::Object> compiled_script = Utils::OpenHandle(*script);
i::Handle<i::Script> script_object = compiled_script->IsJSFunction()
? i::Handle<i::Script>(i::Script::cast(
i::JSFunction::cast(*compiled_script)->shared()->script()))
: i::Handle<i::Script>(i::Script::cast(
i::SharedFunctionInfo::cast(*compiled_script)->script()));
script_object->set_type(i::Smi::FromInt(i::Script::TYPE_NATIVE));
if (test_shell) return;
// Create the evaluation context
evaluation_context_ = Context::New(NULL, global_template);
evaluation_context_->SetSecurityToken(Undefined());
Locker lock;
HandleScope scope;
Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
utility_context_ = Context::New(NULL, global_template);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Set the security token of the debug context to allow access.
i::Debug::debug_context()->set_security_token(i::Heap::undefined_value());
// Start the debugger agent if requested.
if (i::FLAG_debugger_agent) {
v8::Debug::EnableAgent("d8 shell", i::FLAG_debugger_port, true);
@ -505,6 +666,33 @@ void Shell::Initialize() {
}
void Shell::RenewEvaluationContext() {
// Initialize the global objects
HandleScope scope;
Handle<ObjectTemplate> global_template = CreateGlobalTemplate();
// (Re-)create the evaluation context
if (!evaluation_context_.IsEmpty()) {
evaluation_context_.Dispose();
}
evaluation_context_ = Context::New(NULL, global_template);
Context::Scope utility_scope(evaluation_context_);
i::JSArguments js_args = i::FLAG_js_arguments;
i::Handle<i::FixedArray> arguments_array =
FACTORY->NewFixedArray(js_args.argc());
for (int j = 0; j < js_args.argc(); j++) {
i::Handle<i::String> arg =
FACTORY->NewStringFromUtf8(i::CStrVector(js_args[j]));
arguments_array->set(j, *arg);
}
i::Handle<i::JSArray> arguments_jsarray =
FACTORY->NewJSArrayWithElements(arguments_array);
evaluation_context_->Global()->Set(String::New("arguments"),
Utils::ToLocal(arguments_jsarray));
}
void Shell::OnExit() {
if (i::FLAG_dump_counters) {
::printf("+----------------------------------------+-------------+\n");
@ -587,6 +775,7 @@ void Shell::RunShell() {
if (i::FLAG_debugger) {
printf("JavaScript debugger enabled\n");
}
editor->Open();
while (true) {
Locker locker;
@ -620,21 +809,7 @@ void ShellThread::Run() {
// Prepare the context for this thread.
Locker locker;
HandleScope scope;
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"),
FunctionTemplate::New(Shell::Print));
global_template->Set(String::New("write"),
FunctionTemplate::New(Shell::Write));
global_template->Set(String::New("read"),
FunctionTemplate::New(Shell::Read));
global_template->Set(String::New("readline"),
FunctionTemplate::New(Shell::ReadLine));
global_template->Set(String::New("load"),
FunctionTemplate::New(Shell::Load));
global_template->Set(String::New("yield"),
FunctionTemplate::New(Shell::Yield));
global_template->Set(String::New("version"),
FunctionTemplate::New(Shell::Version));
Handle<ObjectTemplate> global_template = Shell::CreateGlobalTemplate();
char* ptr = const_cast<char*>(files_.start());
while ((ptr != NULL) && (*ptr != '\0')) {
@ -648,7 +823,6 @@ void ShellThread::Run() {
}
Persistent<Context> thread_context = Context::New(NULL, global_template);
thread_context->SetSecurityToken(Undefined());
Context::Scope context_scope(thread_context);
while ((ptr != NULL) && (*ptr != '\0')) {
@ -674,15 +848,7 @@ void ShellThread::Run() {
}
}
int Shell::Main(int argc, char* argv[]) {
i::FlagList::SetFlagsFromCommandLine(&argc, argv, true);
if (i::FLAG_help) {
return 1;
}
Initialize();
bool run_shell = (argc == 1);
int Shell::RunMain(int argc, char* argv[], bool* executed) {
// Default use preemption if threads are created.
bool use_preemption = true;
@ -693,16 +859,14 @@ int Shell::Main(int argc, char* argv[]) {
i::List<i::Thread*> threads(1);
{
// Acquire the V8 lock once initialization has finished. Since the thread
// below may spawn new threads accessing V8 holding the V8 lock here is
// mandatory.
// Since the thread below may spawn new threads accessing V8 holding the
// V8 lock here is mandatory.
Locker locker;
RenewEvaluationContext();
Context::Scope context_scope(evaluation_context_);
for (int i = 1; i < argc; i++) {
char* str = argv[i];
if (strcmp(str, "--shell") == 0) {
run_shell = true;
} else if (strcmp(str, "--preemption") == 0) {
if (strcmp(str, "--preemption") == 0) {
use_preemption = true;
} else if (strcmp(str, "--no-preemption") == 0) {
use_preemption = false;
@ -717,7 +881,7 @@ int Shell::Main(int argc, char* argv[]) {
} else {
printf("Missing value for --preemption-interval\n");
return 1;
}
}
} else if (strcmp(str, "-f") == 0) {
// Ignore any -f flags for compatibility with other stand-alone
// JavaScript engines.
@ -728,12 +892,12 @@ int Shell::Main(int argc, char* argv[]) {
// Execute argument given to -e option directly.
v8::HandleScope handle_scope;
v8::Handle<v8::String> file_name = v8::String::New("unnamed");
v8::Handle<v8::String> source = v8::String::New(argv[i + 1]);
v8::Handle<v8::String> source = v8::String::New(argv[++i]);
(*executed) = true;
if (!ExecuteString(source, file_name, false, true)) {
OnExit();
return 1;
}
i++;
} else if (strcmp(str, "-p") == 0 && i + 1 < argc) {
int size = 0;
const char* files = ReadChars(argv[++i], &size);
@ -743,11 +907,13 @@ int Shell::Main(int argc, char* argv[]) {
i::Vector<const char>(files, size));
thread->Start();
threads.Add(thread);
(*executed) = true;
} else {
// Use all other arguments as names of files to load and run.
HandleScope handle_scope;
Handle<String> file_name = v8::String::New(str);
Handle<String> source = ReadFile(str);
(*executed) = true;
if (source.IsEmpty()) {
printf("Error reading '%s'\n", str);
return 1;
@ -763,17 +929,8 @@ int Shell::Main(int argc, char* argv[]) {
if (threads.length() > 0 && use_preemption) {
Locker::StartPreemption(preemption_interval);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Run the remote debugger if requested.
if (i::FLAG_remote_debugger) {
RunRemoteDebugger(i::FLAG_debugger_port);
return 0;
}
#endif
}
if (run_shell)
RunShell();
for (int i = 0; i < threads.length(); i++) {
i::Thread* thread = threads[i];
thread->Join();
@ -784,9 +941,83 @@ int Shell::Main(int argc, char* argv[]) {
}
int Shell::Main(int argc, char* argv[]) {
// Figure out if we're requested to stress the optimization
// infrastructure by running tests multiple times and forcing
// optimization in the last run.
bool FLAG_stress_opt = false;
bool FLAG_stress_deopt = false;
bool FLAG_interactive_shell = false;
bool FLAG_test_shell = false;
bool script_executed = false;
for (int i = 0; i < argc; i++) {
if (strcmp(argv[i], "--stress-opt") == 0) {
FLAG_stress_opt = true;
argv[i] = NULL;
} else if (strcmp(argv[i], "--stress-deopt") == 0) {
FLAG_stress_deopt = true;
argv[i] = NULL;
} else if (strcmp(argv[i], "--noalways-opt") == 0) {
// No support for stressing if we can't use --always-opt.
FLAG_stress_opt = false;
FLAG_stress_deopt = false;
} else if (strcmp(argv[i], "--shell") == 0) {
FLAG_interactive_shell = true;
argv[i] = NULL;
} else if (strcmp(argv[i], "--test") == 0) {
FLAG_test_shell = true;
argv[i] = NULL;
}
}
v8::V8::SetFlagsFromCommandLine(&argc, argv, true);
Initialize(FLAG_test_shell);
int result = 0;
if (FLAG_stress_opt || FLAG_stress_deopt) {
v8::Testing::SetStressRunType(
FLAG_stress_opt ? v8::Testing::kStressTypeOpt
: v8::Testing::kStressTypeDeopt);
int stress_runs = v8::Testing::GetStressRuns();
for (int i = 0; i < stress_runs && result == 0; i++) {
printf("============ Stress %d/%d ============\n", i + 1, stress_runs);
v8::Testing::PrepareStressRun(i);
result = RunMain(argc, argv, &script_executed);
}
printf("======== Full Deoptimization =======\n");
v8::Testing::DeoptimizeAll();
} else {
result = RunMain(argc, argv, &script_executed);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Run remote debugger if requested, but never on --test
if (i::FLAG_remote_debugger && !FLAG_test_shell) {
InstallUtilityScript();
RunRemoteDebugger(i::FLAG_debugger_port);
return 0;
}
#endif
// Run interactive shell if explicitly requested or if no script has been
// executed, but never on --test
if ((FLAG_interactive_shell || !script_executed) && !FLAG_test_shell) {
InstallUtilityScript();
RunShell();
}
v8::V8::Dispose();
return result;
}
} // namespace v8
#ifndef GOOGLE3
int main(int argc, char* argv[]) {
return v8::Shell::Main(argc, argv);
}
#endif
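The CreateExternalArray helper added near the top of this file backs the shell's Int8Array/Uint8Array/... constructors with calloc'd memory, exposes it through SetIndexedPropertiesToExternalArrayData, and frees it from a weak-handle callback. A standalone sketch of that embedder pattern against the 3.x-era API (NewByteArray and FreeBackingStore are hypothetical names; error handling and length checks trimmed):

#include <stdint.h>
#include <stdlib.h>
#include <v8.h>

// Weak-handle callback: runs when the GC finds no more references to the
// wrapper object, so the backing store can be released.
static void FreeBackingStore(v8::Persistent<v8::Value> object, void* data) {
  free(data);
  object.Dispose();
}

// The caller must have an active HandleScope.
v8::Handle<v8::Object> NewByteArray(size_t length) {
  void* data = calloc(length, sizeof(uint8_t));
  v8::Handle<v8::Object> array = v8::Object::New();
  // Register the weak callback before handing the buffer to the object.
  v8::Persistent<v8::Object> holder = v8::Persistent<v8::Object>::New(array);
  holder.MakeWeak(data, FreeBackingStore);
  array->SetIndexedPropertiesToExternalArrayData(
      data, v8::kExternalUnsignedByteArray, static_cast<int>(length));
  array->Set(v8::String::New("length"),
             v8::Int32::New(static_cast<int32_t>(length)), v8::ReadOnly);
  return array;
}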

8
deps/v8/src/d8.gyp

@ -38,7 +38,10 @@
'../src',
],
'defines': [
'ENABLE_LOGGING_AND_PROFILING',
'ENABLE_DEBUGGER_SUPPORT',
'ENABLE_VMSTATE_TRACKING',
'V8_FAST_TLS',
],
'sources': [
'd8.cc',
@ -49,6 +52,9 @@
[ 'OS=="linux" or OS=="mac" or OS=="freebsd" or OS=="openbsd" or OS=="solaris"', {
'sources': [ 'd8-posix.cc', ]
}],
[ 'OS=="win"', {
'sources': [ 'd8-windows.cc', ]
}],
],
},
{
@ -58,6 +64,7 @@
'variables': {
'js_files': [
'd8.js',
'macros.py',
],
},
'actions': [
@ -69,7 +76,6 @@
],
'outputs': [
'<(SHARED_INTERMEDIATE_DIR)/d8-js.cc',
'<(SHARED_INTERMEDIATE_DIR)/d8-js-empty.cc',
],
'action': [
'python',

26
deps/v8/src/d8.h

@ -28,10 +28,10 @@
#ifndef V8_D8_H_
#define V8_D8_H_
#include "allocation.h"
#include "v8.h"
#include "hashmap.h"
namespace v8 {
@ -104,6 +104,7 @@ class CounterMap {
i::HashMap* map_;
i::HashMap::Entry* entry_;
};
private:
static int Hash(const char* name);
static bool Match(void* key1, void* key2);
@ -119,7 +120,6 @@ class Shell: public i::AllStatic {
bool report_exceptions);
static const char* ToCString(const v8::String::Utf8Value& value);
static void ReportException(TryCatch* try_catch);
static void Initialize();
static void OnExit();
static int* LookupCounter(const char* name);
static void* CreateHistogram(const char* name,
@ -129,8 +129,14 @@ class Shell: public i::AllStatic {
static void AddHistogramSample(void* histogram, int sample);
static void MapCounters(const char* name);
static Handle<String> ReadFile(const char* name);
static void Initialize(bool test_shell);
static void RenewEvaluationContext();
static void InstallUtilityScript();
static void RunShell();
static int RunScript(char* filename);
static int RunMain(int argc, char* argv[], bool* executed);
static int Main(int argc, char* argv[]);
static Handle<ObjectTemplate> CreateGlobalTemplate();
static Handle<Array> GetCompletions(Handle<String> text,
Handle<String> full);
#ifdef ENABLE_DEBUGGER_SUPPORT
@ -150,6 +156,15 @@ class Shell: public i::AllStatic {
static Handle<Value> Read(const Arguments& args);
static Handle<Value> ReadLine(const Arguments& args);
static Handle<Value> Load(const Arguments& args);
static Handle<Value> Int8Array(const Arguments& args);
static Handle<Value> Uint8Array(const Arguments& args);
static Handle<Value> Int16Array(const Arguments& args);
static Handle<Value> Uint16Array(const Arguments& args);
static Handle<Value> Int32Array(const Arguments& args);
static Handle<Value> Uint32Array(const Arguments& args);
static Handle<Value> Float32Array(const Arguments& args);
static Handle<Value> Float64Array(const Arguments& args);
static Handle<Value> PixelArray(const Arguments& args);
// The OS object on the global object contains methods for performing
// operating system calls:
//
@ -187,10 +202,9 @@ class Shell: public i::AllStatic {
static void AddOSMethods(Handle<ObjectTemplate> os_template);
static Handle<Context> utility_context() { return utility_context_; }
static const char* kHistoryFileName;
static const char* kPrompt;
private:
static Persistent<Context> utility_context_;
static Persistent<Context> evaluation_context_;
@ -201,6 +215,10 @@ class Shell: public i::AllStatic {
static CounterCollection* counters_;
static i::OS::MemoryMappedFile* counters_file_;
static Counter* GetCounter(const char* name, bool is_histogram);
static Handle<Value> CreateExternalArray(const Arguments& args,
ExternalArrayType type,
size_t element_size);
static void ExternalArrayWeakCallback(Persistent<Value> object, void* data);
};

10
deps/v8/src/d8.js

@ -977,9 +977,14 @@ DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) {
// specification it is considered a function break point.
pos = target.indexOf(':');
if (pos > 0) {
type = 'script';
var tmp = target.substring(pos + 1, target.length);
target = target.substring(0, pos);
if (target[0] == '/' && target[target.length - 1] == '/') {
type = 'scriptRegExp';
target = target.substring(1, target.length - 1);
} else {
type = 'script';
}
// Check for both line and column.
pos = tmp.indexOf(':');
@ -1984,6 +1989,9 @@ function DebugResponseDetails(response) {
if (breakpoint.script_name) {
result += ' script_name=' + breakpoint.script_name;
}
if (breakpoint.script_regexp) {
result += ' script_regexp=' + breakpoint.script_regexp;
}
result += ' line=' + (breakpoint.line + 1);
if (breakpoint.column != null) {
result += ' column=' + (breakpoint.column + 1);

Some files were not shown because too many files changed in this diff
