
Upgrade v8 to version 1.2.3.

v0.7.4-release
Author: Ryan
Commit: 3a41367c40
Changed files (number of changed lines in parentheses):

  1. deps/v8/ChangeLog (58)
  2. deps/v8/SConstruct (200)
  3. deps/v8/include/v8-debug.h (126)
  4. deps/v8/include/v8.h (73)
  5. deps/v8/samples/shell.cc (22)
  6. deps/v8/src/SConscript (38)
  7. deps/v8/src/accessors.cc (37)
  8. deps/v8/src/accessors.h (4)
  9. deps/v8/src/api.cc (173)
  10. deps/v8/src/arm/assembler-arm-inl.h (8)
  11. deps/v8/src/arm/assembler-arm.cc (2)
  12. deps/v8/src/arm/assembler-arm.h (40)
  13. deps/v8/src/arm/builtins-arm.cc (63)
  14. deps/v8/src/arm/codegen-arm.cc (514)
  15. deps/v8/src/arm/codegen-arm.h (28)
  16. deps/v8/src/arm/constants-arm.h (13)
  17. deps/v8/src/arm/cpu-arm.cc (0)
  18. deps/v8/src/arm/debug-arm.cc (5)
  19. deps/v8/src/arm/disasm-arm.cc (9)
  20. deps/v8/src/arm/frames-arm.cc (2)
  21. deps/v8/src/arm/frames-arm.h (6)
  22. deps/v8/src/arm/ic-arm.cc (51)
  23. deps/v8/src/arm/jump-target-arm.cc (2)
  24. deps/v8/src/arm/macro-assembler-arm.cc (21)
  25. deps/v8/src/arm/macro-assembler-arm.h (20)
  26. deps/v8/src/arm/regexp-macro-assembler-arm.cc (2)
  27. deps/v8/src/arm/regexp-macro-assembler-arm.h (6)
  28. deps/v8/src/arm/register-allocator-arm.cc (8)
  29. deps/v8/src/arm/simulator-arm.cc (95)
  30. deps/v8/src/arm/simulator-arm.h (14)
  31. deps/v8/src/arm/stub-cache-arm.cc (15)
  32. deps/v8/src/arm/virtual-frame-arm.cc (13)
  33. deps/v8/src/arm/virtual-frame-arm.h (6)
  34. deps/v8/src/array.js (165)
  35. deps/v8/src/assembler.cc (56)
  36. deps/v8/src/assembler.h (37)
  37. deps/v8/src/ast.cc (21)
  38. deps/v8/src/ast.h (7)
  39. deps/v8/src/bootstrapper.cc (64)
  40. deps/v8/src/builtins.cc (3)
  41. deps/v8/src/builtins.h (5)
  42. deps/v8/src/checks.h (2)
  43. deps/v8/src/code-stubs.h (2)
  44. deps/v8/src/codegen.cc (86)
  45. deps/v8/src/codegen.h (20)
  46. deps/v8/src/compiler.cc (43)
  47. deps/v8/src/compiler.h (3)
  48. deps/v8/src/contexts.h (6)
  49. deps/v8/src/d8.cc (27)
  50. deps/v8/src/d8.h (3)
  51. deps/v8/src/d8.js (37)
  52. deps/v8/src/date-delay.js (25)
  53. deps/v8/src/dateparser-inl.h (5)
  54. deps/v8/src/debug-agent.cc (21)
  55. deps/v8/src/debug-agent.h (28)
  56. deps/v8/src/debug-delay.js (71)
  57. deps/v8/src/debug.cc (489)
  58. deps/v8/src/debug.h (188)
  59. deps/v8/src/execution.cc (26)
  60. deps/v8/src/execution.h (11)
  61. deps/v8/src/factory.cc (13)
  62. deps/v8/src/factory.h (5)
  63. deps/v8/src/frames-inl.h (12)
  64. deps/v8/src/frames.cc (4)
  65. deps/v8/src/func-name-inferrer.cc (9)
  66. deps/v8/src/func-name-inferrer.h (23)
  67. deps/v8/src/global-handles.cc (2)
  68. deps/v8/src/global-handles.h (5)
  69. deps/v8/src/globals.h (110)
  70. deps/v8/src/handles.cc (66)
  71. deps/v8/src/handles.h (9)
  72. deps/v8/src/heap-inl.h (4)
  73. deps/v8/src/heap.cc (151)
  74. deps/v8/src/ia32/assembler-ia32-inl.h (6)
  75. deps/v8/src/ia32/assembler-ia32.cc (41)
  76. deps/v8/src/ia32/assembler-ia32.h (6)
  77. deps/v8/src/ia32/builtins-ia32.cc (28)
  78. deps/v8/src/ia32/codegen-ia32.cc (216)
  79. deps/v8/src/ia32/codegen-ia32.h (633)
  80. deps/v8/src/ia32/cpu-ia32.cc (0)
  81. deps/v8/src/ia32/debug-ia32.cc (4)
  82. deps/v8/src/ia32/disasm-ia32.cc (0)
  83. deps/v8/src/ia32/frames-ia32.cc (0)
  84. deps/v8/src/ia32/frames-ia32.h (6)
  85. deps/v8/src/ia32/ic-ia32.cc (112)
  86. deps/v8/src/ia32/jump-target-ia32.cc (12)
  87. deps/v8/src/ia32/macro-assembler-ia32.cc (11)
  88. deps/v8/src/ia32/macro-assembler-ia32.h (371)
  89. deps/v8/src/ia32/regexp-macro-assembler-ia32.cc (16)
  90. deps/v8/src/ia32/regexp-macro-assembler-ia32.h (6)
  91. deps/v8/src/ia32/register-allocator-ia32.cc (6)
  92. deps/v8/src/ia32/simulator-ia32.cc (0)
  93. deps/v8/src/ia32/simulator-ia32.h (6)
  94. deps/v8/src/ia32/stub-cache-ia32.cc (16)
  95. deps/v8/src/ia32/virtual-frame-ia32.cc (156)
  96. deps/v8/src/ia32/virtual-frame-ia32.h (6)
  97. deps/v8/src/ic-inl.h (4)
  98. deps/v8/src/ic.cc (37)
  99. deps/v8/src/ic.h (18)
  100. deps/v8/src/interpreter-irregexp.cc (6)

deps/v8/ChangeLog (58 lines changed)

@ -1,3 +1,61 @@
2009-05-11: Version 1.2.3

        Fixed bug in reporting of out-of-memory situations.

        Introduced hidden prototypes on certain builtin prototype objects
        such as String.prototype to emulate JSC's behavior of restoring
        the original function when deleting functions from those prototype
        objects.

        Fixed crash bug in the register allocator.


2009-05-04: Version 1.2.2

        Fixed bug in array sorting for sparse arrays (issue 326).

        Added support for adding a soname when building a shared library
        on Linux (issue 151).

        Fixed bug caused by morphing internal ASCII strings to external
        two-byte strings. Slices over ASCII strings have to forward ASCII
        checks to the underlying buffer string.

        Allowed API call-as-function handlers to be called as
        constructors.

        Fixed a crash bug where an external string was disposed but a
        slice of the external string survived as a symbol.


2009-04-27: Version 1.2.1

        Added EcmaScript 5 JSON object.

        Fix bug in preemption support on ARM.


2009-04-23: Version 1.2.0

        Optimized floating-point operations on ARM.

        Added a number of extensions to the debugger API.

        Changed the enumeration order for unsigned integer keys to always
        be numerical order.

        Added a "read" extension to the shell sample.

        Added support for Array.prototype.reduce and
        Array.prototype.reduceRight.

        Added an option to the SCons build to control Microsoft Visual C++
        link-time code generation.

        Fixed a number of bugs (in particular issue 315, issue 316,
        issue 317 and issue 318).


2009-04-15: Version 1.1.10

        Fixed crash bug that occurred when loading a const variable in the

deps/v8/SConstruct (200 lines changed)

@ -35,6 +35,7 @@ root_dir = dirname(File('SConstruct').rfile().abspath)
sys.path.append(join(root_dir, 'tools'))
import js2c, utils

# ANDROID_TOP is the top of the Android checkout, fetched from the environment
# variable 'TOP'. You will also need to set the CXX, CC, AR and RANLIB
# environment variables to the cross-compiling tools.
@ -83,7 +84,8 @@ ANDROID_LINKFLAGS = ['-nostdlib',
LIBRARY_FLAGS = {
  'all': {
-    'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING']
+    'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
+    'CPPPATH': [join(root_dir, 'src')]
  },
  'gcc': {
    'all': {
@ -94,6 +96,7 @@ LIBRARY_FLAGS = {
      'CCFLAGS': ['-g', '-O0'],
      'CPPDEFINES': ['ENABLE_DISASSEMBLER', 'DEBUG'],
      'os:android': {
+        'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
        'CCFLAGS': ['-mthumb']
      }
    },
@ -102,13 +105,13 @@ LIBRARY_FLAGS = {
                  '-ffunction-sections'],
      'os:android': {
        'CCFLAGS': ['-mthumb', '-Os'],
-        'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
+        'CPPDEFINES': ['SK_RELEASE', 'NDEBUG', 'ENABLE_DEBUGGER_SUPPORT']
      }
    },
    'os:linux': {
      'CCFLAGS': ['-ansi'],
      'library:shared': {
-        'LIBS': ['pthread', 'rt']
+        'LIBS': ['pthread']
      }
    },
    'os:macos': {
@ -129,10 +132,31 @@ LIBRARY_FLAGS = {
'-Wstrict-aliasing=2'], '-Wstrict-aliasing=2'],
'CPPPATH': ANDROID_INCLUDES, 'CPPPATH': ANDROID_INCLUDES,
}, },
'wordsize:32': {
'arch:x64': {
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64']
}
},
'wordsize:64': { 'wordsize:64': {
'arch:ia32': {
'CCFLAGS': ['-m32'], 'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32'] 'LINKFLAGS': ['-m32']
}, },
'arch:arm': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
}
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
},
'arch:arm': {
'CPPDEFINES': ['V8_TARGET_ARCH_ARM']
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64']
},
'prof:oprofile': { 'prof:oprofile': {
'CPPDEFINES': ['ENABLE_OPROFILE_AGENT'] 'CPPDEFINES': ['ENABLE_OPROFILE_AGENT']
} }
@ -148,6 +172,9 @@ LIBRARY_FLAGS = {
'ARFLAGS': ['/NOLOGO'], 'ARFLAGS': ['/NOLOGO'],
'CCPDBFLAGS': ['/Zi'] 'CCPDBFLAGS': ['/Zi']
}, },
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
},
'mode:debug': { 'mode:debug': {
'CCFLAGS': ['/Od', '/Gm'], 'CCFLAGS': ['/Od', '/Gm'],
'CPPDEFINES': ['_DEBUG', 'ENABLE_DISASSEMBLER', 'DEBUG'], 'CPPDEFINES': ['_DEBUG', 'ENABLE_DISASSEMBLER', 'DEBUG'],
@ -160,16 +187,20 @@ LIBRARY_FLAGS = {
    }
  },
  'mode:release': {
-    'CCFLAGS': ['/O2', '/GL'],
-    'LINKFLAGS': ['/OPT:REF', '/OPT:ICF', '/LTCG'],
-    'ARFLAGS': ['/LTCG'],
+    'CCFLAGS': ['/O2'],
+    'LINKFLAGS': ['/OPT:REF', '/OPT:ICF'],
    'msvcrt:static': {
      'CCFLAGS': ['/MT']
    },
    'msvcrt:shared': {
      'CCFLAGS': ['/MD']
-    }
+    },
+    'msvcltcg:on': {
+      'CCFLAGS': ['/GL'],
+      'LINKFLAGS': ['/LTCG'],
+      'ARFLAGS': ['/LTCG'],
+    }
  }
}
@ -181,17 +212,16 @@ V8_EXTRA_FLAGS = {
    'WARNINGFLAGS': ['-Wall', '-Werror', '-W',
                     '-Wno-unused-parameter']
  },
-  'arch:arm': {
-    'CPPDEFINES': ['ARM']
-  },
-  'arch:android': {
-    'CPPDEFINES': ['ARM']
-  },
  'os:win32': {
    'WARNINGFLAGS': ['-pedantic', '-Wno-long-long']
  },
  'os:linux': {
-    'WARNINGFLAGS': ['-pedantic']
+    'WARNINGFLAGS': ['-pedantic'],
+    'library:shared': {
+      'soname:on': {
+        'LINKFLAGS': ['-Wl,-soname,${SONAME}']
+      }
+    }
  },
  'os:macos': {
    'WARNINGFLAGS': ['-pedantic']
@ -209,7 +239,7 @@ V8_EXTRA_FLAGS = {
'LIBS': ['winmm', 'ws2_32'] 'LIBS': ['winmm', 'ws2_32']
}, },
'arch:arm': { 'arch:arm': {
'CPPDEFINES': ['ARM'], 'CPPDEFINES': ['V8_TARGET_ARCH_ARM'],
# /wd4996 is to silence the warning about sscanf # /wd4996 is to silence the warning about sscanf
# used by the arm simulator. # used by the arm simulator.
'WARNINGFLAGS': ['/wd4996'] 'WARNINGFLAGS': ['/wd4996']
@ -224,7 +254,7 @@ V8_EXTRA_FLAGS = {
MKSNAPSHOT_EXTRA_FLAGS = { MKSNAPSHOT_EXTRA_FLAGS = {
'gcc': { 'gcc': {
'os:linux': { 'os:linux': {
'LIBS': ['pthread', 'rt'], 'LIBS': ['pthread'],
}, },
'os:macos': { 'os:macos': {
'LIBS': ['pthread'], 'LIBS': ['pthread'],
@ -238,6 +268,7 @@ MKSNAPSHOT_EXTRA_FLAGS = {
}, },
'msvc': { 'msvc': {
'all': { 'all': {
'CPPDEFINES': ['_HAS_EXCEPTIONS=0'],
'LIBS': ['winmm', 'ws2_32'] 'LIBS': ['winmm', 'ws2_32']
} }
} }
@ -268,7 +299,7 @@ CCTEST_EXTRA_FLAGS = {
'LIBPATH': [abspath('.')] 'LIBPATH': [abspath('.')]
}, },
'os:linux': { 'os:linux': {
'LIBS': ['pthread', 'rt'], 'LIBS': ['pthread'],
}, },
'os:macos': { 'os:macos': {
'LIBS': ['pthread'], 'LIBS': ['pthread'],
@ -279,10 +310,34 @@ CCTEST_EXTRA_FLAGS = {
'os:win32': { 'os:win32': {
'LIBS': ['winmm', 'ws2_32'] 'LIBS': ['winmm', 'ws2_32']
}, },
'os:android': {
'CPPDEFINES': ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
'__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
'CCFLAGS': ANDROID_FLAGS,
'CPPPATH': ANDROID_INCLUDES,
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
'LINKFLAGS': ANDROID_LINKFLAGS,
'LIBS': ['c', 'stdc++', 'm'],
'mode:release': {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
},
'wordsize:32': {
'arch:x64': {
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64']
}
},
'wordsize:64': { 'wordsize:64': {
'arch:ia32': {
'CCFLAGS': ['-m32'], 'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32'] 'LINKFLAGS': ['-m32']
}, },
'arch:arm': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
}
}
}, },
'msvc': { 'msvc': {
'all': { 'all': {
@ -291,6 +346,9 @@ CCTEST_EXTRA_FLAGS = {
}, },
'library:shared': { 'library:shared': {
'CPPDEFINES': ['USING_V8_SHARED'] 'CPPDEFINES': ['USING_V8_SHARED']
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
} }
} }
} }
@ -307,7 +365,7 @@ SAMPLE_FLAGS = {
'CCFLAGS': ['-fno-rtti', '-fno-exceptions'] 'CCFLAGS': ['-fno-rtti', '-fno-exceptions']
}, },
'os:linux': { 'os:linux': {
'LIBS': ['pthread', 'rt'], 'LIBS': ['pthread'],
}, },
'os:macos': { 'os:macos': {
'LIBS': ['pthread'], 'LIBS': ['pthread'],
@ -330,10 +388,22 @@ SAMPLE_FLAGS = {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG'] 'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
} }
}, },
'wordsize:32': {
'arch:x64': {
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64']
}
},
'wordsize:64': { 'wordsize:64': {
'arch:ia32': {
'CCFLAGS': ['-m32'], 'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32'] 'LINKFLAGS': ['-m32']
}, },
'arch:arm': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
}
},
'mode:release': { 'mode:release': {
'CCFLAGS': ['-O2'] 'CCFLAGS': ['-O2']
}, },
@ -359,14 +429,21 @@ SAMPLE_FLAGS = {
}, },
'mode:release': { 'mode:release': {
'CCFLAGS': ['/O2'], 'CCFLAGS': ['/O2'],
'LINKFLAGS': ['/OPT:REF', '/OPT:ICF', '/LTCG'], 'LINKFLAGS': ['/OPT:REF', '/OPT:ICF'],
'msvcrt:static': { 'msvcrt:static': {
'CCFLAGS': ['/MT'] 'CCFLAGS': ['/MT']
}, },
'msvcrt:shared': { 'msvcrt:shared': {
'CCFLAGS': ['/MD'] 'CCFLAGS': ['/MD']
},
'msvcltcg:on': {
'CCFLAGS': ['/GL'],
'LINKFLAGS': ['/LTCG'],
} }
}, },
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
},
'mode:debug': { 'mode:debug': {
'CCFLAGS': ['/Od'], 'CCFLAGS': ['/Od'],
'LINKFLAGS': ['/DEBUG'], 'LINKFLAGS': ['/DEBUG'],
@ -387,7 +464,7 @@ D8_FLAGS = {
'LIBS': ['readline'] 'LIBS': ['readline']
}, },
'os:linux': { 'os:linux': {
'LIBS': ['pthread', 'rt'], 'LIBS': ['pthread'],
}, },
'os:macos': { 'os:macos': {
'LIBS': ['pthread'], 'LIBS': ['pthread'],
@ -443,17 +520,17 @@ SIMPLE_OPTIONS = {
  'toolchain': {
    'values': ['gcc', 'msvc'],
    'default': TOOLCHAIN_GUESS,
-    'help': 'the toolchain to use'
+    'help': 'the toolchain to use (' + TOOLCHAIN_GUESS + ')'
  },
  'os': {
    'values': ['freebsd', 'linux', 'macos', 'win32', 'android'],
    'default': OS_GUESS,
-    'help': 'the os to build for'
+    'help': 'the os to build for (' + OS_GUESS + ')'
  },
  'arch': {
-    'values':['arm', 'ia32'],
+    'values':['arm', 'ia32', 'x64'],
    'default': ARCH_GUESS,
-    'help': 'the architecture to build for'
+    'help': 'the architecture to build for (' + ARCH_GUESS + ')'
  },
  'snapshot': {
    'values': ['on', 'off', 'nobuild'],
@ -470,10 +547,20 @@ SIMPLE_OPTIONS = {
    'default': 'static',
    'help': 'the type of library to produce'
  },
+  'soname': {
+    'values': ['on', 'off'],
+    'default': 'off',
+    'help': 'turn on setting soname for Linux shared library'
+  },
  'msvcrt': {
    'values': ['static', 'shared'],
    'default': 'static',
-    'help': 'the type of MSVCRT library to use'
+    'help': 'the type of Microsoft Visual C++ runtime library to use'
  },
+  'msvcltcg': {
+    'values': ['on', 'off'],
+    'default': 'on',
+    'help': 'use Microsoft Visual C++ link-time code generation'
+  },
  'wordsize': {
    'values': ['64', '32'],
@ -515,6 +602,49 @@ def GetOptions():
  return result


def GetVersionComponents():
  MAJOR_VERSION_PATTERN = re.compile(r"#define\s+MAJOR_VERSION\s+(.*)")
  MINOR_VERSION_PATTERN = re.compile(r"#define\s+MINOR_VERSION\s+(.*)")
  BUILD_NUMBER_PATTERN = re.compile(r"#define\s+BUILD_NUMBER\s+(.*)")
  PATCH_LEVEL_PATTERN = re.compile(r"#define\s+PATCH_LEVEL\s+(.*)")
  patterns = [MAJOR_VERSION_PATTERN,
              MINOR_VERSION_PATTERN,
              BUILD_NUMBER_PATTERN,
              PATCH_LEVEL_PATTERN]
  source = open(join(root_dir, 'src', 'version.cc')).read()
  version_components = []
  for pattern in patterns:
    match = pattern.search(source)
    if match:
      version_components.append(match.group(1).strip())
    else:
      version_components.append('0')
  return version_components


def GetVersion():
  version_components = GetVersionComponents()
  if version_components[len(version_components) - 1] == '0':
    version_components.pop()
  return '.'.join(version_components)


def GetSpecificSONAME():
  SONAME_PATTERN = re.compile(r"#define\s+SONAME\s+\"(.*)\"")
  source = open(join(root_dir, 'src', 'version.cc')).read()
  match = SONAME_PATTERN.search(source)
  if match:
    return match.group(1).strip()
  else:
    return ''


def SplitList(str):
  return [ s for s in str.split(",") if len(s) > 0 ]
@ -537,6 +667,12 @@ def VerifyOptions(env):
Abort("Profiling on windows only supported for static library.") Abort("Profiling on windows only supported for static library.")
if env['prof'] == 'oprofile' and env['os'] != 'linux': if env['prof'] == 'oprofile' and env['os'] != 'linux':
Abort("OProfile is only supported on Linux.") Abort("OProfile is only supported on Linux.")
if env['os'] == 'win32' and env['soname'] == 'on':
Abort("Shared Object soname not applicable for Windows.")
if env['soname'] == 'on' and env['library'] == 'static':
Abort("Shared Object soname not applicable for static library.")
if env['arch'] == 'x64' and env['os'] != 'linux':
Abort("X64 compilation only allowed on Linux OS.")
for (name, option) in SIMPLE_OPTIONS.iteritems(): for (name, option) in SIMPLE_OPTIONS.iteritems():
if (not option.get('default')) and (name not in ARGUMENTS): if (not option.get('default')) and (name not in ARGUMENTS):
message = ("A value for option %s must be specified (%s)." % message = ("A value for option %s must be specified (%s)." %
@ -565,13 +701,13 @@ class BuildContext(object):
  def AddRelevantFlags(self, initial, flags):
    result = initial.copy()
-    self.AppendFlags(result, flags.get('all'))
    toolchain = self.options['toolchain']
    if toolchain in flags:
      self.AppendFlags(result, flags[toolchain].get('all'))
      for option in sorted(self.options.keys()):
        value = self.options[option]
        self.AppendFlags(result, flags[toolchain].get(option + ':' + value))
+    self.AppendFlags(result, flags.get('all'))
    return result
def AddRelevantSubFlags(self, options, flags): def AddRelevantSubFlags(self, options, flags):
@ -667,11 +803,23 @@ def BuildSpecific(env, mode, env_overrides):
    'd8': d8_flags
  }

+  # Generate library base name.
  target_id = mode
  suffix = SUFFIXES[target_id]
  library_name = 'v8' + suffix
+  version = GetVersion()
+  if context.options['soname'] == 'on':
+    # When building shared object with SONAME version the library name.
+    library_name += '-' + version
  env['LIBRARY'] = library_name

+  # Generate library SONAME if required by the build.
+  if context.options['soname'] == 'on':
+    soname = GetSpecificSONAME()
+    if soname == '':
+      soname = 'lib' + library_name + '.so'
+    env['SONAME'] = soname

  # Build the object files by invoking SCons recursively.
  (object_files, shell_files, mksnapshot) = env.SConscript(
      join('src', 'SConscript'),

deps/v8/include/v8-debug.h (126 lines changed)

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

-#ifndef V8_DEBUG_H_
-#define V8_DEBUG_H_
+#ifndef V8_V8_DEBUG_H_
+#define V8_V8_DEBUG_H_

#include "v8.h"
@ -79,7 +79,70 @@ enum DebugEvent {
};

class EXPORT Debug {
 public:
/**
* A client object passed to the v8 debugger whose ownership will be taken by
* it. v8 is always responsible for deleting the object.
*/
class ClientData {
public:
virtual ~ClientData() {}
};
/**
* A message object passed to the debug message handler.
*/
class Message {
public:
/**
* Check type of message.
*/
virtual bool IsEvent() const = 0;
virtual bool IsResponse() const = 0;
virtual DebugEvent GetEvent() const = 0;
/**
* Indicate whether this is a response to a continue command which will
* start the VM running after this is processed.
*/
virtual bool WillStartRunning() const = 0;
/**
* Access to execution state and event data. Don't store these cross
* callbacks as their content becomes invalid. These objects are from the
* debugger event that started the debug message loop.
*/
virtual Handle<Object> GetExecutionState() const = 0;
virtual Handle<Object> GetEventData() const = 0;
/**
* Get the debugger protocol JSON.
*/
virtual Handle<String> GetJSON() const = 0;
/**
* Get the context active when the debug event happened. Note this is not
* the current active context as the JavaScript part of the debugger is
* running in it's own context which is entered at this point.
*/
virtual Handle<Context> GetEventContext() const = 0;
/**
* Client data passed with the corresponding request if any. This is the
* client_data data value passed into Debug::SendCommand along with the
* request that led to the message or NULL if the message is an event. The
* debugger takes ownership of the data and will delete it even if there is
* no message handler.
*/
virtual ClientData* GetClientData() const = 0;
virtual ~Message() {}
};
/**
* Debug event callback function. * Debug event callback function.
* *
* \param event the type of the debug event that triggered the callback * \param event the type of the debug event that triggered the callback
@ -88,39 +151,44 @@ enum DebugEvent {
 * \param event_data event specific data (JavaScript object)
 * \param data value passed by the user to SetDebugEventListener
 */
-typedef void (*DebugEventCallback)(DebugEvent event,
+typedef void (*EventCallback)(DebugEvent event,
                              Handle<Object> exec_state,
                              Handle<Object> event_data,
                              Handle<Value> data);

/**
 * Debug message callback function.
 *
- * \param message the debug message
+ * \param message the debug message handler message object
 * \param length length of the message
- * \param data the data value passed when registering the message handler
- * A DebugMessageHandler does not take posession of the message string,
+ * \param client_data the data value passed when registering the message handler
+ * A MessageHandler does not take posession of the message string,
 * and must not rely on the data persisting after the handler returns.
+ *
+ * This message handler is deprecated. Use MessageHandler2 instead.
 */
-typedef void (*DebugMessageHandler)(const uint16_t* message, int length,
-                                    void* data);
+typedef void (*MessageHandler)(const uint16_t* message, int length,
+                               ClientData* client_data);

/**
- * Debug host dispatch callback function.
+ * Debug message callback function.
 *
- * \param dispatch the dispatch value
- * \param data the data value passed when registering the dispatch handler
- */
-typedef void (*DebugHostDispatchHandler)(void* dispatch,
-                                         void* data);
+ * \param message the debug message handler message object
+ *
+ * A MessageHandler does not take posession of the message data,
+ * and must not rely on the data persisting after the handler returns.
+ */
+typedef void (*MessageHandler2)(const Message& message);

+/**
+ * Debug host dispatch callback function.
+ */
+typedef void (*HostDispatchHandler)();

-class EXPORT Debug {
- public:
  // Set a C debug event listener.
-  static bool SetDebugEventListener(DebugEventCallback that,
+  static bool SetDebugEventListener(EventCallback that,
                                    Handle<Value> data = Handle<Value>());

  // Set a JavaScript debug event listener.
@ -130,15 +198,17 @@ class EXPORT Debug {
  // Break execution of JavaScript.
  static void DebugBreak();

-  // Message based interface. The message protocol is JSON.
-  static void SetMessageHandler(DebugMessageHandler handler, void* data = NULL,
-                                bool message_handler_thread = true);
-  static void SendCommand(const uint16_t* command, int length);
+  // Message based interface. The message protocol is JSON. NOTE the message
+  // handler thread is not supported any more parameter must be false.
+  static void SetMessageHandler(MessageHandler handler,
+                                bool message_handler_thread = false);
+  static void SetMessageHandler2(MessageHandler2 handler);
+  static void SendCommand(const uint16_t* command, int length,
+                          ClientData* client_data = NULL);

  // Dispatch interface.
-  static void SetHostDispatchHandler(DebugHostDispatchHandler handler,
-                                     void* data = NULL);
-  static void SendHostDispatch(void* dispatch);
+  static void SetHostDispatchHandler(HostDispatchHandler handler,
+                                     int period = 100);

  /**
   * Run a JavaScript function in the debugger.
@ -176,4 +246,4 @@ class EXPORT Debug {
#undef EXPORT

-#endif // V8_DEBUG_H_
+#endif // V8_V8_DEBUG_H_
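For illustration, a minimal embedder sketch of the message-based debugger interface declared above. Only the v8::Debug types and calls (ClientData, Message, MessageHandler2, SetMessageHandler2, SendCommand) come from this header; the RequestData class, the OnDebugMessage handler and the printf reporting are hypothetical.

```cpp
#include <cstdio>
#include "v8.h"
#include "v8-debug.h"

// Hypothetical per-request payload; v8 takes ownership and deletes it.
class RequestData : public v8::Debug::ClientData {
 public:
  explicit RequestData(int id) : id_(id) {}
  int id() const { return id_; }
 private:
  int id_;
};

// Handler invoked by v8 for every debugger protocol message (event or response).
static void OnDebugMessage(const v8::Debug::Message& message) {
  v8::String::Value json(message.GetJSON());  // JSON protocol payload
  RequestData* data = static_cast<RequestData*>(message.GetClientData());
  std::printf("debug %s, request %d, %d chars of JSON\n",
              message.IsResponse() ? "response" : "event",
              data != NULL ? data->id() : -1,
              json.length());
}

// Registers the handler and sends one command; the ClientData instance is
// echoed back on the matching response via Message::GetClientData().
void SendDebuggerCommand(const uint16_t* command, int length) {
  v8::Debug::SetMessageHandler2(OnDebugMessage);
  v8::Debug::SendCommand(command, length, new RequestData(1));
}
```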

deps/v8/include/v8.h (73 lines changed)

@ -41,10 +41,15 @@
#include <stdio.h>

#ifdef _WIN32
+typedef signed char int8_t;
+typedef unsigned char uint8_t;
+typedef short int16_t;  // NOLINT
+typedef unsigned short uint16_t;  // NOLINT
typedef int int32_t;
typedef unsigned int uint32_t;
-typedef unsigned short uint16_t;  // NOLINT
-typedef long long int64_t;  // NOLINT
+typedef __int64 int64_t;
+typedef unsigned __int64 uint64_t;
+// intptr_t is defined in crtdefs.h through stdio.h.

// Setup for Windows DLL export/import. When building the V8 DLL the
// BUILDING_V8_SHARED needs to be defined. When building a program which uses
@ -529,6 +534,13 @@ class V8EXPORT Script {
* Returns the script id value. * Returns the script id value.
*/ */
Local<Value> Id(); Local<Value> Id();
/**
* Associate an additional data object with the script. This is mainly used
* with the debugger as this data object is only available through the
* debugger API.
*/
void SetData(Handle<Value> data);
}; };
@ -540,8 +552,18 @@ class V8EXPORT Message {
Local<String> Get() const; Local<String> Get() const;
Local<String> GetSourceLine() const; Local<String> GetSourceLine() const;
/**
* Returns the resource name for the script from where the function causing
* the error originates.
*/
Handle<Value> GetScriptResourceName() const; Handle<Value> GetScriptResourceName() const;
/**
* Returns the resource data for the script from where the function causing
* the error originates.
*/
Handle<Value> GetScriptData() const;
/** /**
* Returns the number, 1-based, of the line where the error occurred. * Returns the number, 1-based, of the line where the error occurred.
*/ */
@ -805,14 +827,14 @@ class V8EXPORT String : public Primitive {
};

/**
- * Get the ExternalStringResource for an external string. Only
- * valid if IsExternal() returns true.
+ * Get the ExternalStringResource for an external string. Returns
+ * NULL if IsExternal() doesn't return true.
 */
ExternalStringResource* GetExternalStringResource() const;

/**
 * Get the ExternalAsciiStringResource for an external ascii string.
- * Only valid if IsExternalAscii() returns true.
+ * Returns NULL if IsExternalAscii() doesn't return true.
 */
ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
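A small sketch (not part of the commit) of what the relaxed contract above allows: checking the returned pointer for NULL instead of calling IsExternal()/IsExternalAscii() first. The helper name is hypothetical.

```cpp
#include "v8.h"

// Returns true when `str` is backed by an external two-byte resource; with the
// new contract, a NULL return value is the "not external" signal.
bool HasExternalTwoByteBacking(v8::Handle<v8::String> str) {
  v8::String::ExternalStringResource* resource =
      str->GetExternalStringResource();
  return resource != NULL;
}
```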
@ -1028,6 +1050,18 @@ class V8EXPORT Object : public Value {
bool Set(Handle<Value> key, bool Set(Handle<Value> key,
Handle<Value> value, Handle<Value> value,
PropertyAttribute attribs = None); PropertyAttribute attribs = None);
// Sets a local property on this object, bypassing interceptors and
// overriding accessors or read-only properties.
//
// Note that if the object has an interceptor the property will be set
// locally, but since the interceptor takes precedence the local property
// will only be returned if the interceptor doesn't return a value.
//
// Note also that this only works for named properties.
bool ForceSet(Handle<Value> key,
Handle<Value> value,
PropertyAttribute attribs = None);
Local<Value> Get(Handle<Value> key); Local<Value> Get(Handle<Value> key);
// TODO(1245389): Replace the type-specific versions of these // TODO(1245389): Replace the type-specific versions of these
@ -1093,6 +1127,9 @@ class V8EXPORT Object : public Value {
/** /**
* Returns the identity hash for this object. The current implemenation uses * Returns the identity hash for this object. The current implemenation uses
* a hidden property on the object to store the identity hash. * a hidden property on the object to store the identity hash.
*
* The return value will never be 0. Also, it is not guaranteed to be
* unique.
*/ */
int GetIdentityHash(); int GetIdentityHash();
@ -2042,6 +2079,24 @@ class V8EXPORT V8 {
*/ */
static void ResumeProfiler(); static void ResumeProfiler();
/**
* If logging is performed into a memory buffer (via --logfile=*), allows to
* retrieve previously written messages. This can be used for retrieving
* profiler log data in the application. This function is thread-safe.
*
* Caller provides a destination buffer that must exist during GetLogLines
* call. Only whole log lines are copied into the buffer.
*
* \param from_pos specified a point in a buffer to read from, 0 is the
* beginning of a buffer. It is assumed that caller updates its current
* position using returned size value from the previous call.
* \param dest_buf destination buffer for log data.
* \param max_size size of the destination buffer.
* \returns actual size of log data copied into buffer.
*/
static int GetLogLines(int from_pos, char* dest_buf, int max_size);
/**
 * Releases any resources used by v8 and stops any utility threads
 * that may be running. Note that disposing v8 is permanent, it
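A usage sketch (not part of the commit) for the GetLogLines() API documented above, assuming V8 was started with --logfile=* so logging goes to the in-memory buffer; DrainLog is a hypothetical helper.

```cpp
#include <string>
#include "v8.h"

// Copies whatever complete log lines are currently buffered into a std::string.
// The caller tracks its own read position, as the documentation above requires.
std::string DrainLog() {
  std::string log;
  char buffer[2048];
  int pos = 0;
  for (;;) {
    int size = v8::V8::GetLogLines(pos, buffer, static_cast<int>(sizeof(buffer)));
    if (size == 0) break;  // no complete line available right now
    log.append(buffer, size);
    pos += size;
  }
  return log;
}
```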
@ -2222,6 +2277,14 @@ class V8EXPORT Context {
/** Returns true if V8 has a current context. */ /** Returns true if V8 has a current context. */
static bool InContext(); static bool InContext();
/**
* Associate an additional data object with the context. This is mainly used
* with the debugger to provide additional information on the context through
* the debugger API.
*/
void SetData(Handle<Value> data);
Local<Value> GetData();
/**
 * Stack-allocated class which sets the execution context for all
 * operations executed within a local scope.
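A short sketch (not part of the commit) of the new Object::ForceSet() declared earlier in this header, overriding a read-only property; everything besides the Set()/ForceSet() signatures is ordinary embedding boilerplate.

```cpp
#include "v8.h"

void ForceSetExample() {
  v8::HandleScope scope;
  v8::Persistent<v8::Context> context = v8::Context::New();
  v8::Context::Scope context_scope(context);
  v8::Local<v8::Object> global = context->Global();
  // Define x as read-only, then override it: a plain Set() is ignored for the
  // read-only property, while ForceSet() bypasses the attribute (and any
  // interceptor), as documented above.
  global->Set(v8::String::New("x"), v8::Integer::New(1), v8::ReadOnly);
  global->ForceSet(v8::String::New("x"), v8::Integer::New(2));
  context.Dispose();
}
```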

deps/v8/samples/shell.cc (22 lines changed)

@ -38,6 +38,7 @@ bool ExecuteString(v8::Handle<v8::String> source,
bool print_result, bool print_result,
bool report_exceptions); bool report_exceptions);
v8::Handle<v8::Value> Print(const v8::Arguments& args); v8::Handle<v8::Value> Print(const v8::Arguments& args);
v8::Handle<v8::Value> Read(const v8::Arguments& args);
v8::Handle<v8::Value> Load(const v8::Arguments& args); v8::Handle<v8::Value> Load(const v8::Arguments& args);
v8::Handle<v8::Value> Quit(const v8::Arguments& args); v8::Handle<v8::Value> Quit(const v8::Arguments& args);
v8::Handle<v8::Value> Version(const v8::Arguments& args); v8::Handle<v8::Value> Version(const v8::Arguments& args);
@ -52,6 +53,8 @@ int RunMain(int argc, char* argv[]) {
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New(); v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
// Bind the global 'print' function to the C++ Print callback. // Bind the global 'print' function to the C++ Print callback.
global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print)); global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
// Bind the global 'read' function to the C++ Read callback.
global->Set(v8::String::New("read"), v8::FunctionTemplate::New(Read));
// Bind the global 'load' function to the C++ Load callback. // Bind the global 'load' function to the C++ Load callback.
global->Set(v8::String::New("load"), v8::FunctionTemplate::New(Load)); global->Set(v8::String::New("load"), v8::FunctionTemplate::New(Load));
// Bind the 'quit' function // Bind the 'quit' function
@ -135,6 +138,25 @@ v8::Handle<v8::Value> Print(const v8::Arguments& args) {
} }
// The callback that is invoked by v8 whenever the JavaScript 'read'
// function is called. This function loads the content of the file named in
// the argument into a JavaScript string.
v8::Handle<v8::Value> Read(const v8::Arguments& args) {
if (args.Length() != 1) {
return v8::ThrowException(v8::String::New("Bad parameters"));
}
v8::String::Utf8Value file(args[0]);
if (*file == NULL) {
return v8::ThrowException(v8::String::New("Error loading file"));
}
v8::Handle<v8::String> source = ReadFile(*file);
if (source.IsEmpty()) {
return v8::ThrowException(v8::String::New("Error loading file"));
}
return source;
}
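The Read callback above delegates to shell.cc's existing ReadFile() helper, which this hunk does not show; a rough sketch of such a helper (the name and buffer handling are illustrative, not necessarily the shell's actual code):

```cpp
#include <cstdio>
#include "v8.h"

// Reads a whole file into a v8 string; returns an empty handle on error so the
// caller can throw "Error loading file" as Read() does above.
v8::Handle<v8::String> ReadFileSketch(const char* name) {
  std::FILE* file = std::fopen(name, "rb");
  if (file == NULL) return v8::Handle<v8::String>();
  std::fseek(file, 0, SEEK_END);
  long size = std::ftell(file);
  std::rewind(file);
  char* chars = new char[size + 1];
  for (long i = 0; i < size; ) {
    i += std::fread(&chars[i], 1, size - i, file);
  }
  std::fclose(file);
  v8::Handle<v8::String> result =
      v8::String::New(chars, static_cast<int>(size));
  delete[] chars;
  return result;
}
```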
// The callback that is invoked by v8 whenever the JavaScript 'load'
// function is called. Loads, compiles and executes its argument
// JavaScript file.

deps/v8/src/SConscript (38 lines changed)

@ -50,23 +50,36 @@ SOURCES = {
'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc', 'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc', 'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc', 'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
'v8.cc', 'v8threads.cc', 'variables.cc', 'virtual-frame.cc', 'zone.cc' 'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
'virtual-frame.cc', 'zone.cc'
], ],
'arch:arm': [ 'arch:arm': [
'assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc', 'cpu-arm.cc', 'arm/assembler-arm.cc', 'arm/builtins-arm.cc',
'disasm-arm.cc', 'debug-arm.cc', 'frames-arm.cc', 'ic-arm.cc', 'arm/codegen-arm.cc', 'arm/cpu-arm.cc', 'arm/disasm-arm.cc',
'jump-target-arm.cc', 'macro-assembler-arm.cc', 'arm/debug-arm.cc', 'arm/frames-arm.cc', 'arm/ic-arm.cc',
'regexp-macro-assembler-arm.cc', 'register-allocator-arm.cc', 'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
'stub-cache-arm.cc', 'virtual-frame-arm.cc' 'arm/regexp-macro-assembler-arm.cc',
'arm/register-allocator-arm.cc', 'arm/stub-cache-arm.cc',
'arm/virtual-frame-arm.cc'
], ],
'arch:ia32': [ 'arch:ia32': [
'assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc', 'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc',
'cpu-ia32.cc', 'disasm-ia32.cc', 'debug-ia32.cc', 'frames-ia32.cc', 'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
'ic-ia32.cc', 'jump-target-ia32.cc', 'macro-assembler-ia32.cc', 'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
'regexp-macro-assembler-ia32.cc', 'register-allocator-ia32.cc', 'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
'stub-cache-ia32.cc', 'virtual-frame-ia32.cc' 'ia32/regexp-macro-assembler-ia32.cc',
'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc',
'ia32/virtual-frame-ia32.cc'
], ],
'simulator:arm': ['simulator-arm.cc'], 'arch:x64': [
'x64/assembler-x64.cc', 'x64/builtins-x64.cc',
'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc',
'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
# 'x64/regexp-macro-assembler-x64.cc',
'x64/stub-cache-x64.cc'
],
'simulator:arm': ['arm/simulator-arm.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'], 'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
'os:linux': ['platform-linux.cc', 'platform-posix.cc'], 'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
'os:android': ['platform-linux.cc', 'platform-posix.cc'], 'os:android': ['platform-linux.cc', 'platform-posix.cc'],
@ -121,6 +134,7 @@ debug-delay.js
mirror-delay.js mirror-delay.js
date-delay.js date-delay.js
regexp-delay.js regexp-delay.js
json-delay.js
'''.split() '''.split()

deps/v8/src/accessors.cc (37 lines changed)

@ -251,6 +251,24 @@ const AccessorDescriptor Accessors::ScriptColumnOffset = {
}; };
//
// Accessors::ScriptData
//
Object* Accessors::ScriptGetData(Object* object, void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->data();
}
const AccessorDescriptor Accessors::ScriptData = {
ScriptGetData,
IllegalSetter,
0
};
// //
// Accessors::ScriptType // Accessors::ScriptType
// //
@ -289,6 +307,25 @@ const AccessorDescriptor Accessors::ScriptLineEnds = {
}; };
//
// Accessors::ScriptGetContextData
//
Object* Accessors::ScriptGetContextData(Object* object, void*) {
HandleScope scope;
Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
return script->context_data();
}
const AccessorDescriptor Accessors::ScriptContextData = {
ScriptGetContextData,
IllegalSetter,
0
};
// //
// Accessors::FunctionPrototype // Accessors::FunctionPrototype
// //

deps/v8/src/accessors.h (4 lines changed)

@ -45,8 +45,10 @@ namespace v8 { namespace internal {
V(ScriptId) \ V(ScriptId) \
V(ScriptLineOffset) \ V(ScriptLineOffset) \
V(ScriptColumnOffset) \ V(ScriptColumnOffset) \
V(ScriptData) \
V(ScriptType) \ V(ScriptType) \
V(ScriptLineEnds) \ V(ScriptLineEnds) \
V(ScriptContextData) \
V(ObjectPrototype) V(ObjectPrototype)
// Accessors contains all predefined proxy accessors. // Accessors contains all predefined proxy accessors.
@ -84,8 +86,10 @@ class Accessors : public AllStatic {
static Object* ScriptGetSource(Object* object, void*); static Object* ScriptGetSource(Object* object, void*);
static Object* ScriptGetLineOffset(Object* object, void*); static Object* ScriptGetLineOffset(Object* object, void*);
static Object* ScriptGetColumnOffset(Object* object, void*); static Object* ScriptGetColumnOffset(Object* object, void*);
static Object* ScriptGetData(Object* object, void*);
static Object* ScriptGetType(Object* object, void*); static Object* ScriptGetType(Object* object, void*);
static Object* ScriptGetLineEnds(Object* object, void*); static Object* ScriptGetLineEnds(Object* object, void*);
static Object* ScriptGetContextData(Object* object, void*);
static Object* ObjectGetPrototype(Object* receiver, void*); static Object* ObjectGetPrototype(Object* receiver, void*);
static Object* ObjectSetPrototype(JSObject* receiver, Object* value, void*); static Object* ObjectSetPrototype(JSObject* receiver, Object* value, void*);

deps/v8/src/api.cc (173 lines changed)

@ -37,6 +37,7 @@
#include "serialize.h" #include "serialize.h"
#include "snapshot.h" #include "snapshot.h"
#include "v8threads.h" #include "v8threads.h"
#include "version.h"
#define LOG_API(expr) LOG(ApiEntryCall(expr)) #define LOG_API(expr) LOG(ApiEntryCall(expr))
@ -444,6 +445,40 @@ void Context::Exit() {
} }
void Context::SetData(v8::Handle<Value> data) {
if (IsDeadCheck("v8::Context::SetData()")) return;
ENTER_V8;
{
HandleScope scope;
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
ASSERT(env->IsGlobalContext());
if (env->IsGlobalContext()) {
env->set_data(*raw_data);
}
}
}
v8::Local<v8::Value> Context::GetData() {
if (IsDeadCheck("v8::Context::GetData()")) return v8::Local<Value>();
ENTER_V8;
i::Object* raw_result = NULL;
{
HandleScope scope;
i::Handle<i::Context> env = Utils::OpenHandle(this);
ASSERT(env->IsGlobalContext());
if (env->IsGlobalContext()) {
raw_result = env->data();
} else {
return Local<Value>();
}
}
i::Handle<i::Object> result(raw_result);
return Utils::ToLocal(result);
}
void** v8::HandleScope::RawClose(void** value) { void** v8::HandleScope::RawClose(void** value) {
if (!ApiCheck(!is_closed_, if (!ApiCheck(!is_closed_,
"v8::HandleScope::Close()", "v8::HandleScope::Close()",
@ -1108,6 +1143,19 @@ Local<Value> Script::Id() {
} }
void Script::SetData(v8::Handle<Value> data) {
ON_BAILOUT("v8::Script::SetData()", return);
LOG_API("Script::SetData");
{
HandleScope scope;
i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
i::Handle<i::Script> script(i::Script::cast(fun->shared()->script()));
script->set_data(*raw_data);
}
}
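A sketch (not part of the commit) tying Script::SetData() above to the new Message::GetScriptData(): a script is tagged at compile time and the tag is recovered when an exception is reported. The tag value and function name are hypothetical.

```cpp
#include "v8.h"

// Compiles and runs `source`; if it throws, returns the data object that was
// attached to the script, otherwise an empty handle.
v8::Handle<v8::Value> RunTaggedScript(v8::Handle<v8::String> source) {
  v8::HandleScope scope;
  v8::Handle<v8::Script> script = v8::Script::Compile(source);
  if (script.IsEmpty()) return v8::Handle<v8::Value>();
  script->SetData(v8::String::New("my-module"));  // tag visible to the debugger
  v8::TryCatch try_catch;
  script->Run();
  if (try_catch.HasCaught() && !try_catch.Message().IsEmpty()) {
    return scope.Close(try_catch.Message()->GetScriptData());
  }
  return v8::Handle<v8::Value>();
}
```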
// --- E x c e p t i o n s --- // --- E x c e p t i o n s ---
@ -1199,6 +1247,22 @@ v8::Handle<Value> Message::GetScriptResourceName() const {
} }
v8::Handle<Value> Message::GetScriptData() const {
if (IsDeadCheck("v8::Message::GetScriptResourceData()")) {
return Local<Value>();
}
ENTER_V8;
HandleScope scope;
i::Handle<i::JSObject> obj =
i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
// Return this.script.data.
i::Handle<i::JSValue> script =
i::Handle<i::JSValue>::cast(GetProperty(obj, "script"));
i::Handle<i::Object> data(i::Script::cast(script->value())->data());
return scope.Close(Utils::ToLocal(data));
}
static i::Handle<i::Object> CallV8HeapFunction(const char* name, static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> recv, i::Handle<i::Object> recv,
int argc, int argc,
@ -1806,6 +1870,26 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
} }
bool v8::Object::ForceSet(v8::Handle<Value> key,
v8::Handle<Value> value,
v8::PropertyAttribute attribs) {
ON_BAILOUT("v8::Object::ForceSet()", return false);
ENTER_V8;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE();
i::Handle<i::Object> obj = i::ForceSetProperty(
self,
key_obj,
value_obj,
static_cast<PropertyAttributes>(attribs));
has_pending_exception = obj.is_null();
EXCEPTION_BAILOUT_CHECK(false);
return true;
}
Local<Value> v8::Object::Get(v8::Handle<Value> key) { Local<Value> v8::Object::Get(v8::Handle<Value> key) {
ON_BAILOUT("v8::Object::Get()", return Local<v8::Value>()); ON_BAILOUT("v8::Object::Get()", return Local<v8::Value>());
ENTER_V8; ENTER_V8;
@ -2023,7 +2107,12 @@ int v8::Object::GetIdentityHash() {
  if (hash->IsSmi()) {
    hash_value = i::Smi::cast(*hash)->value();
  } else {
+    int attempts = 0;
+    do {
      hash_value = random() & i::Smi::kMaxValue;  // Limit range to fit a smi.
+      attempts++;
+    } while (hash_value == 0 && attempts < 30);
+    hash_value = hash_value != 0 ? hash_value : 1;  // never return 0
    i::SetProperty(hidden_props,
                   hash_symbol,
                   i::Handle<i::Object>(i::Smi::FromInt(hash_value)),
@ -2266,9 +2355,12 @@ v8::String::ExternalStringResource*
v8::String::GetExternalStringResource() const { v8::String::GetExternalStringResource() const {
EnsureInitialized("v8::String::GetExternalStringResource()"); EnsureInitialized("v8::String::GetExternalStringResource()");
i::Handle<i::String> str = Utils::OpenHandle(this); i::Handle<i::String> str = Utils::OpenHandle(this);
ASSERT(str->IsExternalTwoByteString()); if (i::StringShape(*str).IsExternalTwoByte()) {
void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource(); void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
return reinterpret_cast<ExternalStringResource*>(resource); return reinterpret_cast<ExternalStringResource*>(resource);
} else {
return NULL;
}
} }
@ -2276,9 +2368,12 @@ v8::String::ExternalAsciiStringResource*
v8::String::GetExternalAsciiStringResource() const { v8::String::GetExternalAsciiStringResource() const {
EnsureInitialized("v8::String::GetExternalAsciiStringResource()"); EnsureInitialized("v8::String::GetExternalAsciiStringResource()");
i::Handle<i::String> str = Utils::OpenHandle(this); i::Handle<i::String> str = Utils::OpenHandle(this);
ASSERT(str->IsExternalAsciiString()); if (i::StringShape(*str).IsExternalAscii()) {
void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource(); void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
return reinterpret_cast<ExternalAsciiStringResource*>(resource); return reinterpret_cast<ExternalAsciiStringResource*>(resource);
} else {
return NULL;
}
} }
@ -2373,7 +2468,9 @@ bool v8::V8::Dispose() {
const char* v8::V8::GetVersion() {
-  return "1.1.10.4";
+  static v8::internal::EmbeddedVector<char, 128> buffer;
+  v8::internal::Version::GetString(buffer);
+  return buffer.start();
}
@ -2589,11 +2686,14 @@ Local<Value> v8::External::Wrap(void* data) {
ENTER_V8; ENTER_V8;
if ((reinterpret_cast<intptr_t>(data) & kAlignedPointerMask) == 0) { if ((reinterpret_cast<intptr_t>(data) & kAlignedPointerMask) == 0) {
uintptr_t data_ptr = reinterpret_cast<uintptr_t>(data); uintptr_t data_ptr = reinterpret_cast<uintptr_t>(data);
int data_value = static_cast<int>(data_ptr >> kAlignedPointerShift); intptr_t data_value =
static_cast<intptr_t>(data_ptr >> kAlignedPointerShift);
STATIC_ASSERT(sizeof(data_ptr) == sizeof(data_value)); STATIC_ASSERT(sizeof(data_ptr) == sizeof(data_value));
i::Handle<i::Object> obj(i::Smi::FromInt(data_value)); if (i::Smi::IsIntptrValid(data_value)) {
i::Handle<i::Object> obj(i::Smi::FromIntptr(data_value));
return Utils::ToLocal(obj); return Utils::ToLocal(obj);
} }
}
return ExternalNewImpl(data); return ExternalNewImpl(data);
} }
@ -2603,7 +2703,8 @@ void* v8::External::Unwrap(v8::Handle<v8::Value> value) {
i::Handle<i::Object> obj = Utils::OpenHandle(*value); i::Handle<i::Object> obj = Utils::OpenHandle(*value);
if (obj->IsSmi()) { if (obj->IsSmi()) {
// The external value was an aligned pointer. // The external value was an aligned pointer.
uintptr_t result = i::Smi::cast(*obj)->value() << kAlignedPointerShift; uintptr_t result = static_cast<uintptr_t>(
i::Smi::cast(*obj)->value()) << kAlignedPointerShift;
return reinterpret_cast<void*>(result); return reinterpret_cast<void*>(result);
} }
return ExternalValueImpl(obj); return ExternalValueImpl(obj);
@ -3021,6 +3122,11 @@ void V8::ResumeProfiler() {
#endif #endif
} }
int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
return i::Logger::GetLogLines(from_pos, dest_buf, max_size);
#endif
}
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) { String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) {
EnsureInitialized("v8::String::Utf8Value::Utf8Value()"); EnsureInitialized("v8::String::Utf8Value::Utf8Value()");
@ -3180,8 +3286,8 @@ Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
// --- D e b u g   S u p p o r t ---

+#ifdef ENABLE_DEBUGGER_SUPPORT
-bool Debug::SetDebugEventListener(DebugEventCallback that, Handle<Value> data) {
+bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
  EnsureInitialized("v8::Debug::SetDebugEventListener()");
  ON_BAILOUT("v8::Debug::SetDebugEventListener()", return false);
  ENTER_V8;
@ -3211,31 +3317,54 @@ void Debug::DebugBreak() {
}

-void Debug::SetMessageHandler(v8::DebugMessageHandler handler, void* data,
+static v8::Debug::MessageHandler message_handler = NULL;
+
+static void MessageHandlerWrapper(const v8::Debug::Message& message) {
+  if (message_handler) {
+    v8::String::Value json(message.GetJSON());
+    message_handler(*json, json.length(), message.GetClientData());
+  }
+}
+
+void Debug::SetMessageHandler(v8::Debug::MessageHandler handler,
                              bool message_handler_thread) {
  EnsureInitialized("v8::Debug::SetMessageHandler");
  ENTER_V8;
-  i::Debugger::SetMessageHandler(handler, data, message_handler_thread);
-}
+  // Message handler thread not supported any more. Parameter temporally left in
+  // the API for client compatability reasons.
+  CHECK(!message_handler_thread);

-void Debug::SendCommand(const uint16_t* command, int length) {
-  if (!i::V8::HasBeenSetup()) return;
-  i::Debugger::ProcessCommand(i::Vector<const uint16_t>(command, length));
+  // TODO(sgjesse) support the old message handler API through a simple wrapper.
+  message_handler = handler;
+  if (message_handler != NULL) {
+    i::Debugger::SetMessageHandler(MessageHandlerWrapper);
+  } else {
+    i::Debugger::SetMessageHandler(NULL);
+  }
}

-void Debug::SetHostDispatchHandler(v8::DebugHostDispatchHandler handler,
-                                   void* data) {
-  EnsureInitialized("v8::Debug::SetHostDispatchHandler");
+void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
+  EnsureInitialized("v8::Debug::SetMessageHandler");
  ENTER_V8;
-  i::Debugger::SetHostDispatchHandler(handler, data);
+  i::Debugger::SetMessageHandler(handler);
}

-void Debug::SendHostDispatch(void* dispatch) {
+void Debug::SendCommand(const uint16_t* command, int length,
+                        ClientData* client_data) {
  if (!i::V8::HasBeenSetup()) return;
-  i::Debugger::ProcessHostDispatch(dispatch);
+  i::Debugger::ProcessCommand(i::Vector<const uint16_t>(command, length),
+                              client_data);
+}
+
+void Debug::SetHostDispatchHandler(HostDispatchHandler handler,
+                                   int period) {
+  EnsureInitialized("v8::Debug::SetHostDispatchHandler");
+  ENTER_V8;
+  i::Debugger::SetHostDispatchHandler(handler, period);
}
@ -3263,7 +3392,7 @@ Handle<Value> Debug::Call(v8::Handle<v8::Function> fun,
bool Debug::EnableAgent(const char* name, int port) {
  return i::Debugger::StartAgent(name, port);
}
+#endif  // ENABLE_DEBUGGER_SUPPORT
namespace internal { namespace internal {

deps/v8/src/assembler-arm-inl.h → deps/v8/src/arm/assembler-arm-inl.h (8 lines changed)

@ -34,10 +34,10 @@
// significantly by Google Inc.
// Copyright 2006-2008 the V8 project authors. All rights reserved.

-#ifndef V8_ASSEMBLER_ARM_INL_H_
-#define V8_ASSEMBLER_ARM_INL_H_
+#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
+#define V8_ARM_ASSEMBLER_ARM_INL_H_

-#include "assembler-arm.h"
+#include "arm/assembler-arm.h"
#include "cpu.h"
@ -246,4 +246,4 @@ void Assembler::set_target_address_at(Address pc, Address target) {
} } // namespace v8::internal

-#endif // V8_ASSEMBLER_ARM_INL_H_
+#endif // V8_ARM_ASSEMBLER_ARM_INL_H_

deps/v8/src/assembler-arm.cc → deps/v8/src/arm/assembler-arm.cc (2 lines changed)

@ -36,7 +36,7 @@
#include "v8.h" #include "v8.h"
#include "assembler-arm-inl.h" #include "arm/assembler-arm-inl.h"
#include "serialize.h" #include "serialize.h"
namespace v8 { namespace internal { namespace v8 { namespace internal {

deps/v8/src/assembler-arm.h → deps/v8/src/arm/assembler-arm.h (40 lines changed)

@ -37,8 +37,8 @@
// A light-weight ARM Assembler
// Generates user mode instructions for the ARM architecture up to version 5

-#ifndef V8_ASSEMBLER_ARM_H_
-#define V8_ASSEMBLER_ARM_H_
+#ifndef V8_ARM_ASSEMBLER_ARM_H_
+#define V8_ARM_ASSEMBLER_ARM_H_

#include "assembler.h"
@ -164,23 +164,23 @@ enum Coprocessor {
// Condition field in instructions
enum Condition {
-  eq = 0 << 28,
-  ne = 1 << 28,
-  cs = 2 << 28,
-  hs = 2 << 28,
-  cc = 3 << 28,
-  lo = 3 << 28,
-  mi = 4 << 28,
-  pl = 5 << 28,
-  vs = 6 << 28,
-  vc = 7 << 28,
-  hi = 8 << 28,
-  ls = 9 << 28,
-  ge = 10 << 28,
-  lt = 11 << 28,
-  gt = 12 << 28,
-  le = 13 << 28,
-  al = 14 << 28
+  eq = 0 << 28,  // Z set equal.
+  ne = 1 << 28,  // Z clear not equal.
+  cs = 2 << 28,  // C set unsigned higher or same.
+  hs = 2 << 28,  // C set unsigned higher or same.
+  cc = 3 << 28,  // C clear unsigned lower.
+  lo = 3 << 28,  // C clear unsigned lower.
+  mi = 4 << 28,  // N set negative.
+  pl = 5 << 28,  // N clear positive or zero.
+  vs = 6 << 28,  // V set overflow.
+  vc = 7 << 28,  // V clear no overflow.
+  hi = 8 << 28,  // C set, Z clear unsigned higher.
+  ls = 9 << 28,  // C clear or Z set unsigned lower or same.
+  ge = 10 << 28,  // N == V greater or equal.
+  lt = 11 << 28,  // N != V less than.
+  gt = 12 << 28,  // Z clear, N == V greater than.
+  le = 13 << 28,  // Z set or N != V less then or equal
+  al = 14 << 28  // always.
};
@ -786,4 +786,4 @@ class Assembler : public Malloced {
} } // namespace v8::internal

-#endif // V8_ASSEMBLER_ARM_H_
+#endif // V8_ARM_ASSEMBLER_ARM_H_

deps/v8/src/builtins-arm.cc → deps/v8/src/arm/builtins-arm.cc (63 lines changed)

@ -34,7 +34,7 @@
namespace v8 { namespace internal { namespace v8 { namespace internal {
-#define __ masm->
+#define __ ACCESS_MASM(masm)
void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) { void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
@ -58,6 +58,16 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- sp[...]: constructor arguments // -- sp[...]: constructor arguments
// ----------------------------------- // -----------------------------------
Label non_function_call;
// Check that the function is not a smi.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &non_function_call);
// Check that the function is a JSFunction.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(JS_FUNCTION_TYPE));
__ b(ne, &non_function_call);
// Enter a construct frame. // Enter a construct frame.
__ EnterConstructFrame(); __ EnterConstructFrame();
@ -169,7 +179,17 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ LeaveConstructFrame(); __ LeaveConstructFrame();
  __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
  __ add(sp, sp, Operand(kPointerSize));
-  __ mov(pc, Operand(lr));
+  __ Jump(lr);
// r0: number of arguments
// r1: called object
__ bind(&non_function_call);
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
} }
@ -218,8 +238,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
  __ mov(r5, Operand(r4));
  __ mov(r6, Operand(r4));
  __ mov(r7, Operand(r4));
-  if (kR9Available == 1)
+  if (kR9Available == 1) {
    __ mov(r9, Operand(r4));
+  }

  // Invoke the code and pass argc as r0.
  __ mov(r0, Operand(r3));
@ -234,7 +255,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
  // Exit the JS frame and remove the parameters (except function), and return.
  // Respect ABI stack constraint.
  __ LeaveInternalFrame();
-  __ mov(pc, lr);
+  __ Jump(lr);

  // r0: result
}
@ -416,15 +437,35 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(r0); __ push(r0);
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS); __ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS);
// Eagerly check for stack-overflow before starting to push the arguments. Label no_preemption, retry_preemption;
// r0: number of arguments __ bind(&retry_preemption);
Label okay;
ExternalReference stack_guard_limit_address = ExternalReference stack_guard_limit_address =
ExternalReference::address_of_stack_guard_limit(); ExternalReference::address_of_stack_guard_limit();
__ mov(r2, Operand(stack_guard_limit_address)); __ mov(r2, Operand(stack_guard_limit_address));
__ ldr(r2, MemOperand(r2)); __ ldr(r2, MemOperand(r2));
__ cmp(sp, r2);
__ b(hi, &no_preemption);
// We have encountered a preemption or stack overflow already before we push
// the array contents. Save r0 which is the Smi-tagged length of the array.
__ push(r0);
// Runtime routines expect at least one argument, so give it a Smi.
__ mov(r0, Operand(Smi::FromInt(0)));
__ push(r0);
__ CallRuntime(Runtime::kStackGuard, 1);
// Since we returned, it wasn't a stack overflow. Restore r0 and try again.
__ pop(r0);
__ b(&retry_preemption);
__ bind(&no_preemption);
// Eagerly check for stack-overflow before starting to push the arguments.
// r0: number of arguments.
// r2: stack limit.
Label okay;
__ sub(r2, sp, r2); __ sub(r2, sp, r2);
__ sub(r2, r2, Operand(3 * kPointerSize)); // limit, index, receiver
__ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); __ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
__ b(hi, &okay); __ b(hi, &okay);
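The rewritten prologue above handles a pending preemption or stack overflow first (sp already at or below the guard limit) and only then checks that the arguments about to be pushed will fit. Read as plain arithmetic, with r0 holding the smi-tagged argument count (a restatement of the instructions, not new behaviour):

    intptr_t free_bytes   = sp - stack_guard_limit - 3 * kPointerSize;  // room for limit, index, receiver
    intptr_t needed_bytes = smi_tagged_argc << (kPointerSizeLog2 - kSmiTagSize);  // == argc * kPointerSize
    bool     okay         = static_cast<uintptr_t>(free_bytes) >
                            static_cast<uintptr_t>(needed_bytes);       // b(hi, &okay) is an unsigned compare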
@ -523,7 +564,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Tear down the internal frame and remove function, receiver and args. // Tear down the internal frame and remove function, receiver and args.
__ LeaveInternalFrame(); __ LeaveInternalFrame();
__ add(sp, sp, Operand(3 * kPointerSize)); __ add(sp, sp, Operand(3 * kPointerSize));
__ mov(pc, lr); __ Jump(lr);
} }
@ -642,14 +683,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Exit frame and return. // Exit frame and return.
LeaveArgumentsAdaptorFrame(masm); LeaveArgumentsAdaptorFrame(masm);
__ mov(pc, lr); __ Jump(lr);
// ------------------------------------------- // -------------------------------------------
// Don't adapt arguments. // Don't adapt arguments.
// ------------------------------------------- // -------------------------------------------
__ bind(&dont_adapt_arguments); __ bind(&dont_adapt_arguments);
__ mov(pc, r3); __ Jump(r3);
} }
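This file (and arm/codegen-arm.cc below) consistently replaces raw `mov(pc, lr)` / `mov(pc, r3)` returns with `Jump(lr)` / `Jump(r3)`, funnelling every indirect jump through one macro-assembler helper. A plausible shape for that helper, mirroring the Ret(Condition) change shown later in arm/macro-assembler-arm.cc (a sketch, not code quoted from this commit):

    void MacroAssembler::Jump(Register target, Condition cond) {
    #if USE_BX
      bx(target, cond);                         // use BX where the core provides it
    #else
      mov(pc, Operand(target), LeaveCC, cond);  // classic ARM fallback: write the pc
    #endif
    }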

514
deps/v8/src/codegen-arm.cc → deps/v8/src/arm/codegen-arm.cc

@ -38,7 +38,8 @@
namespace v8 { namespace internal { namespace v8 { namespace internal {
#define __ masm_-> #define __ ACCESS_MASM(masm_)
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// CodeGenState implementation. // CodeGenState implementation.
@ -146,13 +147,13 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
frame_->EmitPush(r0); frame_->EmitPush(r0);
frame_->CallRuntime(Runtime::kNewContext, 1); // r0 holds the result frame_->CallRuntime(Runtime::kNewContext, 1); // r0 holds the result
if (kDebug) { #ifdef DEBUG
JumpTarget verified_true(this); JumpTarget verified_true(this);
__ cmp(r0, Operand(cp)); __ cmp(r0, Operand(cp));
verified_true.Branch(eq); verified_true.Branch(eq);
__ stop("NewContext: r0 is expected to be the same as cp"); __ stop("NewContext: r0 is expected to be the same as cp");
verified_true.Bind(); verified_true.Bind();
} #endif
// Update context local. // Update context local.
__ str(cp, frame_->Context()); __ str(cp, frame_->Context());
} }
@ -276,7 +277,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
frame_->Exit(); frame_->Exit();
__ add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize)); __ add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
__ mov(pc, lr); __ Jump(lr);
} }
// Code generation state must be reset. // Code generation state must be reset.
@ -653,37 +654,27 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
} }
class GetPropertyStub : public CodeStub {
public:
GetPropertyStub() { }
private:
Major MajorKey() { return GetProperty; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
class SetPropertyStub : public CodeStub {
public:
SetPropertyStub() { }
private:
Major MajorKey() { return SetProperty; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
class GenericBinaryOpStub : public CodeStub { class GenericBinaryOpStub : public CodeStub {
public: public:
explicit GenericBinaryOpStub(Token::Value op) : op_(op) { } GenericBinaryOpStub(Token::Value op,
OverwriteMode mode)
: op_(op), mode_(mode) { }
private: private:
Token::Value op_; Token::Value op_;
OverwriteMode mode_;
// Minor key encoding in 16 bits.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 14> {};
Major MajorKey() { return GenericBinaryOp; } Major MajorKey() { return GenericBinaryOp; }
int MinorKey() { return static_cast<int>(op_); } int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return OpBits::encode(op_)
| ModeBits::encode(mode_);
}
void Generate(MacroAssembler* masm); void Generate(MacroAssembler* masm);
const char* GetName() { const char* GetName() {
@ -708,7 +699,8 @@ class GenericBinaryOpStub : public CodeStub {
}; };
void CodeGenerator::GenericBinaryOperation(Token::Value op) { void CodeGenerator::GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode) {
VirtualFrame::SpilledScope spilled_scope(this); VirtualFrame::SpilledScope spilled_scope(this);
// sp[0] : y // sp[0] : y
// sp[1] : x // sp[1] : x
@ -727,7 +719,7 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op) {
case Token::SAR: { case Token::SAR: {
frame_->EmitPop(r0); // r0 : y frame_->EmitPop(r0); // r0 : y
frame_->EmitPop(r1); // r1 : x frame_->EmitPop(r1); // r1 : x
GenericBinaryOpStub stub(op); GenericBinaryOpStub stub(op, overwrite_mode);
frame_->CallStub(&stub, 0); frame_->CallStub(&stub, 0);
break; break;
} }
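The GenericBinaryOpStub key shown above now packs both parameters into 16 bits: ModeBits occupies bits 0-1 and OpBits bits 2-15, so the stub cache distinguishes, say, an OVERWRITE_LEFT add from a plain add. A self-contained sketch of what such a BitField helper provides (illustrative only, not the real v8 template):

    #include <cstdint>

    template <class T, int kShift, int kSize>
    struct BitFieldSketch {
      static const uint32_t kMask = ((1u << kSize) - 1) << kShift;
      static uint32_t encode(T value) { return static_cast<uint32_t>(value) << kShift; }
      static T decode(uint32_t field) { return static_cast<T>((field & kMask) >> kShift); }
    };

    // MinorKey() is then just OpBits::encode(op_) | ModeBits::encode(mode_).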
@ -767,11 +759,13 @@ class DeferredInlineSmiOperation: public DeferredCode {
DeferredInlineSmiOperation(CodeGenerator* generator, DeferredInlineSmiOperation(CodeGenerator* generator,
Token::Value op, Token::Value op,
int value, int value,
bool reversed) bool reversed,
OverwriteMode overwrite_mode)
: DeferredCode(generator), : DeferredCode(generator),
op_(op), op_(op),
value_(value), value_(value),
reversed_(reversed) { reversed_(reversed),
overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlinedSmiOperation"); set_comment("[ DeferredInlinedSmiOperation");
} }
@ -781,6 +775,7 @@ class DeferredInlineSmiOperation: public DeferredCode {
Token::Value op_; Token::Value op_;
int value_; int value_;
bool reversed_; bool reversed_;
OverwriteMode overwrite_mode_;
}; };
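The new constructor argument and field thread an OverwriteMode through the deferred code. That enum is the one this commit deletes from arm/codegen-arm.h further down, i.e. (values as shown there):

    // Mode to overwrite BinaryExpression values.
    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

OVERWRITE_LEFT / OVERWRITE_RIGHT mark the corresponding operand as a temporary heap number the stub may reuse for its result; HandleBinaryOpSlowCases later in this file uses exactly that to do a double-double operation in place instead of calling the builtin.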
@ -844,7 +839,7 @@ void DeferredInlineSmiOperation::Generate() {
break; break;
} }
GenericBinaryOpStub igostub(op_); GenericBinaryOpStub igostub(op_, overwrite_mode_);
Result arg0 = generator()->allocator()->Allocate(r1); Result arg0 = generator()->allocator()->Allocate(r1);
ASSERT(arg0.is_valid()); ASSERT(arg0.is_valid());
Result arg1 = generator()->allocator()->Allocate(r0); Result arg1 = generator()->allocator()->Allocate(r0);
@ -856,7 +851,8 @@ void DeferredInlineSmiOperation::Generate() {
void CodeGenerator::SmiOperation(Token::Value op, void CodeGenerator::SmiOperation(Token::Value op,
Handle<Object> value, Handle<Object> value,
bool reversed) { bool reversed,
OverwriteMode mode) {
VirtualFrame::SpilledScope spilled_scope(this); VirtualFrame::SpilledScope spilled_scope(this);
// NOTE: This is an attempt to inline (a bit) more of the code for // NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one // some possible smi operations (like + and -) when (at least) one
@ -875,7 +871,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
switch (op) { switch (op) {
case Token::ADD: { case Token::ADD: {
DeferredCode* deferred = DeferredCode* deferred =
new DeferredInlineSmiOperation(this, op, int_value, reversed); new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
__ add(r0, r0, Operand(value), SetCC); __ add(r0, r0, Operand(value), SetCC);
deferred->enter()->Branch(vs); deferred->enter()->Branch(vs);
@ -887,7 +883,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::SUB: { case Token::SUB: {
DeferredCode* deferred = DeferredCode* deferred =
new DeferredInlineSmiOperation(this, op, int_value, reversed); new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
if (!reversed) { if (!reversed) {
__ sub(r0, r0, Operand(value), SetCC); __ sub(r0, r0, Operand(value), SetCC);
@ -905,7 +901,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::BIT_XOR: case Token::BIT_XOR:
case Token::BIT_AND: { case Token::BIT_AND: {
DeferredCode* deferred = DeferredCode* deferred =
new DeferredInlineSmiOperation(this, op, int_value, reversed); new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
__ tst(r0, Operand(kSmiTagMask)); __ tst(r0, Operand(kSmiTagMask));
deferred->enter()->Branch(ne); deferred->enter()->Branch(ne);
switch (op) { switch (op) {
@ -925,12 +921,12 @@ void CodeGenerator::SmiOperation(Token::Value op,
__ mov(ip, Operand(value)); __ mov(ip, Operand(value));
frame_->EmitPush(ip); frame_->EmitPush(ip);
frame_->EmitPush(r0); frame_->EmitPush(r0);
GenericBinaryOperation(op); GenericBinaryOperation(op, mode);
} else { } else {
int shift_value = int_value & 0x1f; // least significant 5 bits int shift_value = int_value & 0x1f; // least significant 5 bits
DeferredCode* deferred = DeferredCode* deferred =
new DeferredInlineSmiOperation(this, op, shift_value, false); new DeferredInlineSmiOperation(this, op, shift_value, false, mode);
__ tst(r0, Operand(kSmiTagMask)); __ tst(r0, Operand(kSmiTagMask));
deferred->enter()->Branch(ne); deferred->enter()->Branch(ne);
__ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags __ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
@ -982,7 +978,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
frame_->EmitPush(ip); frame_->EmitPush(ip);
frame_->EmitPush(r0); frame_->EmitPush(r0);
} }
GenericBinaryOperation(op); GenericBinaryOperation(op, mode);
break; break;
} }
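The inlined ADD and SUB cases above exploit the tagging identity directly: a smi is the integer shifted left by one, so adding two tagged values equals the tagged sum. The generated code therefore operates on the tagged operands and only enters the deferred stub when the V flag reports overflow or the smi test fails. In scalar form (a restatement, assuming the 32-bit encoding):

    int32_t tagged_x   = x << kSmiTagSize;     // kSmiTagSize == 1
    int32_t tagged_y   = y << kSmiTagSize;
    int32_t tagged_sum = tagged_x + tagged_y;  // == (x + y) << kSmiTagSize, unless the
                                               // 32-bit addition overflows (V flag set)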
@ -1427,13 +1423,13 @@ void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
} else { } else {
frame_->CallRuntime(Runtime::kPushContext, 1); frame_->CallRuntime(Runtime::kPushContext, 1);
} }
if (kDebug) { #ifdef DEBUG
JumpTarget verified_true(this); JumpTarget verified_true(this);
__ cmp(r0, Operand(cp)); __ cmp(r0, Operand(cp));
verified_true.Branch(eq); verified_true.Branch(eq);
__ stop("PushContext: r0 is expected to be the same as cp"); __ stop("PushContext: r0 is expected to be the same as cp");
verified_true.Bind(); verified_true.Bind();
} #endif
// Update context local. // Update context local.
__ str(cp, frame_->Context()); __ str(cp, frame_->Context());
ASSERT(frame_->height() == original_height); ASSERT(frame_->height() == original_height);
@ -1487,8 +1483,8 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(
// Test for a Smi value in a HeapNumber. // Test for a Smi value in a HeapNumber.
__ tst(r0, Operand(kSmiTagMask)); __ tst(r0, Operand(kSmiTagMask));
is_smi.Branch(eq); is_smi.Branch(eq);
__ ldr(r1, MemOperand(r0, HeapObject::kMapOffset - kHeapObjectTag)); __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, MemOperand(r1, Map::kInstanceTypeOffset - kHeapObjectTag)); __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
__ cmp(r1, Operand(HEAP_NUMBER_TYPE)); __ cmp(r1, Operand(HEAP_NUMBER_TYPE));
default_target->Branch(ne); default_target->Branch(ne);
frame_->EmitPush(r0); frame_->EmitPush(r0);
@ -2339,7 +2335,9 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
VirtualFrame::SpilledScope spilled_scope(this); VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ DebuggerStatement"); Comment cmnt(masm_, "[ DebuggerStatement");
CodeForStatementPosition(node); CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
frame_->CallRuntime(Runtime::kDebugBreak, 0); frame_->CallRuntime(Runtime::kDebugBreak, 0);
#endif
// Ignore the return value. // Ignore the return value.
ASSERT(frame_->height() == original_height); ASSERT(frame_->height() == original_height);
} }
@ -2523,7 +2521,9 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
if (s->is_eval_scope()) { if (s->is_eval_scope()) {
Label next, fast; Label next, fast;
if (!context.is(tmp)) __ mov(tmp, Operand(context)); if (!context.is(tmp)) {
__ mov(tmp, Operand(context));
}
__ bind(&next); __ bind(&next);
// Terminate at global context. // Terminate at global context.
__ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset)); __ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
@ -2934,15 +2934,24 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
LoadAndSpill(node->value()); LoadAndSpill(node->value());
} else { } else {
// +=, *= and similar binary assignments.
// Get the old value of the lhs.
target.GetValueAndSpill(NOT_INSIDE_TYPEOF); target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
Literal* literal = node->value()->AsLiteral(); Literal* literal = node->value()->AsLiteral();
bool overwrite =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
if (literal != NULL && literal->handle()->IsSmi()) { if (literal != NULL && literal->handle()->IsSmi()) {
SmiOperation(node->binary_op(), literal->handle(), false); SmiOperation(node->binary_op(),
literal->handle(),
false,
overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
frame_->EmitPush(r0); frame_->EmitPush(r0);
} else { } else {
LoadAndSpill(node->value()); LoadAndSpill(node->value());
GenericBinaryOperation(node->binary_op()); GenericBinaryOperation(node->binary_op(),
overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
frame_->EmitPush(r0); frame_->EmitPush(r0);
} }
} }
@ -3822,19 +3831,39 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// is a literal small integer. // is a literal small integer.
Literal* lliteral = node->left()->AsLiteral(); Literal* lliteral = node->left()->AsLiteral();
Literal* rliteral = node->right()->AsLiteral(); Literal* rliteral = node->right()->AsLiteral();
// NOTE: The code below assumes that the slow cases (calls to runtime)
// never return a constant/immutable object.
bool overwrite_left =
(node->left()->AsBinaryOperation() != NULL &&
node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
bool overwrite_right =
(node->right()->AsBinaryOperation() != NULL &&
node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
if (rliteral != NULL && rliteral->handle()->IsSmi()) { if (rliteral != NULL && rliteral->handle()->IsSmi()) {
LoadAndSpill(node->left()); LoadAndSpill(node->left());
SmiOperation(node->op(), rliteral->handle(), false); SmiOperation(node->op(),
rliteral->handle(),
false,
overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else if (lliteral != NULL && lliteral->handle()->IsSmi()) { } else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
LoadAndSpill(node->right()); LoadAndSpill(node->right());
SmiOperation(node->op(), lliteral->handle(), true); SmiOperation(node->op(),
lliteral->handle(),
true,
overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
} else { } else {
OverwriteMode overwrite_mode = NO_OVERWRITE;
if (overwrite_left) {
overwrite_mode = OVERWRITE_LEFT;
} else if (overwrite_right) {
overwrite_mode = OVERWRITE_RIGHT;
}
LoadAndSpill(node->left()); LoadAndSpill(node->left());
LoadAndSpill(node->right()); LoadAndSpill(node->right());
GenericBinaryOperation(node->op()); GenericBinaryOperation(node->op(), overwrite_mode);
} }
frame_->EmitPush(r0); frame_->EmitPush(r0);
} }
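A concrete reading of the mode selection above: ResultOverwriteAllowed() holds for an operand that is itself the freshly produced value of a nested binary operation, and when both operands qualify the left one is preferred. For example (an illustration, not code from the commit):

    //   a * b + c
    //   - inner "a * b": both operands are plain variables        -> NO_OVERWRITE
    //   - outer "+":     its left operand is the temporary a * b  -> OVERWRITE_LEFT,
    //                    so a heap-number sum may be written into the number that
    //                    held a * b

The NOTE above is the condition that makes this safe: the slow paths never return a constant or otherwise shared object, so clobbering the chosen operand cannot be observed elsewhere.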
@ -4067,7 +4096,8 @@ bool CodeGenerator::HasValidEntryRegisters() { return true; }
#undef __ #undef __
#define __ masm-> #define __ ACCESS_MASM(masm)
Handle<String> Reference::GetName() { Handle<String> Reference::GetName() {
ASSERT(type_ == NAMED); ASSERT(type_ == NAMED);
@ -4305,167 +4335,80 @@ void Reference::SetValue(InitState init_state) {
} }
void GetPropertyStub::Generate(MacroAssembler* masm) { static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// sp[0]: key Label* not_smi,
// sp[1]: receiver const Builtins::JavaScript& builtin,
Label slow, fast; Token::Value operation,
// Get the key and receiver object from the stack. int swi_number,
__ ldm(ia, sp, r0.bit() | r1.bit()); OverwriteMode mode) {
// Check that the key is a smi. Label slow;
__ tst(r0, Operand(kSmiTagMask)); if (mode == NO_OVERWRITE) {
__ b(ne, &slow); __ bind(not_smi);
__ mov(r0, Operand(r0, ASR, kSmiTagSize)); }
// Check that the object isn't a smi.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
// objects work as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(JS_OBJECT_TYPE));
__ b(lt, &slow);
// Get the elements array of the object.
__ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r3, Operand(Factory::hash_table_map()));
__ b(eq, &slow);
// Check that the key (index) is within bounds.
__ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
__ cmp(r0, Operand(r3));
__ b(lo, &fast);
// Slow case: Push extra copies of the arguments (2).
__ bind(&slow); __ bind(&slow);
__ ldm(ia, sp, r0.bit() | r1.bit()); __ push(r1);
__ stm(db_w, sp, r0.bit() | r1.bit()); __ push(r0);
// Do tail-call to runtime routine. __ mov(r0, Operand(1)); // Set number of arguments.
__ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2); __ InvokeBuiltin(builtin, JUMP_JS); // Tail call.
// Fast case: Do the load.
__ bind(&fast);
__ add(r3, r1, Operand(Array::kHeaderSize - kHeapObjectTag));
__ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
__ cmp(r0, Operand(Factory::the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ b(eq, &slow);
__ StubReturn(1);
}
void SetPropertyStub::Generate(MacroAssembler* masm) {
// r0 : value
// sp[0] : key
// sp[1] : receiver
Label slow, fast, array, extra, exit; // Could it be a double-double op? If we already have a place to put
// Get the key and the object from the stack. // the answer then we can do the op and skip the builtin and runtime call.
__ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver if (mode != NO_OVERWRITE) {
// Check that the key is a smi. __ bind(not_smi);
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &slow); // We can't handle a Smi-double combination yet.
__ tst(r1, Operand(kSmiTagMask)); __ tst(r1, Operand(kSmiTagMask));
__ b(eq, &slow); // We can't handle a Smi-double combination yet.
// Get map of r0 into r2.
__ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
// Get type of r0 into r3.
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r3, Operand(HEAP_NUMBER_TYPE));
__ b(ne, &slow); __ b(ne, &slow);
// Check that the object isn't a smi. // Get type of r1 into r3.
__ tst(r3, Operand(kSmiTagMask)); __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ b(eq, &slow); // Check they are both the same map (heap number map).
// Get the type of the object from its map. __ cmp(r2, r3);
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); __ b(ne, &slow);
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset)); // Both are doubles.
// Check if the object is a JS array or not. // Calling convention says that second double is in r2 and r3.
__ cmp(r2, Operand(JS_ARRAY_TYPE)); __ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ b(eq, &array); __ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
// Check that the object is some kind of JS object. __ push(lr);
__ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE)); if (mode == OVERWRITE_LEFT) {
__ b(lt, &slow); __ push(r1);
} else {
__ push(r0);
// Object case: Check key against length in the elements array. }
__ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset)); // Calling convention says that first double is in r0 and r1.
// Check that the object is in fast mode (not dictionary). __ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset)); __ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
__ cmp(r2, Operand(Factory::hash_table_map())); // Call C routine that may not cause GC or other trouble.
__ b(eq, &slow); __ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
// Untag the key (for checking against untagged length in the fixed array). #if !defined(__arm__)
__ mov(r1, Operand(r1, ASR, kSmiTagSize)); // Notify the simulator that we are calling an add routine in C.
// Compute address to store into and check array bounds. __ swi(swi_number);
__ add(r2, r3, Operand(Array::kHeaderSize - kHeapObjectTag)); #else
__ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2)); // Actually call the add routine written in C.
__ ldr(ip, FieldMemOperand(r3, Array::kLengthOffset)); __ Call(r5);
__ cmp(r1, Operand(ip)); #endif
__ b(lo, &fast); // Store answer in the overwritable heap number.
__ pop(r4);
#if !defined(__ARM_EABI__) && defined(__arm__)
// Slow case: Push extra copies of the arguments (3). // Double returned in fp coprocessor register 0 and 1, encoded as register
__ bind(&slow); // cr8. Offsets must be divisible by 4 for coprocessor so we need to
__ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object // substract the tag from r4.
__ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit()); // subtract the tag from r4.
// Do tail-call to runtime routine. __ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
__ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3); #else
// Double returned in fp coprocessor register 0 and 1.
__ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
// Extra capacity case: Check if there is extra capacity to __ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize));
// perform the store and update the length. Used for adding one #endif
// element to the array by writing to array[array.length]. __ mov(r0, Operand(r4));
// r0 == value, r1 == key, r2 == elements, r3 == object // And we are done.
__ bind(&extra); __ pop(pc);
__ b(ne, &slow); // do not leave holes in the array }
__ mov(r1, Operand(r1, ASR, kSmiTagSize)); // untag
__ ldr(ip, FieldMemOperand(r2, Array::kLengthOffset));
__ cmp(r1, Operand(ip));
__ b(hs, &slow);
__ mov(r1, Operand(r1, LSL, kSmiTagSize)); // restore tag
__ add(r1, r1, Operand(1 << kSmiTagSize)); // and increment
__ str(r1, FieldMemOperand(r3, JSArray::kLengthOffset));
__ mov(r3, Operand(r2));
// NOTE: Computing the address to store into must take the fact
// that the key has been incremented into account.
int displacement = Array::kHeaderSize - kHeapObjectTag -
((1 << kSmiTagSize) * 2);
__ add(r2, r2, Operand(displacement));
__ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
__ b(&fast);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode; if it is the
// length is always a smi.
// r0 == value, r3 == object
__ bind(&array);
__ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset));
__ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
__ cmp(r1, Operand(Factory::hash_table_map()));
__ b(eq, &slow);
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
__ ldr(r1, MemOperand(sp));
// r0 == value, r1 == key, r2 == elements, r3 == object.
__ ldr(ip, FieldMemOperand(r3, JSArray::kLengthOffset));
__ cmp(r1, Operand(ip));
__ b(hs, &extra);
__ mov(r3, Operand(r2));
__ add(r2, r2, Operand(Array::kHeaderSize - kHeapObjectTag));
__ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
// Fast case: Do the store.
// r0 == value, r2 == address to store into, r3 == elements
__ bind(&fast);
__ str(r0, MemOperand(r2));
// Skip write barrier if the written value is a smi.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &exit);
// Update write barrier for the elements array address.
__ sub(r1, r2, Operand(r3));
__ RecordWrite(r3, r1, r2);
__ bind(&exit);
__ StubReturn(1);
} }
@ -4474,89 +4417,84 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// r0 : y // r0 : y
// result : r0 // result : r0
// All ops need to know whether we are dealing with two Smis. Set up r2 to
// tell us that.
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
switch (op_) { switch (op_) {
case Token::ADD: { case Token::ADD: {
Label slow, exit; Label not_smi;
// fast path // Fast path.
__ orr(r2, r1, Operand(r0)); // r2 = x | y; ASSERT(kSmiTag == 0); // Adjust code below.
__ add(r0, r1, Operand(r0), SetCC); // add y optimistically
// go slow-path in case of overflow
__ b(vs, &slow);
// go slow-path in case of non-smi operands
ASSERT(kSmiTag == 0); // adjust code below
__ tst(r2, Operand(kSmiTagMask)); __ tst(r2, Operand(kSmiTagMask));
__ b(eq, &exit); __ b(ne, &not_smi);
// slow path __ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
__ bind(&slow); // Return if no overflow.
__ sub(r0, r0, Operand(r1)); // revert optimistic add __ Ret(vc);
__ push(r1); __ sub(r0, r0, Operand(r1)); // Revert optimistic add.
__ push(r0);
__ mov(r0, Operand(1)); // set number of arguments HandleBinaryOpSlowCases(masm,
__ InvokeBuiltin(Builtins::ADD, JUMP_JS); &not_smi,
// done Builtins::ADD,
__ bind(&exit); Token::ADD,
assembler::arm::simulator_fp_add,
mode_);
break; break;
} }
case Token::SUB: { case Token::SUB: {
Label slow, exit; Label not_smi;
// fast path // Fast path.
__ orr(r2, r1, Operand(r0)); // r2 = x | y; ASSERT(kSmiTag == 0); // Adjust code below.
__ sub(r3, r1, Operand(r0), SetCC); // subtract y optimistically
// go slow-path in case of overflow
__ b(vs, &slow);
// go slow-path in case of non-smi operands
ASSERT(kSmiTag == 0); // adjust code below
__ tst(r2, Operand(kSmiTagMask)); __ tst(r2, Operand(kSmiTagMask));
__ mov(r0, Operand(r3), LeaveCC, eq); // conditionally set r0 to result __ b(ne, &not_smi);
__ b(eq, &exit); __ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
// slow path // Return if no overflow.
__ bind(&slow); __ Ret(vc);
__ push(r1); __ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
__ push(r0);
__ mov(r0, Operand(1)); // set number of arguments HandleBinaryOpSlowCases(masm,
__ InvokeBuiltin(Builtins::SUB, JUMP_JS); &not_smi,
// done Builtins::SUB,
__ bind(&exit); Token::SUB,
assembler::arm::simulator_fp_sub,
mode_);
break; break;
} }
case Token::MUL: { case Token::MUL: {
Label slow, exit; Label not_smi, slow;
// tag check
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
ASSERT(kSmiTag == 0); // adjust code below ASSERT(kSmiTag == 0); // adjust code below
__ tst(r2, Operand(kSmiTagMask)); __ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow); __ b(ne, &not_smi);
// remove tag from one operand (but keep sign), so that result is smi // Remove tag from one operand (but keep sign), so that result is Smi.
__ mov(ip, Operand(r0, ASR, kSmiTagSize)); __ mov(ip, Operand(r0, ASR, kSmiTagSize));
// do multiplication // Do multiplication
__ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1 __ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1.
// go slow on overflows (overflow bit is not set) // Go slow on overflows (overflow bit is not set).
__ mov(ip, Operand(r3, ASR, 31)); __ mov(ip, Operand(r3, ASR, 31));
__ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical __ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical
__ b(ne, &slow); __ b(ne, &slow);
// go slow on zero result to handle -0 // Go slow on zero result to handle -0.
__ tst(r3, Operand(r3)); __ tst(r3, Operand(r3));
__ mov(r0, Operand(r3), LeaveCC, ne); __ mov(r0, Operand(r3), LeaveCC, ne);
__ b(ne, &exit); __ Ret(ne);
// slow case // Slow case.
__ bind(&slow); __ bind(&slow);
__ push(r1);
__ push(r0); HandleBinaryOpSlowCases(masm,
__ mov(r0, Operand(1)); // set number of arguments &not_smi,
__ InvokeBuiltin(Builtins::MUL, JUMP_JS); Builtins::MUL,
// done Token::MUL,
__ bind(&exit); assembler::arm::simulator_fp_mul,
mode_);
break; break;
} }
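The MUL fast path above untags only one operand, so the low 32 bits of the product are already a tagged smi whenever the result fits. The overflow test after smull can be restated as (an interpretation of the mov/cmp pair):

    int64_t product = static_cast<int64_t>(x_tagged) * untagged_y;  // smull r3, r2, r1, ip
    int32_t lo = static_cast<int32_t>(product);                     // r3
    int32_t hi = static_cast<int32_t>(product >> 32);               // r2
    bool overflowed = (hi != (lo >> 31));  // fits in 32 bits only if r2 is r3's sign extension

The extra zero test keeps a zero result on the slow path because a zero product with a negative operand must yield -0.0, which a smi cannot represent.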
case Token::BIT_OR: case Token::BIT_OR:
case Token::BIT_AND: case Token::BIT_AND:
case Token::BIT_XOR: { case Token::BIT_XOR: {
Label slow, exit; Label slow;
// tag check
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
ASSERT(kSmiTag == 0); // adjust code below ASSERT(kSmiTag == 0); // adjust code below
__ tst(r2, Operand(kSmiTagMask)); __ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow); __ b(ne, &slow);
@ -4566,7 +4504,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break; case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
default: UNREACHABLE(); default: UNREACHABLE();
} }
__ b(&exit); __ Ret();
__ bind(&slow); __ bind(&slow);
__ push(r1); // restore stack __ push(r1); // restore stack
__ push(r0); __ push(r0);
@ -4584,16 +4522,13 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
default: default:
UNREACHABLE(); UNREACHABLE();
} }
__ bind(&exit);
break; break;
} }
case Token::SHL: case Token::SHL:
case Token::SHR: case Token::SHR:
case Token::SAR: { case Token::SAR: {
Label slow, exit; Label slow;
// tag check
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
ASSERT(kSmiTag == 0); // adjust code below ASSERT(kSmiTag == 0); // adjust code below
__ tst(r2, Operand(kSmiTagMask)); __ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow); __ b(ne, &slow);
@ -4633,7 +4568,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// tag result and store it in r0 // tag result and store it in r0
ASSERT(kSmiTag == 0); // adjust code below ASSERT(kSmiTag == 0); // adjust code below
__ mov(r0, Operand(r3, LSL, kSmiTagSize)); __ mov(r0, Operand(r3, LSL, kSmiTagSize));
__ b(&exit); __ Ret();
// slow case // slow case
__ bind(&slow); __ bind(&slow);
__ push(r1); // restore stack __ push(r1); // restore stack
@ -4645,13 +4580,13 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SHL: __ InvokeBuiltin(Builtins::SHL, JUMP_JS); break; case Token::SHL: __ InvokeBuiltin(Builtins::SHL, JUMP_JS); break;
default: UNREACHABLE(); default: UNREACHABLE();
} }
__ bind(&exit);
break; break;
} }
default: UNREACHABLE(); default: UNREACHABLE();
} }
__ Ret(); // This code should be unreachable.
__ stop("Unreachable");
} }
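When both operands turn out to be heap numbers, the new slow-case helper loads them into r0:r1 and r2:r3 (the soft-float convention the comments describe), reaches the arithmetic either through ExternalReference::double_fp_operation(operation) on hardware or through the matching simulator_fp_* swi under the simulator, and stores the result into the overwritable number popped into r4. A sketch of the kind of C helper that call ultimately resolves to (an assumption: the real runtime entry points are not part of this excerpt, and presumably there is one per operation rather than a switch):

    extern "C" double fp_operation_sketch(double x, double y, Token::Value op) {
      switch (op) {
        case Token::ADD: return x + y;
        case Token::SUB: return x - y;
        case Token::MUL: return x * y;
        default:         return 0.0;  // only the three ops above are wired up in this commit
      }
    }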
@ -4661,7 +4596,9 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
__ ldr(ip, MemOperand(ip)); __ ldr(ip, MemOperand(ip));
__ cmp(sp, Operand(ip)); __ cmp(sp, Operand(ip));
__ b(hs, &within_limit); __ b(hs, &within_limit);
// Do tail-call to runtime routine. // Do tail-call to runtime routine. Runtime routines expect at least one
// argument, so give it a Smi.
__ mov(r0, Operand(Smi::FromInt(0)));
__ push(r0); __ push(r0);
__ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1); __ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
__ bind(&within_limit); __ bind(&within_limit);
@ -4721,7 +4658,11 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ mov(cp, Operand(0), LeaveCC, eq); __ mov(cp, Operand(0), LeaveCC, eq);
// Restore cp otherwise. // Restore cp otherwise.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
if (kDebug && FLAG_debug_code) __ mov(lr, Operand(pc)); #ifdef DEBUG
if (FLAG_debug_code) {
__ mov(lr, Operand(pc));
}
#endif
__ pop(pc); __ pop(pc);
} }
@ -4784,7 +4725,11 @@ void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
__ mov(cp, Operand(0), LeaveCC, eq); __ mov(cp, Operand(0), LeaveCC, eq);
// Restore cp otherwise. // Restore cp otherwise.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
if (kDebug && FLAG_debug_code) __ mov(lr, Operand(pc)); #ifdef DEBUG
if (FLAG_debug_code) {
__ mov(lr, Operand(pc));
}
#endif
__ pop(pc); __ pop(pc);
} }
@ -5043,9 +4988,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
} }
__ ldr(ip, MemOperand(ip)); // deref address __ ldr(ip, MemOperand(ip)); // deref address
// Branch and link to JSEntryTrampoline // Branch and link to JSEntryTrampoline. We don't use the double underscore
// macro for the add instruction because we don't want the coverage tool
// inserting instructions here after we read the pc.
__ mov(lr, Operand(pc)); __ mov(lr, Operand(pc));
__ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
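A note on this call sequence, since the new comment only covers the coverage-tool angle: in ARM state, reading pc yields the address of the current instruction plus 8. If the mov sits at address A, lr receives A + 8, which is the instruction right after the add at A + 4 -- exactly the return address the trampoline needs:

    // A     : mov lr, pc         ; lr := A + 8
    // A + 4 : add pc, ip, #off   ; jump into JSEntryTrampoline
    // A + 8 : ...                ; where the trampoline eventually returns

Writing the add as masm->add(...) rather than __ add(...) keeps the coverage macro (see arm/macro-assembler-arm.h below) from planting a stop between the two instructions, which would break that A / A + 4 adjacency.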
// Unlink this frame from the handler chain. When reading the // Unlink this frame from the handler chain. When reading the
// address of the next handler, there is no need to use the address // address of the next handler, there is no need to use the address
@ -5057,6 +5004,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// No need to restore registers // No need to restore registers
__ add(sp, sp, Operand(StackHandlerConstants::kSize)); __ add(sp, sp, Operand(StackHandlerConstants::kSize));
__ bind(&exit); // r0 holds result __ bind(&exit); // r0 holds result
// Restore the top frame descriptors from the stack. // Restore the top frame descriptors from the stack.
__ pop(r3); __ pop(r3);
@ -5068,7 +5016,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Restore callee-saved registers and return. // Restore callee-saved registers and return.
#ifdef DEBUG #ifdef DEBUG
if (FLAG_debug_code) __ mov(lr, Operand(pc)); if (FLAG_debug_code) {
__ mov(lr, Operand(pc));
}
#endif #endif
__ ldm(ia_w, sp, kCalleeSaved | pc.bit()); __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
} }
@ -5084,13 +5034,13 @@ void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
// Nothing to do: The formal number of parameters has already been // Nothing to do: The formal number of parameters has already been
// passed in register r0 by calling function. Just return it. // passed in register r0 by calling function. Just return it.
__ mov(pc, lr); __ Jump(lr);
// Arguments adaptor case: Read the arguments length from the // Arguments adaptor case: Read the arguments length from the
// adaptor frame and return it. // adaptor frame and return it.
__ bind(&adaptor); __ bind(&adaptor);
__ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ mov(pc, lr); __ Jump(lr);
} }
@ -5122,7 +5072,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ sub(r3, r0, r1); __ sub(r3, r0, r1);
__ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(r0, MemOperand(r3, kDisplacement)); __ ldr(r0, MemOperand(r3, kDisplacement));
__ mov(pc, lr); __ Jump(lr);
// Arguments adaptor case: Check index against actual arguments // Arguments adaptor case: Check index against actual arguments
// limit found in the arguments adaptor frame. Use unsigned // limit found in the arguments adaptor frame. Use unsigned
@ -5136,7 +5086,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ sub(r3, r0, r1); __ sub(r3, r0, r1);
__ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); __ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(r0, MemOperand(r3, kDisplacement)); __ ldr(r0, MemOperand(r3, kDisplacement));
__ mov(pc, lr); __ Jump(lr);
// Slow-case: Handle non-smi or out-of-bounds access to arguments // Slow-case: Handle non-smi or out-of-bounds access to arguments
// by calling the runtime system. // by calling the runtime system.

28
deps/v8/src/codegen-arm.h → deps/v8/src/arm/codegen-arm.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_CODEGEN_ARM_H_ #ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_CODEGEN_ARM_H_ #define V8_ARM_CODEGEN_ARM_H_
namespace v8 { namespace internal { namespace v8 { namespace internal {
@ -35,9 +35,6 @@ class DeferredCode;
class RegisterAllocator; class RegisterAllocator;
class RegisterFile; class RegisterFile;
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
enum InitState { CONST_INIT, NOT_CONST_INIT }; enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF }; enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
@ -292,10 +289,13 @@ class CodeGenerator: public AstVisitor {
void ToBoolean(JumpTarget* true_target, JumpTarget* false_target); void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
void GenericBinaryOperation(Token::Value op); void GenericBinaryOperation(Token::Value op, OverwriteMode overwrite_mode);
void Comparison(Condition cc, bool strict = false); void Comparison(Condition cc, bool strict = false);
void SmiOperation(Token::Value op, Handle<Object> value, bool reversed); void SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed,
OverwriteMode mode);
void CallWithArguments(ZoneList<Expression*>* arguments, int position); void CallWithArguments(ZoneList<Expression*>* arguments, int position);
@ -303,7 +303,17 @@ class CodeGenerator: public AstVisitor {
void Branch(bool if_true, JumpTarget* target); void Branch(bool if_true, JumpTarget* target);
void CheckStack(); void CheckStack();
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
};
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node); bool CheckForInlineRuntimeCall(CallRuntime* node);
static bool PatchInlineRuntimeEntry(Handle<String> name,
const InlineRuntimeLUT& new_entry,
InlineRuntimeLUT* old_entry);
Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node); Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations); void ProcessDeclarations(ZoneList<Declaration*>* declarations);
@ -433,6 +443,8 @@ class CodeGenerator: public AstVisitor {
// in a spilled state. // in a spilled state.
bool in_spilled_code_; bool in_spilled_code_;
static InlineRuntimeLUT kInlineRuntimeLUT[];
friend class VirtualFrame; friend class VirtualFrame;
friend class JumpTarget; friend class JumpTarget;
friend class Reference; friend class Reference;
@ -443,4 +455,4 @@ class CodeGenerator: public AstVisitor {
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_CODEGEN_ARM_H_ #endif // V8_ARM_CODEGEN_ARM_H_

13
deps/v8/src/constants-arm.h → deps/v8/src/arm/constants-arm.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_CONSTANTS_ARM_H_ #ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_CONSTANTS_ARM_H_ #define V8_ARM_CONSTANTS_ARM_H_
namespace assembler { namespace arm { namespace assembler { namespace arm {
@ -106,7 +106,12 @@ enum SoftwareInterruptCodes {
call_rt_r5 = 0x10, call_rt_r5 = 0x10,
call_rt_r2 = 0x11, call_rt_r2 = 0x11,
// break point // break point
break_point = 0x20 break_point = 0x20,
// FP operations. These simulate calling into C for a moment to do fp ops.
// They should trash all caller-save registers.
simulator_fp_add = 0x21,
simulator_fp_sub = 0x22,
simulator_fp_mul = 0x23
}; };
@ -232,4 +237,4 @@ class Instr {
} } // namespace assembler::arm } } // namespace assembler::arm
#endif // V8_CONSTANTS_ARM_H_ #endif // V8_ARM_CONSTANTS_ARM_H_

0
deps/v8/src/cpu-arm.cc → deps/v8/src/arm/cpu-arm.cc

5
deps/v8/src/debug-arm.cc → deps/v8/src/arm/debug-arm.cc

@ -32,7 +32,7 @@
namespace v8 { namespace internal { namespace v8 { namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Currently debug break is not supported in frame exit code on ARM. // Currently debug break is not supported in frame exit code on ARM.
bool BreakLocationIterator::IsDebugBreakAtReturn() { bool BreakLocationIterator::IsDebugBreakAtReturn() {
return false; return false;
@ -58,7 +58,7 @@ bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
} }
#define __ masm-> #define __ ACCESS_MASM(masm)
static void Generate_DebugBreakCallHelper(MacroAssembler* masm, static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
@ -191,5 +191,6 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
#undef __ #undef __
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal } } // namespace v8::internal

9
deps/v8/src/disasm-arm.cc → deps/v8/src/arm/disasm-arm.cc

@ -261,6 +261,15 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes swi) {
case break_point: case break_point:
Print("break_point"); Print("break_point");
return; return;
case simulator_fp_add:
Print("simulator_fp_add");
return;
case simulator_fp_mul:
Print("simulator_fp_mul");
return;
case simulator_fp_sub:
Print("simulator_fp_sub");
return;
default: default:
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_, out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d", "%d",

2
deps/v8/src/frames-arm.cc → deps/v8/src/arm/frames-arm.cc

@ -28,7 +28,7 @@
#include "v8.h" #include "v8.h"
#include "frames-inl.h" #include "frames-inl.h"
#include "assembler-arm-inl.h" #include "arm/assembler-arm-inl.h"
namespace v8 { namespace internal { namespace v8 { namespace internal {

6
deps/v8/src/frames-arm.h → deps/v8/src/arm/frames-arm.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_FRAMES_ARM_H_ #ifndef V8_ARM_FRAMES_ARM_H_
#define V8_FRAMES_ARM_H_ #define V8_ARM_FRAMES_ARM_H_
namespace v8 { namespace internal { namespace v8 { namespace internal {
@ -376,4 +376,4 @@ inline Object* JavaScriptFrame::function_slot_object() const {
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_FRAMES_ARM_H_ #endif // V8_ARM_FRAMES_ARM_H_

51
deps/v8/src/ic-arm.cc → deps/v8/src/arm/ic-arm.cc

@ -39,7 +39,7 @@ namespace v8 { namespace internal {
// Static IC stub generators. // Static IC stub generators.
// //
#define __ masm-> #define __ ACCESS_MASM(masm)
// Helper function used from LoadIC/CallIC GenerateNormal. // Helper function used from LoadIC/CallIC GenerateNormal.
@ -96,7 +96,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Compute the masked index: (hash + i + i * i) & mask. // Compute the masked index: (hash + i + i * i) & mask.
__ ldr(t1, FieldMemOperand(r2, String::kLengthOffset)); __ ldr(t1, FieldMemOperand(r2, String::kLengthOffset));
__ mov(t1, Operand(t1, LSR, String::kHashShift)); __ mov(t1, Operand(t1, LSR, String::kHashShift));
if (i > 0) __ add(t1, t1, Operand(Dictionary::GetProbeOffset(i))); if (i > 0) {
__ add(t1, t1, Operand(Dictionary::GetProbeOffset(i)));
}
__ and_(t1, t1, Operand(r3)); __ and_(t1, t1, Operand(r3));
// Scale the index by multiplying by the element size. // Scale the index by multiplying by the element size.
@ -125,9 +127,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
} }
// Helper function used to check that a value is either not a function // Helper function used to check that a value is either not an object
// or is loaded if it is a function. // or is loaded if it is an object.
static void GenerateCheckNonFunctionOrLoaded(MacroAssembler* masm, static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm,
Label* miss, Label* miss,
Register value, Register value,
Register scratch) { Register scratch) {
@ -135,17 +137,10 @@ static void GenerateCheckNonFunctionOrLoaded(MacroAssembler* masm,
// Check if the value is a Smi. // Check if the value is a Smi.
__ tst(value, Operand(kSmiTagMask)); __ tst(value, Operand(kSmiTagMask));
__ b(eq, &done); __ b(eq, &done);
// Check if the value is a function. // Check if the object has been loaded.
__ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); __ ldr(scratch, FieldMemOperand(value, JSObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); __ ldrb(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
__ cmp(scratch, Operand(JS_FUNCTION_TYPE)); __ tst(scratch, Operand(1 << Map::kNeedsLoading));
__ b(ne, &done);
// Check if the function has been loaded.
__ ldr(scratch,
FieldMemOperand(value, JSFunction::kSharedFunctionInfoOffset));
__ ldr(scratch,
FieldMemOperand(scratch, SharedFunctionInfo::kLazyLoadDataOffset));
__ cmp(scratch, Operand(Factory::undefined_value()));
__ b(ne, miss); __ b(ne, miss);
__ bind(&done); __ bind(&done);
} }
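The rewritten check above drops the detour through SharedFunctionInfo's lazy-load data and instead reads one flag bit out of the object's map. In C++ terms the new ldrb/tst pair is roughly (a reading of the code; the bit_field2 accessor name is illustrative):

    bool NeedsLoading(JSObject* object) {
      int bits = object->map()->bit_field2();          // byte at Map::kBitField2Offset
      return (bits & (1 << Map::kNeedsLoading)) != 0;  // b(ne, miss) when the bit is set
    }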
@ -282,9 +277,9 @@ static void GenerateNormalHelper(MacroAssembler* masm,
__ b(ne, miss); __ b(ne, miss);
// Check that the function has been loaded. // Check that the function has been loaded.
__ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); __ ldr(r0, FieldMemOperand(r1, JSObject::kMapOffset));
__ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kLazyLoadDataOffset)); __ ldrb(r0, FieldMemOperand(r0, Map::kBitField2Offset));
__ cmp(r0, Operand(Factory::undefined_value())); __ tst(r0, Operand(1 << Map::kNeedsLoading));
__ b(ne, miss); __ b(ne, miss);
// Patch the receiver with the global proxy if necessary. // Patch the receiver with the global proxy if necessary.
@ -468,7 +463,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ bind(&probe); __ bind(&probe);
GenerateDictionaryLoad(masm, &miss, r1, r0); GenerateDictionaryLoad(masm, &miss, r1, r0);
GenerateCheckNonFunctionOrLoaded(masm, &miss, r0, r1); GenerateCheckNonObjectOrLoaded(masm, &miss, r0, r1);
__ Ret(); __ Ret();
// Global object access: Check access rights. // Global object access: Check access rights.
@ -502,10 +497,18 @@ void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
} }
// TODO(181): Implement map patching once loop nesting is tracked on // TODO(181): Implement map patching once loop nesting is tracked on the
// the ARM platform so we can generate inlined fast-case code for // ARM platform so we can generate inlined fast-case code loads in
// array indexing in loops. // loops.
void KeyedLoadIC::PatchInlinedMapCheck(Address address, Object* value) { } void LoadIC::ClearInlinedVersion(Address address) {}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
return false;
}
void KeyedLoadIC::ClearInlinedVersion(Address address) {}
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
return false;
}
Object* KeyedLoadIC_Miss(Arguments args); Object* KeyedLoadIC_Miss(Arguments args);

2
deps/v8/src/jump-target-arm.cc → deps/v8/src/arm/jump-target-arm.cc

@ -35,7 +35,7 @@ namespace v8 { namespace internal {
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// JumpTarget implementation. // JumpTarget implementation.
#define __ masm_-> #define __ ACCESS_MASM(masm_)
void JumpTarget::DoJump() { void JumpTarget::DoJump() {
ASSERT(cgen_ != NULL); ASSERT(cgen_ != NULL);

21
deps/v8/src/macro-assembler-arm.cc → deps/v8/src/arm/macro-assembler-arm.cc

@ -168,11 +168,11 @@ void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
} }
void MacroAssembler::Ret() { void MacroAssembler::Ret(Condition cond) {
#if USE_BX #if USE_BX
bx(lr); bx(lr, cond);
#else #else
mov(pc, Operand(lr)); mov(pc, Operand(lr), LeaveCC, cond);
#endif #endif
} }
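Ret() taking a condition is what enables the early conditional returns GenericBinaryOpStub uses earlier in this diff; the typical pattern looks like:

    __ add(r0, r1, Operand(r0), SetCC);  // optimistic smi add, sets the flags
    __ Ret(vc);                          // return right away unless V (overflow) is set
    __ sub(r0, r0, Operand(r1));         // otherwise undo the add and take the slow path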
@ -320,16 +320,19 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
add(r6, fp, Operand(r4, LSL, kPointerSizeLog2)); add(r6, fp, Operand(r4, LSL, kPointerSizeLog2));
add(r6, r6, Operand(ExitFrameConstants::kPPDisplacement - kPointerSize)); add(r6, r6, Operand(ExitFrameConstants::kPPDisplacement - kPointerSize));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory // Save the state of all registers to the stack from the memory
// location. This is needed to allow nested break points. // location. This is needed to allow nested break points.
if (type == StackFrame::EXIT_DEBUG) { if (type == StackFrame::EXIT_DEBUG) {
// Use sp as base to push. // Use sp as base to push.
CopyRegistersFromMemoryToStack(sp, kJSCallerSaved); CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
} }
#endif
} }
void MacroAssembler::LeaveExitFrame(StackFrame::Type type) { void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from // Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points. // the stack. This is needed to allow nested break points.
if (type == StackFrame::EXIT_DEBUG) { if (type == StackFrame::EXIT_DEBUG) {
@ -339,6 +342,7 @@ void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
add(r3, fp, Operand(kOffset)); add(r3, fp, Operand(kOffset));
CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved); CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
} }
#endif
// Clear top frame. // Clear top frame.
mov(r3, Operand(0)); mov(r3, Operand(0));
@ -348,9 +352,9 @@ void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
// Restore current context from top and clear it in debug mode. // Restore current context from top and clear it in debug mode.
mov(ip, Operand(ExternalReference(Top::k_context_address))); mov(ip, Operand(ExternalReference(Top::k_context_address)));
ldr(cp, MemOperand(ip)); ldr(cp, MemOperand(ip));
if (kDebug) { #ifdef DEBUG
str(r3, MemOperand(ip)); str(r3, MemOperand(ip));
} #endif
// Pop the arguments, restore registers, and return. // Pop the arguments, restore registers, and return.
mov(sp, Operand(fp)); // respect ABI stack constraint mov(sp, Operand(fp)); // respect ABI stack constraint
@ -491,6 +495,7 @@ void MacroAssembler::InvokeFunction(Register fun,
} }
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::SaveRegistersToMemory(RegList regs) { void MacroAssembler::SaveRegistersToMemory(RegList regs) {
ASSERT((regs & ~kJSCallerSaved) == 0); ASSERT((regs & ~kJSCallerSaved) == 0);
// Copy the content of registers to memory location. // Copy the content of registers to memory location.
@ -548,7 +553,7 @@ void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
} }
} }
} }
#endif
void MacroAssembler::PushTryHandler(CodeLocation try_location, void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) { HandlerType type) {
@ -674,10 +679,10 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Load current lexical context from the stack frame. // Load current lexical context from the stack frame.
ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
// In debug mode, make sure the lexical context is set. // In debug mode, make sure the lexical context is set.
if (kDebug) { #ifdef DEBUG
cmp(scratch, Operand(0)); cmp(scratch, Operand(0));
Check(ne, "we should not have an empty lexical context"); Check(ne, "we should not have an empty lexical context");
} #endif
// Load the global context of the current context. // Load the global context of the current context.
int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize; int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;

20
deps/v8/src/macro-assembler-arm.h → deps/v8/src/arm/macro-assembler-arm.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MACRO_ASSEMBLER_ARM_H_ #ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_MACRO_ASSEMBLER_ARM_H_ #define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "assembler.h" #include "assembler.h"
@ -86,7 +86,7 @@ class MacroAssembler: public Assembler {
void Call(Register target, Condition cond = al); void Call(Register target, Condition cond = al);
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al); void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al); void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Ret(); void Ret(Condition cond = al);
// Jumps to the label at the index given by the Smi in "index". // Jumps to the label at the index given by the Smi in "index".
void SmiJumpTable(Register index, Vector<Label*> targets); void SmiJumpTable(Register index, Vector<Label*> targets);
@ -138,6 +138,7 @@ class MacroAssembler: public Assembler {
InvokeFlag flag); InvokeFlag flag);
#ifdef ENABLE_DEBUGGER_SUPPORT
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Debugger Support // Debugger Support
@ -147,7 +148,7 @@ class MacroAssembler: public Assembler {
void CopyRegistersFromStackToMemory(Register base, void CopyRegistersFromStackToMemory(Register base,
Register scratch, Register scratch,
RegList regs); RegList regs);
#endif
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// Exception handling // Exception handling
@ -297,7 +298,16 @@ static inline MemOperand FieldMemOperand(Register object, int offset) {
} }
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_MACRO_ASSEMBLER_ARM_H_ #endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_
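The point of routing every emit through ACCESS_MASM: with GENERATED_CODE_COVERAGE defined, a line such as

    //   __ mov(r0, Operand(r3));
    // expands to
    //   masm->stop("arm/codegen-arm.cc:123"); masm->mov(r0, Operand(r3));

(file and line purely illustrative). The simulator half of this commit -- Debugger::Stop in arm/simulator-arm.cc below -- appends that string to the file named by V8_GENERATED_CODE_COVERAGE_LOG and then overwrites the stop with a nop, so each generated-code site is logged at most once and is free afterwards. Without the define, ACCESS_MASM(masm) is simply masm-> and nothing changes.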

2
deps/v8/src/regexp-macro-assembler-arm.cc → deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -28,7 +28,7 @@
#include "v8.h" #include "v8.h"
#include "ast.h" #include "ast.h"
#include "regexp-macro-assembler.h" #include "regexp-macro-assembler.h"
#include "regexp-macro-assembler-arm.h" #include "arm/regexp-macro-assembler-arm.h"
namespace v8 { namespace internal { namespace v8 { namespace internal {

6
deps/v8/src/regexp-macro-assembler-arm.h → deps/v8/src/arm/regexp-macro-assembler-arm.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef REGEXP_MACRO_ASSEMBLER_ARM_H_ #ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#define REGEXP_MACRO_ASSEMBLER_ARM_H_ #define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
namespace v8 { namespace internal { namespace v8 { namespace internal {
@ -38,4 +38,4 @@ class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
}} // namespace v8::internal }} // namespace v8::internal
#endif /* REGEXP_MACRO_ASSEMBLER_ARM_H_ */ #endif // V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_

8
deps/v8/src/register-allocator-arm.cc → deps/v8/src/arm/register-allocator-arm.cc

@ -66,6 +66,14 @@ void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
} }
bool RegisterAllocator::IsReserved(int reg_code) {
return (reg_code == sp.code())
|| (reg_code == fp.code())
|| (reg_code == cp.code())
|| (reg_code == pc.code());
}
void RegisterAllocator::Initialize() { void RegisterAllocator::Initialize() {
Reset(); Reset();
// The following registers are live on function entry, saved in the // The following registers are live on function entry, saved in the

95
deps/v8/src/simulator-arm.cc → deps/v8/src/arm/simulator-arm.cc

@ -30,8 +30,8 @@
#include "v8.h" #include "v8.h"
#include "disasm.h" #include "disasm.h"
#include "constants-arm.h" #include "arm/constants-arm.h"
#include "simulator-arm.h" #include "arm/simulator-arm.h"
#if !defined(__arm__) #if !defined(__arm__)
@ -90,12 +90,44 @@ Debugger::~Debugger() {
} }
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;
static void InitializeCoverage() {
char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
if (file_name != NULL) {
coverage_log = fopen(file_name, "aw+");
}
}
void Debugger::Stop(Instr* instr) {
char* str = reinterpret_cast<char*>(instr->InstructionBits() & 0x0fffffff);
if (strlen(str) > 0) {
if (coverage_log != NULL) {
fprintf(coverage_log, "%s\n", str);
fflush(coverage_log);
}
instr->SetInstructionBits(0xe1a00000); // Overwrite with nop.
}
sim_->set_pc(sim_->get_pc() + Instr::kInstrSize);
}
#else // ndef GENERATED_CODE_COVERAGE
static void InitializeCoverage() {
}
void Debugger::Stop(Instr* instr) { void Debugger::Stop(Instr* instr) {
const char* str = (const char*)(instr->InstructionBits() & 0x0fffffff); const char* str = (const char*)(instr->InstructionBits() & 0x0fffffff);
PrintF("Simulator hit %s\n", str); PrintF("Simulator hit %s\n", str);
sim_->set_pc(sim_->get_pc() + Instr::kInstrSize); sim_->set_pc(sim_->get_pc() + Instr::kInstrSize);
Debug(); Debug();
} }
#endif
static const char* reg_names[] = { "r0", "r1", "r2", "r3", static const char* reg_names[] = { "r0", "r1", "r2", "r3",
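InitializeCoverage and the coverage variant of Debugger::Stop above log each stop message and then overwrite the instruction with a nop so a site is only reported once. The logging half is plain C I/O driven by an environment variable; a rough stand-alone sketch under that assumption (only the variable name is taken from the patch, the helper names are invented):

    #include <cstdio>
    #include <cstdlib>

    static std::FILE* coverage_log = NULL;

    static void InitializeCoverage() {
      // The log file is chosen via an environment variable, as in the patch;
      // append mode lets repeated runs accumulate into one file.
      const char* file_name = std::getenv("V8_GENERATED_CODE_COVERAGE_LOG");
      if (file_name != NULL) {
        coverage_log = std::fopen(file_name, "a");
      }
    }

    static void RecordStop(const char* str) {
      if (str != NULL && str[0] != '\0' && coverage_log != NULL) {
        std::fprintf(coverage_log, "%s\n", str);
        std::fflush(coverage_log);
      }
    }

    int main() {
      InitializeCoverage();
      RecordStop("example.cc:42");  // one line per visited stop marker
      if (coverage_log != NULL) std::fclose(coverage_log);
      return 0;
    }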
@ -375,6 +407,7 @@ Simulator::Simulator() {
// access violation if the simulator ever tries to execute it. // access violation if the simulator ever tries to execute it.
registers_[pc] = bad_lr; registers_[pc] = bad_lr;
registers_[lr] = bad_lr; registers_[lr] = bad_lr;
InitializeCoverage();
} }
@ -427,6 +460,37 @@ int32_t Simulator::get_pc() const {
} }
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
void Simulator::GetFpArgs(double* x, double* y) {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[2 * sizeof(registers_[0])];
// Registers 0 and 1 -> x.
memcpy(buffer, registers_, sizeof(buffer));
memcpy(x, buffer, sizeof(buffer));
// Registers 2 and 3 -> y.
memcpy(buffer, registers_ + 2, sizeof(buffer));
memcpy(y, buffer, sizeof(buffer));
}
void Simulator::SetFpResult(const double& result) {
char buffer[2 * sizeof(registers_[0])];
memcpy(buffer, &result, sizeof(buffer));
// result -> registers 0 and 1.
memcpy(registers_, buffer, sizeof(buffer));
}
void Simulator::TrashCallerSaveRegisters() {
// We don't trash the registers with the return value.
registers_[2] = 0x50Bad4U;
registers_[3] = 0x50Bad4U;
registers_[12] = 0x50Bad4U;
}
// The ARM cannot do unaligned reads and writes. On some ARM platforms an // The ARM cannot do unaligned reads and writes. On some ARM platforms an
// interrupt is caused. On others it does a funky rotation thing. For now we // interrupt is caused. On others it does a funky rotation thing. For now we
// simply disallow unaligned reads, but at some point we may want to move to // simply disallow unaligned reads, but at some point we may want to move to
@ -862,7 +926,8 @@ typedef int64_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1);
// Software interrupt instructions are used by the simulator to call into the // Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime. // C-based V8 runtime.
void Simulator::SoftwareInterrupt(Instr* instr) { void Simulator::SoftwareInterrupt(Instr* instr) {
switch (instr->SwiField()) { int swi = instr->SwiField();
switch (swi) {
case call_rt_r5: { case call_rt_r5: {
SimulatorRuntimeCall target = SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(get_register(r5)); reinterpret_cast<SimulatorRuntimeCall>(get_register(r5));
@ -894,6 +959,30 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
dbg.Debug(); dbg.Debug();
break; break;
} }
{
double x, y, z;
case simulator_fp_add:
GetFpArgs(&x, &y);
z = x + y;
SetFpResult(z);
TrashCallerSaveRegisters();
set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
break;
case simulator_fp_sub:
GetFpArgs(&x, &y);
z = x - y;
SetFpResult(z);
TrashCallerSaveRegisters();
set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
break;
case simulator_fp_mul:
GetFpArgs(&x, &y);
z = x * y;
SetFpResult(z);
TrashCallerSaveRegisters();
set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
break;
}
default: { default: {
UNREACHABLE(); UNREACHABLE();
break; break;
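The new simulator_fp_add/sub/mul software interrupts emulate soft-float calls: the two double arguments arrive in r0..r3, the result is written back to r0/r1, and the remaining caller-save registers are deliberately trashed so generated code cannot rely on them surviving. A self-contained sketch of that round trip (the register layout and the 0x50Bad4 sentinel follow the patch; Registers and SimulateFpCall are toy stand-ins), using memcpy through the raw words to avoid the strict-aliasing problem the GetFpArgs comment mentions:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Toy register file: r0..r3 carry the two double arguments and r0/r1
    // receive the result.
    struct Registers { uint32_t r[16]; };

    enum FpSwi { kFpAdd, kFpSub, kFpMul };

    // Copying through memcpy keeps the compiler from optimizing the
    // reinterpretation away under strict-aliasing rules.
    static double ReadDouble(const uint32_t* words) {
      double value;
      std::memcpy(&value, words, sizeof(value));
      return value;
    }

    static void WriteDouble(uint32_t* words, double value) {
      std::memcpy(words, &value, sizeof(value));
    }

    static void SimulateFpCall(Registers* regs, FpSwi swi) {
      double x = ReadDouble(&regs->r[0]);  // registers 0 and 1 -> x
      double y = ReadDouble(&regs->r[2]);  // registers 2 and 3 -> y
      double z = 0.0;
      switch (swi) {
        case kFpAdd: z = x + y; break;
        case kFpSub: z = x - y; break;
        case kFpMul: z = x * y; break;
      }
      WriteDouble(&regs->r[0], z);  // result -> registers 0 and 1
      // Trash the caller-save registers that hold no result so generated
      // code cannot silently depend on them surviving the call.
      regs->r[2] = regs->r[3] = regs->r[12] = 0x50Bad4u;
    }

    int main() {
      Registers regs = {};
      WriteDouble(&regs.r[0], 1.5);
      WriteDouble(&regs.r[2], 2.25);
      SimulateFpCall(&regs, kFpAdd);
      std::printf("%g\n", ReadDouble(&regs.r[0]));  // 3.75
      return 0;
    }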

14
deps/v8/src/simulator-arm.h → deps/v8/src/arm/simulator-arm.h

@ -33,14 +33,14 @@
// which will start execution in the Simulator or forwards to the real entry // which will start execution in the Simulator or forwards to the real entry
// on a ARM HW platform. // on a ARM HW platform.
#ifndef V8_SIMULATOR_ARM_H_ #ifndef V8_ARM_SIMULATOR_ARM_H_
#define V8_SIMULATOR_ARM_H_ #define V8_ARM_SIMULATOR_ARM_H_
#if defined(__arm__) #if defined(__arm__)
// When running without a simulator we call the entry directly. // When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \ #define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4) reinterpret_cast<Object*>(entry(p0, p1, p2, p3, p4))
// Calculated the stack limit beyond which we will throw stack overflow errors. // Calculated the stack limit beyond which we will throw stack overflow errors.
// This macro must be called from a C++ method. It relies on being able to take // This macro must be called from a C++ method. It relies on being able to take
@ -174,6 +174,12 @@ class Simulator {
// Executes one instruction. // Executes one instruction.
void InstructionDecode(Instr* instr); void InstructionDecode(Instr* instr);
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
void GetFpArgs(double* x, double* y);
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
// architecture state // architecture state
int32_t registers_[16]; int32_t registers_[16];
bool n_flag_; bool n_flag_;
@ -195,4 +201,4 @@ class Simulator {
#endif // defined(__arm__) #endif // defined(__arm__)
#endif // V8_SIMULATOR_ARM_H_ #endif // V8_ARM_SIMULATOR_ARM_H_

15
deps/v8/src/stub-cache-arm.cc → deps/v8/src/arm/stub-cache-arm.cc

@ -33,7 +33,7 @@
namespace v8 { namespace internal { namespace v8 { namespace internal {
#define __ masm-> #define __ ACCESS_MASM(masm)
static void ProbeTable(MacroAssembler* masm, static void ProbeTable(MacroAssembler* masm,
@ -183,7 +183,7 @@ void StubCompiler::GenerateLoadField(MacroAssembler* masm,
// Check that the maps haven't changed. // Check that the maps haven't changed.
Register reg = Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label); masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
GenerateFastPropertyLoad(masm, r0, reg, holder, index); GenerateFastPropertyLoad(masm, r0, reg, holder, index);
__ Ret(); __ Ret();
} }
@ -203,7 +203,7 @@ void StubCompiler::GenerateLoadConstant(MacroAssembler* masm,
// Check that the maps haven't changed. // Check that the maps haven't changed.
Register reg = Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label); masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Return the constant value. // Return the constant value.
__ mov(r0, Operand(Handle<Object>(value))); __ mov(r0, Operand(Handle<Object>(value)));
@ -226,7 +226,7 @@ void StubCompiler::GenerateLoadCallback(MacroAssembler* masm,
// Check that the maps haven't changed. // Check that the maps haven't changed.
Register reg = Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label); masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Push the arguments on the JS stack of the caller. // Push the arguments on the JS stack of the caller.
__ push(receiver); // receiver __ push(receiver); // receiver
@ -256,7 +256,7 @@ void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
// Check that the maps haven't changed. // Check that the maps haven't changed.
Register reg = Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label); masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Push the arguments on the JS stack of the caller. // Push the arguments on the JS stack of the caller.
__ push(receiver); // receiver __ push(receiver); // receiver
@ -456,8 +456,7 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
#undef __ #undef __
#define __ masm()-> #define __ ACCESS_MASM(masm())
Object* StubCompiler::CompileLazyCompile(Code::Flags flags) { Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
@ -511,7 +510,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
// Do the right check and compute the holder register. // Do the right check and compute the holder register.
Register reg = Register reg =
__ CheckMaps(JSObject::cast(object), r0, holder, r3, r2, &miss); masm()->CheckMaps(JSObject::cast(object), r0, holder, r3, r2, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, holder, index); GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
// Check that the function really is a function. // Check that the function really is a function.
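The stub compiler above switches __ from plain masm-> to ACCESS_MASM(masm) and, at the same time, rewrites __ CheckMaps(...) as a direct masm->CheckMaps(...) call wherever the result initializes a register. The reason is that the coverage form of ACCESS_MASM expands to a stop() statement followed by masm->, which works as a statement prefix but is not an expression. A toy illustration of the constraint (the Masm type and the message strings are made up):

    #include <cstdio>

    struct Masm {
      void stop(const char* where) { std::printf("stop: %s\n", where); }
      int CheckMaps() { return 7; }
      void Ret() { std::printf("ret\n"); }
    };

    // Coverage builds prepend a stop() statement to every masm access,
    // mirroring the macro from the patch.
    #define ACCESS_MASM(masm) masm->stop("covered"); masm->
    #define __ ACCESS_MASM(masm)

    int main() {
      Masm m;
      Masm* masm = &m;
      __ Ret();  // expands to: masm->stop("covered"); masm->Ret();  -- fine
      // int reg = __ CheckMaps();  // would not compile: the expansion is a
      //                            // statement sequence, not an expression,
      //                            // which is why assigned results call
      //                            // masm->CheckMaps() directly.
      int reg = masm->CheckMaps();
      std::printf("reg = %d\n", reg);
      return 0;
    }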

13
deps/v8/src/virtual-frame-arm.cc → deps/v8/src/arm/virtual-frame-arm.cc

@ -36,7 +36,8 @@ namespace v8 { namespace internal {
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// VirtualFrame implementation. // VirtualFrame implementation.
#define __ masm_-> #define __ ACCESS_MASM(masm_)
// On entry to a function, the virtual frame already contains the // On entry to a function, the virtual frame already contains the
// receiver and the parameters. All initial frame elements are in // receiver and the parameters. All initial frame elements are in
@ -70,6 +71,16 @@ void VirtualFrame::SyncElementByPushing(int index) {
} }
void VirtualFrame::SyncRange(int begin, int end) {
// All elements are in memory on ARM (ie, synced).
#ifdef DEBUG
for (int i = begin; i <= end; i++) {
ASSERT(elements_[i].is_synced());
}
#endif
}
void VirtualFrame::MergeTo(VirtualFrame* expected) { void VirtualFrame::MergeTo(VirtualFrame* expected) {
Comment cmnt(masm_, "[ Merge frame"); Comment cmnt(masm_, "[ Merge frame");
// We should always be merging the code generator's current frame to an // We should always be merging the code generator's current frame to an

6
deps/v8/src/virtual-frame-arm.h → deps/v8/src/arm/virtual-frame-arm.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_VIRTUAL_FRAME_ARM_H_ #ifndef V8_ARM_VIRTUAL_FRAME_ARM_H_
#define V8_VIRTUAL_FRAME_ARM_H_ #define V8_ARM_VIRTUAL_FRAME_ARM_H_
#include "register-allocator.h" #include "register-allocator.h"
@ -477,4 +477,4 @@ class VirtualFrame : public Malloced {
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_VIRTUAL_FRAME_ARM_H_ #endif // V8_ARM_VIRTUAL_FRAME_ARM_H_

165
deps/v8/src/array.js

@ -709,32 +709,91 @@ function ArraySort(comparefn) {
QuickSort(a, high_start, to); QuickSort(a, high_start, to);
} }
var old_length = ToUint32(this.length); // Copies elements in the range 0..length from obj's prototype chain
if (old_length < 2) return this; // to obj itself, if obj has holes. Returns one more than the maximal index
// of a prototype property.
function CopyFromPrototype(obj, length) {
var max = 0;
for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
var indices = %GetArrayKeys(proto, length);
if (indices.length > 0) {
if (indices[0] == -1) {
// It's an interval.
var proto_length = indices[1];
for (var i = 0; i < proto_length; i++) {
if (!obj.hasOwnProperty(i) && proto.hasOwnProperty(i)) {
obj[i] = proto[i];
if (i >= max) { max = i + 1; }
}
}
} else {
for (var i = 0; i < indices.length; i++) {
var index = indices[i];
if (!IS_UNDEFINED(index) &&
!obj.hasOwnProperty(index) && proto.hasOwnProperty(index)) {
obj[index] = proto[index];
if (index >= max) { max = index + 1; }
}
}
}
}
}
return max;
}
%RemoveArrayHoles(this); // Set a value of "undefined" on all indices in the range from..to
// where a prototype of obj has an element. I.e., shadow all prototype
// elements in that range.
function ShadowPrototypeElements(obj, from, to) {
for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
var indices = %GetArrayKeys(proto, to);
if (indices.length > 0) {
if (indices[0] == -1) {
// It's an interval.
var proto_length = indices[1];
for (var i = from; i < proto_length; i++) {
if (proto.hasOwnProperty(i)) {
obj[i] = void 0;
}
}
} else {
for (var i = 0; i < indices.length; i++) {
var index = indices[i];
if (!IS_UNDEFINED(index) && from <= index &&
proto.hasOwnProperty(index)) {
obj[index] = void 0;
}
}
}
}
}
}
var length = ToUint32(this.length); var length = ToUint32(this.length);
if (length < 2) return this;
// Move undefined elements to the end of the array. var is_array = IS_ARRAY(this);
for (var i = 0; i < length; ) { var max_prototype_element;
if (IS_UNDEFINED(this[i])) { if (!is_array) {
length--; // For compatibility with JSC, we also sort elements inherited from
this[i] = this[length]; // the prototype chain on non-Array objects.
this[length] = void 0; // We do this by copying them to this object and sorting only
} else { // local elements. This is not very efficient, but sorting with
i++; // inherited elements happens very, very rarely, if at all.
} // The specification allows "implementation dependent" behavior
// if an element on the prototype chain has an element that
// might interact with sorting.
max_prototype_element = CopyFromPrototype(this, length);
} }
QuickSort(this, 0, length); var num_non_undefined = %RemoveArrayHoles(this, length);
// We only changed the length of the this object (in QuickSort(this, 0, num_non_undefined);
// RemoveArrayHoles) if it was an array. We are not allowed to set
// the length of the this object if it is not an array because this if (!is_array && (num_non_undefined + 1 < max_prototype_element)) {
// might introduce a new length property. // For compatibility with JSC, we shadow any elements in the prototype
if (IS_ARRAY(this)) { // chain that has become exposed by sort moving a hole to its position.
this.length = old_length; ShadowPrototypeElements(this, num_non_undefined, max_prototype_element);
} }
return this; return this;
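For non-array receivers, the rewritten ArraySort first copies elements reachable through the prototype chain into the object (CopyFromPrototype), sorts only the own elements into a dense prefix, and finally writes undefined over any prototype element a hole would otherwise expose (ShadowPrototypeElements). A rough C++ analogue of that three-step shape, with two maps standing in for own and inherited elements and -1 playing the role of undefined (all names are illustrative):

    #include <algorithm>
    #include <cstdio>
    #include <map>
    #include <vector>

    static const int kUndefined = -1;

    static void SortWithPrototype(std::map<int, int>* own,
                                  const std::map<int, int>& proto,
                                  int length) {
      // 1. CopyFromPrototype: fill holes from the prototype and remember one
      //    past the highest index copied.
      int max_prototype_element = 0;
      for (const auto& entry : proto) {
        if (entry.first < length && own->count(entry.first) == 0) {
          (*own)[entry.first] = entry.second;
          max_prototype_element =
              std::max(max_prototype_element, entry.first + 1);
        }
      }
      // 2. RemoveArrayHoles + QuickSort: compact the defined own elements
      //    and sort them into a dense prefix.
      std::vector<int> values;
      for (const auto& entry : *own) {
        if (entry.second != kUndefined) values.push_back(entry.second);
      }
      std::sort(values.begin(), values.end());
      own->clear();
      int next = 0;
      for (int value : values) (*own)[next++] = value;
      // 3. ShadowPrototypeElements: holes past the dense prefix must not
      //    expose prototype values, so overwrite them with "undefined".
      for (int i = next; i < max_prototype_element; i++) {
        if (proto.count(i) > 0) (*own)[i] = kUndefined;
      }
    }

    int main() {
      std::map<int, int> own = {{0, 9}, {2, kUndefined}};
      std::map<int, int> proto = {{1, 5}, {2, 7}, {3, 4}};
      SortWithPrototype(&own, proto, 4);
      for (const auto& e : own) std::printf("[%d]=%d\n", e.first, e.second);
      return 0;
    }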
@ -879,6 +938,62 @@ function ArrayLastIndexOf(element, index) {
} }
function ArrayReduce(callback, current) {
if (!IS_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = this.length;
var i = 0;
find_initial: if (%_ArgumentsLength() < 2) {
for (; i < length; i++) {
current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
i++;
break find_initial;
}
}
throw MakeTypeError('reduce_no_initial', []);
}
for (; i < length; i++) {
var element = this[i];
if (!IS_UNDEFINED(element) || i in this) {
current = callback.call(null, current, element, i, this);
}
}
return current;
}
function ArrayReduceRight(callback, current) {
if (!IS_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
var i = this.length - 1;
find_initial: if (%_ArgumentsLength() < 2) {
for (; i >= 0; i--) {
current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
i--;
break find_initial;
}
}
throw MakeTypeError('reduce_no_initial', []);
}
for (; i >= 0; i--) {
var element = this[i];
if (!IS_UNDEFINED(element) || i in this) {
current = callback.call(null, current, element, i, this);
}
}
return current;
}
// ------------------------------------------------------------------- // -------------------------------------------------------------------
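ArrayReduce and ArrayReduceRight above share one skeleton: if no initial value was passed, scan forward (or backward) for the first present element to seed the accumulator, throwing if none exists, then fold the callback over the remaining present elements. A compact analogue of the left fold, with std::optional playing the optional initial argument and a presence mask standing in for holes (a sketch of the control flow, not of JS semantics):

    #include <cstddef>
    #include <cstdio>
    #include <functional>
    #include <optional>
    #include <stdexcept>
    #include <vector>

    // elements[i] only counts when present[i] is true, mimicking holes.
    static int Reduce(const std::vector<int>& elements,
                      const std::vector<bool>& present,
                      const std::function<int(int, int, std::size_t)>& callback,
                      std::optional<int> current = std::nullopt) {
      std::size_t i = 0;
      if (!current.has_value()) {
        // No initial value: the first present element seeds the accumulator.
        for (; i < elements.size(); i++) {
          if (present[i]) { current = elements[i]; i++; break; }
        }
        if (!current.has_value()) {
          throw std::runtime_error("reduce of empty array with no initial value");
        }
      }
      for (; i < elements.size(); i++) {
        if (present[i]) current = callback(*current, elements[i], i);
      }
      return *current;
    }

    int main() {
      std::vector<int> values = {1, 0, 3, 4};
      std::vector<bool> present = {true, false, true, true};
      int sum = Reduce(values, present,
                       [](int acc, int value, std::size_t) { return acc + value; });
      std::printf("%d\n", sum);  // 8: the hole at index 1 is skipped
      return 0;
    }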
@ -890,7 +1005,6 @@ function UpdateFunctionLengths(lengths) {
// ------------------------------------------------------------------- // -------------------------------------------------------------------
function SetupArray() { function SetupArray() {
// Setup non-enumerable constructor property on the Array.prototype // Setup non-enumerable constructor property on the Array.prototype
// object. // object.
@ -898,7 +1012,7 @@ function SetupArray() {
// Setup non-enumerable functions of the Array.prototype object and // Setup non-enumerable functions of the Array.prototype object and
// set their names. // set their names.
InstallFunctions($Array.prototype, DONT_ENUM, $Array( InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
"toString", ArrayToString, "toString", ArrayToString,
"toLocaleString", ArrayToLocaleString, "toLocaleString", ArrayToLocaleString,
"join", ArrayJoin, "join", ArrayJoin,
@ -917,8 +1031,9 @@ function SetupArray() {
"every", ArrayEvery, "every", ArrayEvery,
"map", ArrayMap, "map", ArrayMap,
"indexOf", ArrayIndexOf, "indexOf", ArrayIndexOf,
"lastIndexOf", ArrayLastIndexOf "lastIndexOf", ArrayLastIndexOf,
)); "reduce", ArrayReduce,
"reduceRight", ArrayReduceRight));
// Manipulate the length of some of the functions to meet // Manipulate the length of some of the functions to meet
// expectations set by ECMA-262 or Mozilla. // expectations set by ECMA-262 or Mozilla.
@ -930,7 +1045,9 @@ function SetupArray() {
ArrayMap: 1, ArrayMap: 1,
ArrayIndexOf: 1, ArrayIndexOf: 1,
ArrayLastIndexOf: 1, ArrayLastIndexOf: 1,
ArrayPush: 1 ArrayPush: 1,
ArrayReduce: 1,
ArrayReduceRight: 1
}); });
} }

56
deps/v8/src/assembler.cc

@ -521,10 +521,10 @@ ExternalReference::ExternalReference(Runtime::Function* f)
ExternalReference::ExternalReference(const IC_Utility& ic_utility) ExternalReference::ExternalReference(const IC_Utility& ic_utility)
: address_(ic_utility.address()) {} : address_(ic_utility.address()) {}
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference::ExternalReference(const Debug_Address& debug_address) ExternalReference::ExternalReference(const Debug_Address& debug_address)
: address_(debug_address.address()) {} : address_(debug_address.address()) {}
#endif
ExternalReference::ExternalReference(StatsCounter* counter) ExternalReference::ExternalReference(StatsCounter* counter)
: address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {} : address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
@ -557,29 +557,71 @@ ExternalReference ExternalReference::address_of_regexp_stack_limit() {
} }
ExternalReference ExternalReference::debug_break() {
return ExternalReference(FUNCTION_ADDR(Debug::Break));
}
ExternalReference ExternalReference::new_space_start() { ExternalReference ExternalReference::new_space_start() {
return ExternalReference(Heap::NewSpaceStart()); return ExternalReference(Heap::NewSpaceStart());
} }
ExternalReference ExternalReference::new_space_allocation_top_address() { ExternalReference ExternalReference::new_space_allocation_top_address() {
return ExternalReference(Heap::NewSpaceAllocationTopAddress()); return ExternalReference(Heap::NewSpaceAllocationTopAddress());
} }
ExternalReference ExternalReference::heap_always_allocate_scope_depth() { ExternalReference ExternalReference::heap_always_allocate_scope_depth() {
return ExternalReference(Heap::always_allocate_scope_depth_address()); return ExternalReference(Heap::always_allocate_scope_depth_address());
} }
ExternalReference ExternalReference::new_space_allocation_limit_address() { ExternalReference ExternalReference::new_space_allocation_limit_address() {
return ExternalReference(Heap::NewSpaceAllocationLimitAddress()); return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
} }
static double add_two_doubles(double x, double y) {
return x + y;
}
static double sub_two_doubles(double x, double y) {
return x - y;
}
static double mul_two_doubles(double x, double y) {
return x * y;
}
ExternalReference ExternalReference::double_fp_operation(
Token::Value operation) {
typedef double BinaryFPOperation(double x, double y);
BinaryFPOperation* function = NULL;
switch (operation) {
case Token::ADD:
function = &add_two_doubles;
break;
case Token::SUB:
function = &sub_two_doubles;
break;
case Token::MUL:
function = &mul_two_doubles;
break;
default:
UNREACHABLE();
}
return ExternalReference(FUNCTION_ADDR(function));
}
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference ExternalReference::debug_break() {
return ExternalReference(FUNCTION_ADDR(Debug::Break));
}
ExternalReference ExternalReference::debug_step_in_fp_address() { ExternalReference ExternalReference::debug_step_in_fp_address() {
return ExternalReference(Debug::step_in_fp_addr()); return ExternalReference(Debug::step_in_fp_addr());
} }
#endif
} } // namespace v8::internal } } // namespace v8::internal
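ExternalReference::double_fp_operation above maps a token (ADD, SUB or MUL) to the address of an ordinary C function so that generated code on targets without hardware floating point can call back into the runtime for the arithmetic. The selection is just a function-pointer switch; a minimal sketch of the same pattern (the Token enum here is a stand-in for V8's):

    #include <cstdio>

    typedef double BinaryFPOperation(double x, double y);

    enum class Token { ADD, SUB, MUL };

    static double add_two_doubles(double x, double y) { return x + y; }
    static double sub_two_doubles(double x, double y) { return x - y; }
    static double mul_two_doubles(double x, double y) { return x * y; }

    // Returns the address generated code should call for 'operation'.
    static BinaryFPOperation* SelectFpOperation(Token operation) {
      switch (operation) {
        case Token::ADD: return &add_two_doubles;
        case Token::SUB: return &sub_two_doubles;
        case Token::MUL: return &mul_two_doubles;
      }
      return nullptr;  // unreachable for the three supported tokens
    }

    int main() {
      BinaryFPOperation* op = SelectFpOperation(Token::MUL);
      std::printf("%g\n", op(6.0, 7.0));  // 42
      return 0;
    }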

37
deps/v8/src/assembler.h

@ -38,6 +38,7 @@
#include "runtime.h" #include "runtime.h"
#include "top.h" #include "top.h"
#include "zone-inl.h" #include "zone-inl.h"
#include "token.h"
namespace v8 { namespace internal { namespace v8 { namespace internal {
@ -340,29 +341,15 @@ class RelocIterator: public Malloced {
}; };
// A stack-allocated code region logs a name for the code generated
// while the region is in effect. This information is used by the
// profiler to categorize ticks within generated code.
class CodeRegion BASE_EMBEDDED {
public:
inline CodeRegion(Assembler* assm, const char *name) : assm_(assm) {
LOG(BeginCodeRegionEvent(this, assm, name));
}
inline ~CodeRegion() {
LOG(EndCodeRegionEvent(this, assm_));
}
private:
Assembler* assm_;
};
//------------------------------------------------------------------------------ //------------------------------------------------------------------------------
// External function // External function
//---------------------------------------------------------------------------- //----------------------------------------------------------------------------
class IC_Utility; class IC_Utility;
class Debug_Address;
class SCTableReference; class SCTableReference;
#ifdef ENABLE_DEBUGGER_SUPPORT
class Debug_Address;
#endif
// An ExternalReference represents a C++ address called from the generated // An ExternalReference represents a C++ address called from the generated
// code. All references to C++ functions and must be encapsulated in an // code. All references to C++ functions and must be encapsulated in an
@ -380,7 +367,9 @@ class ExternalReference BASE_EMBEDDED {
explicit ExternalReference(const IC_Utility& ic_utility); explicit ExternalReference(const IC_Utility& ic_utility);
#ifdef ENABLE_DEBUGGER_SUPPORT
explicit ExternalReference(const Debug_Address& debug_address); explicit ExternalReference(const Debug_Address& debug_address);
#endif
explicit ExternalReference(StatsCounter* counter); explicit ExternalReference(StatsCounter* counter);
@ -403,9 +392,6 @@ class ExternalReference BASE_EMBEDDED {
// Static variable RegExpStack::limit_address() // Static variable RegExpStack::limit_address()
static ExternalReference address_of_regexp_stack_limit(); static ExternalReference address_of_regexp_stack_limit();
// Function Debug::Break()
static ExternalReference debug_break();
// Static variable Heap::NewSpaceStart() // Static variable Heap::NewSpaceStart()
static ExternalReference new_space_start(); static ExternalReference new_space_start();
static ExternalReference heap_always_allocate_scope_depth(); static ExternalReference heap_always_allocate_scope_depth();
@ -414,11 +400,18 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference new_space_allocation_top_address(); static ExternalReference new_space_allocation_top_address();
static ExternalReference new_space_allocation_limit_address(); static ExternalReference new_space_allocation_limit_address();
// Used to check if single stepping is enabled in generated code. static ExternalReference double_fp_operation(Token::Value operation);
static ExternalReference debug_step_in_fp_address();
Address address() const {return address_;} Address address() const {return address_;}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Function Debug::Break()
static ExternalReference debug_break();
// Used to check if single stepping is enabled in generated code.
static ExternalReference debug_step_in_fp_address();
#endif
private: private:
explicit ExternalReference(void* address) explicit ExternalReference(void* address)
: address_(reinterpret_cast<Address>(address)) {} : address_(reinterpret_cast<Address>(address)) {}

21
deps/v8/src/ast.cc

@ -152,6 +152,27 @@ ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
} }
bool ObjectLiteral::IsValidJSON() {
int length = properties()->length();
for (int i = 0; i < length; i++) {
Property* prop = properties()->at(i);
if (!prop->value()->IsValidJSON())
return false;
}
return true;
}
bool ArrayLiteral::IsValidJSON() {
int length = values()->length();
for (int i = 0; i < length; i++) {
if (!values()->at(i)->IsValidJSON())
return false;
}
return true;
}
void TargetCollector::AddTarget(BreakTarget* target) { void TargetCollector::AddTarget(BreakTarget* target) {
// Add the label to the collector, but discard duplicates. // Add the label to the collector, but discard duplicates.
int length = targets_->length(); int length = targets_->length();

7
deps/v8/src/ast.h

@ -155,6 +155,7 @@ class Expression: public Node {
public: public:
virtual Expression* AsExpression() { return this; } virtual Expression* AsExpression() { return this; }
virtual bool IsValidJSON() { return false; }
virtual bool IsValidLeftHandSide() { return false; } virtual bool IsValidLeftHandSide() { return false; }
// Mark the expression as being compiled as an expression // Mark the expression as being compiled as an expression
@ -625,6 +626,8 @@ class Literal: public Expression {
return handle_.is_identical_to(other->handle_); return handle_.is_identical_to(other->handle_);
} }
virtual bool IsValidJSON() { return true; }
// Identity testers. // Identity testers.
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); } bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); } bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
@ -653,6 +656,8 @@ class MaterializedLiteral: public Expression {
// constants and simple object and array literals. // constants and simple object and array literals.
bool is_simple() const { return is_simple_; } bool is_simple() const { return is_simple_; }
virtual bool IsValidJSON() { return true; }
int depth() const { return depth_; } int depth() const { return depth_; }
private: private:
@ -704,6 +709,7 @@ class ObjectLiteral: public MaterializedLiteral {
virtual ObjectLiteral* AsObjectLiteral() { return this; } virtual ObjectLiteral* AsObjectLiteral() { return this; }
virtual void Accept(AstVisitor* v); virtual void Accept(AstVisitor* v);
virtual bool IsValidJSON();
Handle<FixedArray> constant_properties() const { Handle<FixedArray> constant_properties() const {
return constant_properties_; return constant_properties_;
@ -751,6 +757,7 @@ class ArrayLiteral: public MaterializedLiteral {
virtual void Accept(AstVisitor* v); virtual void Accept(AstVisitor* v);
virtual ArrayLiteral* AsArrayLiteral() { return this; } virtual ArrayLiteral* AsArrayLiteral() { return this; }
virtual bool IsValidJSON();
Handle<FixedArray> literals() const { return literals_; } Handle<FixedArray> literals() const { return literals_; }
ZoneList<Expression*>* values() const { return values_; } ZoneList<Expression*>* values() const { return values_; }

64
deps/v8/src/bootstrapper.cc

@ -530,7 +530,7 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
global_context()->function_instance_map()->set_prototype(*empty_function); global_context()->function_instance_map()->set_prototype(*empty_function);
// Allocate the function map first and then patch the prototype later // Allocate the function map first and then patch the prototype later
Handle<Map> empty_fm = Factory::CopyMap(fm); Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(fm);
empty_fm->set_instance_descriptors(*function_map_descriptors); empty_fm->set_instance_descriptors(*function_map_descriptors);
empty_fm->set_prototype(global_context()->object_function()->prototype()); empty_fm->set_prototype(global_context()->object_function()->prototype());
empty_function->set_map(*empty_fm); empty_function->set_map(*empty_fm);
@ -741,6 +741,19 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
global_context()->set_regexp_function(*regexp_fun); global_context()->set_regexp_function(*regexp_fun);
} }
{ // -- J S O N
Handle<String> name = Factory::NewStringFromAscii(CStrVector("JSON"));
Handle<JSFunction> cons = Factory::NewFunction(
name,
Factory::the_hole_value());
cons->SetInstancePrototype(global_context()->initial_object_prototype());
cons->SetInstanceClassName(*name);
Handle<JSObject> json_object = Factory::NewJSObject(cons, TENURED);
ASSERT(json_object->IsJSObject());
SetProperty(global, name, json_object, DONT_ENUM);
global_context()->set_json_object(*json_object);
}
{ // --- arguments_boilerplate_ { // --- arguments_boilerplate_
// Make sure we can recognize argument objects at runtime. // Make sure we can recognize argument objects at runtime.
// This is done by introducing an anonymous function with // This is done by introducing an anonymous function with
@ -820,6 +833,9 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
// Initialize the out of memory slot. // Initialize the out of memory slot.
global_context()->set_out_of_memory(Heap::false_value()); global_context()->set_out_of_memory(Heap::false_value());
// Initialize the data slot.
global_context()->set_data(Heap::undefined_value());
} }
@ -832,12 +848,16 @@ bool Genesis::CompileBuiltin(int index) {
bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) { bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) {
HandleScope scope; HandleScope scope;
#ifdef ENABLE_DEBUGGER_SUPPORT
Debugger::set_compiling_natives(true); Debugger::set_compiling_natives(true);
#endif
bool result = bool result =
CompileScriptCached(name, source, &natives_cache, NULL, true); CompileScriptCached(name, source, &natives_cache, NULL, true);
ASSERT(Top::has_pending_exception() != result); ASSERT(Top::has_pending_exception() != result);
if (!result) Top::clear_pending_exception(); if (!result) Top::clear_pending_exception();
#ifdef ENABLE_DEBUGGER_SUPPORT
Debugger::set_compiling_natives(false); Debugger::set_compiling_natives(false);
#endif
return result; return result;
} }
@ -853,9 +873,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
// If we can't find the function in the cache, we compile a new // If we can't find the function in the cache, we compile a new
// function and insert it into the cache. // function and insert it into the cache.
if (!cache->Lookup(name, &boilerplate)) { if (!cache->Lookup(name, &boilerplate)) {
#ifdef DEBUG ASSERT(source->IsAsciiRepresentation());
ASSERT(StringShape(*source).IsAsciiRepresentation());
#endif
Handle<String> script_name = Factory::NewStringFromUtf8(name); Handle<String> script_name = Factory::NewStringFromUtf8(name);
boilerplate = boilerplate =
Compiler::Compile(source, script_name, 0, 0, extension, NULL); Compiler::Compile(source, script_name, 0, 0, extension, NULL);
@ -1015,6 +1033,13 @@ bool Genesis::InstallNatives() {
Factory::LookupAsciiSymbol("column_offset"), Factory::LookupAsciiSymbol("column_offset"),
proxy_column_offset, proxy_column_offset,
common_attributes); common_attributes);
Handle<Proxy> proxy_data = Factory::NewProxy(&Accessors::ScriptData);
script_descriptors =
Factory::CopyAppendProxyDescriptor(
script_descriptors,
Factory::LookupAsciiSymbol("data"),
proxy_data,
common_attributes);
Handle<Proxy> proxy_type = Factory::NewProxy(&Accessors::ScriptType); Handle<Proxy> proxy_type = Factory::NewProxy(&Accessors::ScriptType);
script_descriptors = script_descriptors =
Factory::CopyAppendProxyDescriptor( Factory::CopyAppendProxyDescriptor(
@ -1030,6 +1055,14 @@ bool Genesis::InstallNatives() {
Factory::LookupAsciiSymbol("line_ends"), Factory::LookupAsciiSymbol("line_ends"),
proxy_line_ends, proxy_line_ends,
common_attributes); common_attributes);
Handle<Proxy> proxy_context_data =
Factory::NewProxy(&Accessors::ScriptContextData);
script_descriptors =
Factory::CopyAppendProxyDescriptor(
script_descriptors,
Factory::LookupAsciiSymbol("context_data"),
proxy_context_data,
common_attributes);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map()); Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
script_map->set_instance_descriptors(*script_descriptors); script_map->set_instance_descriptors(*script_descriptors);
@ -1057,6 +1090,10 @@ bool Genesis::InstallNatives() {
Natives::GetIndex("regexp"), Natives::GetIndex("regexp"),
Top::global_context(), Top::global_context(),
Handle<Context>(Top::context()->runtime_context())); Handle<Context>(Top::context()->runtime_context()));
SetupLazy(Handle<JSObject>(global_context()->json_object()),
Natives::GetIndex("json"),
Top::global_context(),
Handle<Context>(Top::context()->runtime_context()));
} else if (strlen(FLAG_natives_file) != 0) { } else if (strlen(FLAG_natives_file) != 0) {
// Otherwise install natives from natives file if file exists and // Otherwise install natives from natives file if file exists and
@ -1132,6 +1169,7 @@ bool Genesis::InstallSpecialObjects() {
Handle<JSObject>(js_global->builtins()), DONT_ENUM); Handle<JSObject>(js_global->builtins()), DONT_ENUM);
} }
#ifdef ENABLE_DEBUGGER_SUPPORT
// Expose the debug global object in global if a name for it is specified. // Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) { if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
// If loading fails we just bail out without installing the // If loading fails we just bail out without installing the
@ -1149,6 +1187,7 @@ bool Genesis::InstallSpecialObjects() {
SetProperty(js_global, debug_string, SetProperty(js_global, debug_string,
Handle<Object>(Debug::debug_context()->global_proxy()), DONT_ENUM); Handle<Object>(Debug::debug_context()->global_proxy()), DONT_ENUM);
} }
#endif
return true; return true;
} }
@ -1403,7 +1442,7 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
Handle<DescriptorArray> function_map_descriptors = Handle<DescriptorArray> function_map_descriptors =
ComputeFunctionInstanceDescriptor(false, true); ComputeFunctionInstanceDescriptor(false, true);
Handle<Map> fm = Factory::CopyMap(Top::function_map()); Handle<Map> fm = Factory::CopyMapDropDescriptors(Top::function_map());
fm->set_instance_descriptors(*function_map_descriptors); fm->set_instance_descriptors(*function_map_descriptors);
Top::context()->global_context()->set_function_map(*fm); Top::context()->global_context()->set_function_map(*fm);
} }
@ -1442,11 +1481,20 @@ void Genesis::BuildSpecialFunctionTable() {
Handle<JSFunction> function = Handle<JSFunction> function =
Handle<JSFunction>( Handle<JSFunction>(
JSFunction::cast(global->GetProperty(Heap::Array_symbol()))); JSFunction::cast(global->GetProperty(Heap::Array_symbol())));
Handle<JSObject> prototype = Handle<JSObject> visible_prototype =
Handle<JSObject>(JSObject::cast(function->prototype())); Handle<JSObject>(JSObject::cast(function->prototype()));
AddSpecialFunction(prototype, "pop", // Remember to put push and pop on the hidden prototype if it's there.
Handle<JSObject> push_and_pop_prototype;
Handle<Object> superproto(visible_prototype->GetPrototype());
if (superproto->IsJSObject() &&
JSObject::cast(*superproto)->map()->is_hidden_prototype()) {
push_and_pop_prototype = Handle<JSObject>::cast(superproto);
} else {
push_and_pop_prototype = visible_prototype;
}
AddSpecialFunction(push_and_pop_prototype, "pop",
Handle<Code>(Builtins::builtin(Builtins::ArrayPop))); Handle<Code>(Builtins::builtin(Builtins::ArrayPop)));
AddSpecialFunction(prototype, "push", AddSpecialFunction(push_and_pop_prototype, "push",
Handle<Code>(Builtins::builtin(Builtins::ArrayPush))); Handle<Code>(Builtins::builtin(Builtins::ArrayPush)));
} }

3
deps/v8/src/builtins.cc

@ -559,6 +559,7 @@ static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
} }
#ifdef ENABLE_DEBUGGER_SUPPORT
static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) { static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
Debug::GenerateLoadICDebugBreak(masm); Debug::GenerateLoadICDebugBreak(masm);
} }
@ -597,7 +598,7 @@ static void Generate_Return_DebugBreakEntry(MacroAssembler* masm) {
static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) { static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
Debug::GenerateStubNoRegistersDebugBreak(masm); Debug::GenerateStubNoRegistersDebugBreak(masm);
} }
#endif
Object* Builtins::builtins_[builtin_count] = { NULL, }; Object* Builtins::builtins_[builtin_count] = { NULL, };
const char* Builtins::names_[builtin_count] = { NULL, }; const char* Builtins::names_[builtin_count] = { NULL, };

5
deps/v8/src/builtins.h

@ -83,6 +83,7 @@ namespace v8 { namespace internal {
V(FunctionApply, BUILTIN, UNINITIALIZED) V(FunctionApply, BUILTIN, UNINITIALIZED)
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly. // Define list of builtins used by the debugger implemented in assembly.
#define BUILTIN_LIST_DEBUG_A(V) \ #define BUILTIN_LIST_DEBUG_A(V) \
V(Return_DebugBreak, BUILTIN, DEBUG_BREAK) \ V(Return_DebugBreak, BUILTIN, DEBUG_BREAK) \
@ -93,7 +94,9 @@ namespace v8 { namespace internal {
V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK) \ V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK) \
V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK) \ V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK) \
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK) V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK)
#else
#define BUILTIN_LIST_DEBUG_A(V)
#endif
// Define list of builtins implemented in JavaScript. // Define list of builtins implemented in JavaScript.
#define BUILTINS_LIST_JS(V) \ #define BUILTINS_LIST_JS(V) \
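The builtins.h change is the usual way to make an X-macro list conditional: when ENABLE_DEBUGGER_SUPPORT is off, BUILTIN_LIST_DEBUG_A(V) still exists but expands to nothing, so every consumer of the combined list compiles without its own #ifdefs. A small self-contained illustration of the pattern (the entries and consumer macros are invented):

    #include <cstdio>

    #define CORE_LIST(V) \
      V(ArrayPush)       \
      V(ArrayPop)

    #ifdef ENABLE_DEBUGGER_SUPPORT
    #define DEBUG_LIST(V) \
      V(Return_DebugBreak)
    #else
    // Same macro name, empty expansion: list consumers need no #ifdefs.
    #define DEBUG_LIST(V)
    #endif

    // One consumer: build an enum from both lists.
    #define DECLARE_ID(name) k##name,
    enum BuiltinId { CORE_LIST(DECLARE_ID) DEBUG_LIST(DECLARE_ID) kBuiltinCount };
    #undef DECLARE_ID

    // Another consumer: a parallel name table for the same entries.
    #define DECLARE_NAME(name) #name,
    static const char* kNames[] = {
      CORE_LIST(DECLARE_NAME) DEBUG_LIST(DECLARE_NAME)
    };
    #undef DECLARE_NAME

    int main() {
      for (int i = 0; i < kBuiltinCount; i++) std::printf("%s\n", kNames[i]);
      return 0;
    }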

2
deps/v8/src/checks.h

@ -254,7 +254,7 @@ template <int> class StaticAssertionHelper { };
#define ASSERT_TAG_ALIGNED(address) \ #define ASSERT_TAG_ALIGNED(address) \
ASSERT((reinterpret_cast<int>(address) & kHeapObjectTagMask) == 0) ASSERT((reinterpret_cast<intptr_t>(address) & kHeapObjectTagMask) == 0)
#define ASSERT_SIZE_TAG_ALIGNED(size) ASSERT((size & kHeapObjectTagMask) == 0) #define ASSERT_SIZE_TAG_ALIGNED(size) ASSERT((size & kHeapObjectTagMask) == 0)
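The checks.h tweak switches the tag-alignment assert from int to intptr_t. On an LP64 target a pointer no longer fits in int, so the old cast would be rejected (reinterpret_cast to a smaller integer type is ill-formed), while intptr_t is pointer-sized by definition. A short illustration, assuming a 64-bit build:

    #include <cstdint>
    #include <cstdio>

    int main() {
      int dummy = 0;
      const int kHeapObjectTagMask = 3;  // low bits carry the tag in V8
      // Pointer-sized integer: safe on both 32-bit and 64-bit targets.
      intptr_t bits = reinterpret_cast<intptr_t>(&dummy);
      std::printf("aligned: %d\n", (bits & kHeapObjectTagMask) == 0);
      // reinterpret_cast<int>(&dummy) would not even compile on LP64,
      // because the destination type is smaller than the pointer.
      return 0;
    }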

2
deps/v8/src/code-stubs.h

@ -72,7 +72,7 @@ class CodeStub BASE_EMBEDDED {
protected: protected:
static const int kMajorBits = 5; static const int kMajorBits = 5;
static const int kMinorBits = kBitsPerPointer - kMajorBits - kSmiTagSize; static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
private: private:
// Generates the assembler code for the stub. // Generates the assembler code for the stub.

86
deps/v8/src/codegen.cc

@ -304,8 +304,10 @@ Handle<JSFunction> CodeGenerator::BuildBoilerplate(FunctionLiteral* node) {
node->is_expression(), false, script_, node->is_expression(), false, script_,
node->inferred_name()); node->inferred_name());
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger that a new function has been added. // Notify debugger that a new function has been added.
Debugger::OnNewFunction(function); Debugger::OnNewFunction(function);
#endif
// Set the expected number of properties for instances and return // Set the expected number of properties for instances and return
// the resulting function. // the resulting function.
@ -384,57 +386,69 @@ void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
} }
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*); // Special cases: These 'runtime calls' manipulate the current
const char* name; // frame and are only used 1 or two places, so we generate them
// inline instead of generating calls to them. They are used
// for implementing Function.prototype.call() and
// Function.prototype.apply().
CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateIsSmi, "_IsSmi"},
{&CodeGenerator::GenerateIsNonNegativeSmi, "_IsNonNegativeSmi"},
{&CodeGenerator::GenerateIsArray, "_IsArray"},
{&CodeGenerator::GenerateArgumentsLength, "_ArgumentsLength"},
{&CodeGenerator::GenerateArgumentsAccess, "_Arguments"},
{&CodeGenerator::GenerateValueOf, "_ValueOf"},
{&CodeGenerator::GenerateSetValueOf, "_SetValueOf"},
{&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"},
{&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"},
{&CodeGenerator::GenerateLog, "_Log"}
}; };
CodeGenerator::InlineRuntimeLUT* CodeGenerator::FindInlineRuntimeLUT(
Handle<String> name) {
const int entries_count =
sizeof(kInlineRuntimeLUT) / sizeof(InlineRuntimeLUT);
for (int i = 0; i < entries_count; i++) {
InlineRuntimeLUT* entry = &kInlineRuntimeLUT[i];
if (name->IsEqualTo(CStrVector(entry->name))) {
return entry;
}
}
return NULL;
}
bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) { bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
ZoneList<Expression*>* args = node->arguments(); ZoneList<Expression*>* args = node->arguments();
// Special cases: These 'runtime calls' manipulate the current
// frame and are only used 1 or two places, so we generate them
// inline instead of generating calls to them. They are used
// for implementing Function.prototype.call() and
// Function.prototype.apply().
static const InlineRuntimeLUT kInlineRuntimeLUT[] = {
{&v8::internal::CodeGenerator::GenerateIsSmi,
"_IsSmi"},
{&v8::internal::CodeGenerator::GenerateIsNonNegativeSmi,
"_IsNonNegativeSmi"},
{&v8::internal::CodeGenerator::GenerateIsArray,
"_IsArray"},
{&v8::internal::CodeGenerator::GenerateArgumentsLength,
"_ArgumentsLength"},
{&v8::internal::CodeGenerator::GenerateArgumentsAccess,
"_Arguments"},
{&v8::internal::CodeGenerator::GenerateValueOf,
"_ValueOf"},
{&v8::internal::CodeGenerator::GenerateSetValueOf,
"_SetValueOf"},
{&v8::internal::CodeGenerator::GenerateFastCharCodeAt,
"_FastCharCodeAt"},
{&v8::internal::CodeGenerator::GenerateObjectEquals,
"_ObjectEquals"},
{&v8::internal::CodeGenerator::GenerateLog,
"_Log"}
};
Handle<String> name = node->name(); Handle<String> name = node->name();
if (name->length() > 0 && name->Get(0) == '_') { if (name->length() > 0 && name->Get(0) == '_') {
for (unsigned i = 0; InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name);
i < sizeof(kInlineRuntimeLUT) / sizeof(InlineRuntimeLUT); if (entry != NULL) {
i++) {
const InlineRuntimeLUT* entry = kInlineRuntimeLUT + i;
if (name->IsEqualTo(CStrVector(entry->name))) {
((*this).*(entry->method))(args); ((*this).*(entry->method))(args);
return true; return true;
} }
} }
}
return false; return false;
} }
bool CodeGenerator::PatchInlineRuntimeEntry(Handle<String> name,
const CodeGenerator::InlineRuntimeLUT& new_entry,
CodeGenerator::InlineRuntimeLUT* old_entry) {
InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name);
if (entry == NULL) return false;
if (old_entry != NULL) {
old_entry->name = entry->name;
old_entry->method = entry->method;
}
entry->name = new_entry.name;
entry->method = new_entry.method;
return true;
}
void CodeGenerator::GenerateFastCaseSwitchStatement(SwitchStatement* node, void CodeGenerator::GenerateFastCaseSwitchStatement(SwitchStatement* node,
int min_index, int min_index,
int range, int range,
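CheckForInlineRuntimeCall now goes through a shared FindInlineRuntimeLUT helper, and the new PatchInlineRuntimeEntry lets callers swap a generator in the table at run time. The table is just name/pointer-to-member pairs searched linearly; a compact sketch of the lookup-and-patch pattern (the generator bodies and entry names are invented):

    #include <cstdio>
    #include <cstring>

    class CodeGenerator {
     public:
      struct InlineRuntimeLUT {
        void (CodeGenerator::*method)();
        const char* name;
      };

      static InlineRuntimeLUT* Find(const char* name);
      static bool Patch(const char* name, const InlineRuntimeLUT& new_entry,
                        InlineRuntimeLUT* old_entry);

      void GenerateIsSmi() { std::printf("IsSmi generator\n"); }
      void GenerateStub() { std::printf("patched generator\n"); }

     private:
      static InlineRuntimeLUT kInlineRuntimeLUT[];
      static const int kEntryCount;
    };

    CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
      {&CodeGenerator::GenerateIsSmi, "_IsSmi"},
    };
    const int CodeGenerator::kEntryCount =
        sizeof(kInlineRuntimeLUT) / sizeof(kInlineRuntimeLUT[0]);

    // Linear search is fine: the table is tiny and only consulted for
    // names starting with '_'.
    CodeGenerator::InlineRuntimeLUT* CodeGenerator::Find(const char* name) {
      for (int i = 0; i < kEntryCount; i++) {
        if (std::strcmp(kInlineRuntimeLUT[i].name, name) == 0) {
          return &kInlineRuntimeLUT[i];
        }
      }
      return nullptr;
    }

    // Swap an entry in place, optionally handing back the previous one.
    bool CodeGenerator::Patch(const char* name,
                              const InlineRuntimeLUT& new_entry,
                              InlineRuntimeLUT* old_entry) {
      InlineRuntimeLUT* entry = Find(name);
      if (entry == nullptr) return false;
      if (old_entry != nullptr) *old_entry = *entry;
      *entry = new_entry;
      return true;
    }

    int main() {
      CodeGenerator cgen;
      (cgen.*(CodeGenerator::Find("_IsSmi")->method))();  // IsSmi generator
      CodeGenerator::InlineRuntimeLUT replacement = {
        &CodeGenerator::GenerateStub, "_IsSmi"
      };
      CodeGenerator::Patch("_IsSmi", replacement, nullptr);
      (cgen.*(CodeGenerator::Find("_IsSmi")->method))();  // patched generator
      return 0;
    }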

20
deps/v8/src/codegen.h

@ -59,7 +59,9 @@
// ComputeCallInitializeInLoop // ComputeCallInitializeInLoop
// ProcessDeclarations // ProcessDeclarations
// DeclareGlobals // DeclareGlobals
// FindInlineRuntimeLUT
// CheckForInlineRuntimeCall // CheckForInlineRuntimeCall
// PatchInlineRuntimeEntry
// GenerateFastCaseSwitchStatement // GenerateFastCaseSwitchStatement
// GenerateFastCaseSwitchCases // GenerateFastCaseSwitchCases
// TryGenerateFastCaseSwitchStatement // TryGenerateFastCaseSwitchStatement
@ -71,10 +73,17 @@
// CodeForStatementPosition // CodeForStatementPosition
// CodeForSourcePosition // CodeForSourcePosition
#ifdef ARM
#include "codegen-arm.h" // Mode to overwrite BinaryExpression values.
#else enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
#include "codegen-ia32.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/codegen-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm.h"
#endif #endif
namespace v8 { namespace internal { namespace v8 { namespace internal {
@ -111,6 +120,9 @@ class DeferredCode: public ZoneObject {
JumpTarget* enter() { return &enter_; } JumpTarget* enter() { return &enter_; }
void BindExit() { exit_.Bind(0); } void BindExit() { exit_.Bind(0); }
void BindExit(Result* result) { exit_.Bind(result, 1); } void BindExit(Result* result) { exit_.Bind(result, 1); }
void BindExit(Result* result0, Result* result1) {
exit_.Bind(result0, result1, 2);
}
void BindExit(Result* result0, Result* result1, Result* result2) { void BindExit(Result* result0, Result* result1, Result* result2) {
exit_.Bind(result0, result1, result2, 3); exit_.Bind(result0, result1, result2, 3);
} }

43
deps/v8/src/compiler.cc

@ -80,8 +80,20 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
} }
static bool IsValidJSON(FunctionLiteral* lit) {
if (!lit->body()->length() == 1)
return false;
Statement* stmt = lit->body()->at(0);
if (stmt->AsExpressionStatement() == NULL)
return false;
Expression *expr = stmt->AsExpressionStatement()->expression();
return expr->IsValidJSON();
}
static Handle<JSFunction> MakeFunction(bool is_global, static Handle<JSFunction> MakeFunction(bool is_global,
bool is_eval, bool is_eval,
bool is_json,
Handle<Script> script, Handle<Script> script,
Handle<Context> context, Handle<Context> context,
v8::Extension* extension, v8::Extension* extension,
@ -92,8 +104,12 @@ static Handle<JSFunction> MakeFunction(bool is_global,
StackGuard guard; StackGuard guard;
PostponeInterruptsScope postpone; PostponeInterruptsScope postpone;
ASSERT(!i::Top::global_context().is_null());
script->set_context_data((*i::Top::global_context())->data());
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger // Notify debugger
Debugger::OnBeforeCompile(script); Debugger::OnBeforeCompile(script);
#endif
// Only allow non-global compiles for eval. // Only allow non-global compiles for eval.
ASSERT(is_eval || is_global); ASSERT(is_eval || is_global);
@ -107,6 +123,19 @@ static Handle<JSFunction> MakeFunction(bool is_global,
return Handle<JSFunction>::null(); return Handle<JSFunction>::null();
} }
// When parsing JSON we do an ordinary parse and then afterwards
// check the AST to ensure it was well-formed. If not we give a
// syntax error.
if (is_json && !IsValidJSON(lit)) {
HandleScope scope;
Handle<JSArray> args = Factory::NewJSArray(1);
Handle<Object> source(script->source());
SetElement(args, 0, source);
Handle<Object> result = Factory::NewSyntaxError("invalid_json", args);
Top::Throw(*result, NULL);
return Handle<JSFunction>::null();
}
// Measure how long it takes to do the compilation; only take the // Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the // rest of the function into account to avoid overlap with the
// parsing statistics. // parsing statistics.
@ -160,8 +189,10 @@ static Handle<JSFunction> MakeFunction(bool is_global,
// the instances of the function. // the instances of the function.
SetExpectedNofPropertiesFromEstimate(fun, lit->expected_property_count()); SetExpectedNofPropertiesFromEstimate(fun, lit->expected_property_count());
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger // Notify debugger
Debugger::OnAfterCompile(script, fun); Debugger::OnAfterCompile(script, fun);
#endif
return fun; return fun;
} }
@ -210,6 +241,7 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
// Compile the function and add it to the cache. // Compile the function and add it to the cache.
result = MakeFunction(true, result = MakeFunction(true,
false,
false, false,
script, script,
Handle<Context>::null(), Handle<Context>::null(),
@ -233,7 +265,8 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
Handle<JSFunction> Compiler::CompileEval(Handle<String> source, Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
Handle<Context> context, Handle<Context> context,
int line_offset, int line_offset,
bool is_global) { bool is_global,
bool is_json) {
int source_length = source->length(); int source_length = source->length();
Counters::total_eval_size.Increment(source_length); Counters::total_eval_size.Increment(source_length);
Counters::total_compile_size.Increment(source_length); Counters::total_compile_size.Increment(source_length);
@ -252,7 +285,13 @@ Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
// Create a script object describing the script to be compiled. // Create a script object describing the script to be compiled.
Handle<Script> script = Factory::NewScript(source); Handle<Script> script = Factory::NewScript(source);
script->set_line_offset(Smi::FromInt(line_offset)); script->set_line_offset(Smi::FromInt(line_offset));
result = MakeFunction(is_global, true, script, context, NULL, NULL); result = MakeFunction(is_global,
true,
is_json,
script,
context,
NULL,
NULL);
if (!result.is_null()) { if (!result.is_null()) {
CompilationCache::PutEval(source, context, entry, result); CompilationCache::PutEval(source, context, entry, result);
} }
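MakeFunction gains an is_json flag: JSON source is run through the ordinary parser and the resulting AST is then checked for well-formedness, so anything other than a single literal expression statement becomes a syntax error. Note that the guard as committed, !lit->body()->length() == 1, negates before it compares; the intent is plainly a single-statement body, which the sketch below spells out with simplified stand-in AST types:

    #include <cstdio>
    #include <vector>

    // Simplified stand-ins for the parser output.
    struct Expression { bool is_valid_json; };
    struct Statement { Expression* expression; };  // null if not an expression statement
    struct FunctionLiteral { std::vector<Statement*> body; };

    // The JSON case accepts exactly one expression statement whose
    // expression is itself valid JSON.
    static bool IsValidJSON(FunctionLiteral* lit) {
      if (lit->body.size() != 1) return false;
      Statement* stmt = lit->body[0];
      if (stmt->expression == nullptr) return false;
      return stmt->expression->is_valid_json;
    }

    int main() {
      Expression object_literal = {true};
      Statement stmt = {&object_literal};
      FunctionLiteral lit;
      lit.body.push_back(&stmt);
      std::printf("%s\n", IsValidJSON(&lit) ? "valid JSON" : "syntax error");
      return 0;
    }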

3
deps/v8/src/compiler.h

@ -60,7 +60,8 @@ class Compiler : public AllStatic {
static Handle<JSFunction> CompileEval(Handle<String> source, static Handle<JSFunction> CompileEval(Handle<String> source,
Handle<Context> context, Handle<Context> context,
int line_offset, int line_offset,
bool is_global); bool is_global,
bool is_json);
// Compile from function info (used for lazy compilation). Returns // Compile from function info (used for lazy compilation). Returns
// true on success and false if the compilation resulted in a stack // true on success and false if the compilation resulted in a stack

6
deps/v8/src/contexts.h

@ -64,6 +64,7 @@ enum ContextLookupFlags {
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \ V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \ V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \ V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(JSON_OBJECT_INDEX, JSObject, json_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \ V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \ V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \ V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \
@ -93,7 +94,8 @@ enum ContextLookupFlags {
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \ V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \ V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \ V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
V(MAP_CACHE_INDEX, Object, map_cache) V(MAP_CACHE_INDEX, Object, map_cache) \
V(CONTEXT_DATA_INDEX, Object, data)
// JSFunctions are pairs (context, function code), sometimes also called // JSFunctions are pairs (context, function code), sometimes also called
// closures. A Context object is used to represent function contexts and // closures. A Context object is used to represent function contexts and
@ -186,6 +188,7 @@ class Context: public FixedArray {
OBJECT_FUNCTION_INDEX, OBJECT_FUNCTION_INDEX,
ARRAY_FUNCTION_INDEX, ARRAY_FUNCTION_INDEX,
DATE_FUNCTION_INDEX, DATE_FUNCTION_INDEX,
JSON_OBJECT_INDEX,
REGEXP_FUNCTION_INDEX, REGEXP_FUNCTION_INDEX,
CREATE_DATE_FUN_INDEX, CREATE_DATE_FUN_INDEX,
TO_NUMBER_FUN_INDEX, TO_NUMBER_FUN_INDEX,
@ -211,6 +214,7 @@ class Context: public FixedArray {
CONTEXT_EXTENSION_FUNCTION_INDEX, CONTEXT_EXTENSION_FUNCTION_INDEX,
OUT_OF_MEMORY_INDEX, OUT_OF_MEMORY_INDEX,
MAP_CACHE_INDEX, MAP_CACHE_INDEX,
CONTEXT_DATA_INDEX,
GLOBAL_CONTEXT_SLOTS GLOBAL_CONTEXT_SLOTS
}; };

27
deps/v8/src/d8.cc

@ -163,6 +163,22 @@ Handle<Value> Shell::Print(const Arguments& args) {
} }
Handle<Value> Shell::Read(const Arguments& args) {
if (args.Length() != 1) {
return ThrowException(String::New("Bad parameters"));
}
String::Utf8Value file(args[0]);
if (*file == NULL) {
return ThrowException(String::New("Error loading file"));
}
Handle<String> source = ReadFile(*file);
if (source.IsEmpty()) {
return ThrowException(String::New("Error loading file"));
}
return source;
}
Handle<Value> Shell::Load(const Arguments& args) { Handle<Value> Shell::Load(const Arguments& args) {
for (int i = 0; i < args.Length(); i++) { for (int i = 0; i < args.Length(); i++) {
HandleScope handle_scope; HandleScope handle_scope;
@ -246,6 +262,7 @@ Handle<Array> Shell::GetCompletions(Handle<String> text, Handle<String> full) {
} }
#ifdef ENABLE_DEBUGGER_SUPPORT
Handle<Object> Shell::DebugMessageDetails(Handle<String> message) { Handle<Object> Shell::DebugMessageDetails(Handle<String> message) {
Context::Scope context_scope(utility_context_); Context::Scope context_scope(utility_context_);
Handle<Object> global = utility_context_->Global(); Handle<Object> global = utility_context_->Global();
@ -266,6 +283,7 @@ Handle<Value> Shell::DebugCommandToJSONRequest(Handle<String> command) {
Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv); Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
return val; return val;
} }
#endif
int32_t* Counter::Bind(const char* name, bool is_histogram) { int32_t* Counter::Bind(const char* name, bool is_histogram) {
@ -381,6 +399,7 @@ void Shell::Initialize() {
HandleScope scope; HandleScope scope;
Handle<ObjectTemplate> global_template = ObjectTemplate::New(); Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"), FunctionTemplate::New(Print)); global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("read"), FunctionTemplate::New(Read));
global_template->Set(String::New("load"), FunctionTemplate::New(Load)); global_template->Set(String::New("load"), FunctionTemplate::New(Load));
global_template->Set(String::New("quit"), FunctionTemplate::New(Quit)); global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
global_template->Set(String::New("version"), FunctionTemplate::New(Version)); global_template->Set(String::New("version"), FunctionTemplate::New(Version));
@ -406,11 +425,13 @@ void Shell::Initialize() {
global_template->Set(String::New("arguments"), global_template->Set(String::New("arguments"),
Utils::ToLocal(arguments_jsarray)); Utils::ToLocal(arguments_jsarray));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Install the debugger object in the utility scope // Install the debugger object in the utility scope
i::Debug::Load(); i::Debug::Load();
i::JSObject* debug = i::Debug::debug_context()->global(); i::JSObject* debug = i::Debug::debug_context()->global();
utility_context_->Global()->Set(String::New("$debug"), utility_context_->Global()->Set(String::New("$debug"),
Utils::ToLocal(&debug)); Utils::ToLocal(&debug));
#endif
// Run the d8 shell utility script in the utility context // Run the d8 shell utility script in the utility context
int source_index = i::NativesCollection<i::D8>::GetIndex("d8"); int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
@ -436,8 +457,10 @@ void Shell::Initialize() {
evaluation_context_ = Context::New(NULL, global_template); evaluation_context_ = Context::New(NULL, global_template);
evaluation_context_->SetSecurityToken(Undefined()); evaluation_context_->SetSecurityToken(Undefined());
#ifdef ENABLE_DEBUGGER_SUPPORT
// Set the security token of the debug context to allow access. // Set the security token of the debug context to allow access.
i::Debug::debug_context()->set_security_token(i::Heap::undefined_value()); i::Debug::debug_context()->set_security_token(i::Heap::undefined_value());
#endif
} }
@ -555,6 +578,8 @@ void ShellThread::Run() {
Handle<ObjectTemplate> global_template = ObjectTemplate::New(); Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"), global_template->Set(String::New("print"),
FunctionTemplate::New(Shell::Print)); FunctionTemplate::New(Shell::Print));
global_template->Set(String::New("read"),
FunctionTemplate::New(Shell::Read));
global_template->Set(String::New("load"), global_template->Set(String::New("load"),
FunctionTemplate::New(Shell::Load)); FunctionTemplate::New(Shell::Load));
global_template->Set(String::New("yield"), global_template->Set(String::New("yield"),
@ -690,6 +715,7 @@ int Shell::Main(int argc, char* argv[]) {
Locker::StartPreemption(preemption_interval); Locker::StartPreemption(preemption_interval);
} }
#ifdef ENABLE_DEBUGGER_SUPPORT
// Run the remote debugger if requested. // Run the remote debugger if requested.
if (i::FLAG_remote_debugger) { if (i::FLAG_remote_debugger) {
RunRemoteDebugger(i::FLAG_debugger_port); RunRemoteDebugger(i::FLAG_debugger_port);
@ -705,6 +731,7 @@ int Shell::Main(int argc, char* argv[]) {
if (i::FLAG_debugger && !i::FLAG_debugger_agent) { if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
v8::Debug::SetDebugEventListener(HandleDebugEvent); v8::Debug::SetDebugEventListener(HandleDebugEvent);
} }
#endif
} }
if (run_shell) if (run_shell)
RunShell(); RunShell();
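The shell changes above add a global read() next to print(), load(), quit() and version(), and fence the debugger wiring behind ENABLE_DEBUGGER_SUPPORT. A minimal d8 session sketch, assuming read(file) returns the file's contents as a string (the file name below is illustrative):

    // Hypothetical d8 session exercising the new global.
    var source = read('startup.js');       // file contents as a string
    print('loaded ' + source.length + ' characters');
    load('startup.js');                    // still compiles and runs the file
    print(version());                      // reports the V8 version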

3
deps/v8/src/d8.h

@ -132,13 +132,16 @@ class Shell: public i::AllStatic {
static int Main(int argc, char* argv[]); static int Main(int argc, char* argv[]);
static Handle<Array> GetCompletions(Handle<String> text, static Handle<Array> GetCompletions(Handle<String> text,
Handle<String> full); Handle<String> full);
#ifdef ENABLE_DEBUGGER_SUPPORT
static Handle<Object> DebugMessageDetails(Handle<String> message); static Handle<Object> DebugMessageDetails(Handle<String> message);
static Handle<Value> DebugCommandToJSONRequest(Handle<String> command); static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
#endif
static Handle<Value> Print(const Arguments& args); static Handle<Value> Print(const Arguments& args);
static Handle<Value> Yield(const Arguments& args); static Handle<Value> Yield(const Arguments& args);
static Handle<Value> Quit(const Arguments& args); static Handle<Value> Quit(const Arguments& args);
static Handle<Value> Version(const Arguments& args); static Handle<Value> Version(const Arguments& args);
static Handle<Value> Read(const Arguments& args);
static Handle<Value> Load(const Arguments& args); static Handle<Value> Load(const Arguments& args);
// The OS object on the global object contains methods for performing // The OS object on the global object contains methods for performing
// operating system calls: // operating system calls:

37
deps/v8/src/d8.js

@ -653,17 +653,47 @@ DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) {
// Process arguments if any. // Process arguments if any.
if (args && args.length > 0) { if (args && args.length > 0) {
var target = args; var target = args;
var type = 'function';
var line;
var column;
var condition; var condition;
var pos;
var pos = args.indexOf(' '); // Check for breakpoint condition.
pos = args.indexOf(' ');
if (pos > 0) { if (pos > 0) {
target = args.substring(0, pos); target = args.substring(0, pos);
condition = args.substring(pos + 1, args.length); condition = args.substring(pos + 1, args.length);
} }
// Check for script breakpoint (name:line[:column]). If no ':' in break
// specification it is considered a function break point.
pos = target.indexOf(':');
if (pos > 0) {
type = 'script';
var tmp = target.substring(pos + 1, target.length);
target = target.substring(0, pos);
// Check for both line and column.
pos = tmp.indexOf(':');
if (pos > 0) {
column = parseInt(tmp.substring(pos + 1, tmp.length)) - 1;
line = parseInt(tmp.substring(0, pos)) - 1;
} else {
line = parseInt(tmp) - 1;
}
} else if (target[0] == '#' && target[target.length - 1] == '#') {
type = 'handle';
target = target.substring(1, target.length - 1);
} else {
type = 'function';
}
request.arguments = {}; request.arguments = {};
request.arguments.type = 'function'; request.arguments.type = type;
request.arguments.target = target; request.arguments.target = target;
request.arguments.line = line;
request.arguments.column = column;
request.arguments.condition = condition; request.arguments.condition = condition;
} else { } else {
throw new Error('Invalid break arguments.'); throw new Error('Invalid break arguments.');
@ -721,6 +751,9 @@ DebugRequest.prototype.helpCommand_ = function(args) {
} }
print('break location [condition]'); print('break location [condition]');
print(' break on named function: location is a function name');
print(' break on function: location is #<id>#');
print(' break on script position: location is name:line[:column]');
print('clear <breakpoint #>'); print('clear <breakpoint #>');
print('backtrace [from frame #] [to frame #]]'); print('backtrace [from frame #] [to frame #]]');
print('frame <frame #>'); print('frame <frame #>');
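The reworked breakCommandToJSONRequest_ above accepts the three location forms listed in the new help text. A sketch of the setbreakpoint arguments each form produces (targets and positions are illustrative); note the parser converts line and column to zero-based values:

    // break add            -> function break point by name
    var byName   = { type: 'function', target: 'add' };
    // break #42#           -> break on the function behind mirror handle 42
    var byHandle = { type: 'handle', target: '42' };
    // break app.js:10:4    -> script break point, zero-based line/column
    var byScript = { type: 'script', target: 'app.js', line: 9, column: 3 };
    print(byScript.target + ':' + byScript.line + ':' + byScript.column);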

25
deps/v8/src/date-delay.js

@ -985,6 +985,25 @@ function DateToGMTString() {
} }
function PadInt(n) {
// Format integers to have at least two digits.
return n < 10 ? '0' + n : n;
}
function DateToISOString() {
return this.getUTCFullYear() + '-' + PadInt(this.getUTCMonth() + 1) +
'-' + PadInt(this.getUTCDate()) + 'T' + PadInt(this.getUTCHours()) +
':' + PadInt(this.getUTCMinutes()) + ':' + PadInt(this.getUTCSeconds()) +
'Z';
}
function DateToJSON(key) {
return CheckJSONPrimitive(this.toISOString());
}
// ------------------------------------------------------------------- // -------------------------------------------------------------------
function SetupDate() { function SetupDate() {
@ -1000,7 +1019,7 @@ function SetupDate() {
// Setup non-enumerable functions of the Date prototype object and // Setup non-enumerable functions of the Date prototype object and
// set their names. // set their names.
InstallFunctions($Date.prototype, DONT_ENUM, $Array( InstallFunctionsOnHiddenPrototype($Date.prototype, DONT_ENUM, $Array(
"toString", DateToString, "toString", DateToString,
"toDateString", DateToDateString, "toDateString", DateToDateString,
"toTimeString", DateToTimeString, "toTimeString", DateToTimeString,
@ -1044,7 +1063,9 @@ function SetupDate() {
"toGMTString", DateToGMTString, "toGMTString", DateToGMTString,
"toUTCString", DateToUTCString, "toUTCString", DateToUTCString,
"getYear", DateGetYear, "getYear", DateGetYear,
"setYear", DateSetYear "setYear", DateSetYear,
"toISOString", DateToISOString,
"toJSON", DateToJSON
)); ));
} }
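The new toISOString and toJSON methods can be exercised directly; a small sketch with the expected output in comments (note this early implementation emits no milliseconds):

    var d = new Date(Date.UTC(2009, 4, 26, 9, 5, 3));   // May 26 2009, 09:05:03 UTC
    print(d.toISOString());   // 2009-05-26T09:05:03Z  (PadInt keeps two digits)
    print(d.toJSON());        // same string; used when serializing dates to JSON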

5
deps/v8/src/dateparser-inl.h

@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_DATEPARSER_INL_H_
#define V8_DATEPARSER_INL_H_
namespace v8 { namespace internal { namespace v8 { namespace internal {
template <typename Char> template <typename Char>
@ -104,3 +107,5 @@ bool DateParser::Parse(Vector<Char> str, FixedArray* out) {
} }
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_DATEPARSER_INL_H_

21
deps/v8/src/debug-agent.cc

@ -29,16 +29,17 @@
#include "v8.h" #include "v8.h"
#include "debug-agent.h" #include "debug-agent.h"
#ifdef ENABLE_DEBUGGER_SUPPORT
namespace v8 { namespace internal { namespace v8 { namespace internal {
// Public V8 debugger API message handler function. This function just delegates // Public V8 debugger API message handler function. This function just delegates
// to the debugger agent through its data parameter. // to the debugger agent through its data parameter.
void DebuggerAgentMessageHandler(const uint16_t* message, int length, void DebuggerAgentMessageHandler(const v8::Debug::Message& message) {
void *data) { DebuggerAgent::instance_->DebuggerMessage(message);
reinterpret_cast<DebuggerAgent*>(data)->DebuggerMessage(message, length);
} }
// static
DebuggerAgent* DebuggerAgent::instance_ = NULL;
// Debugger agent main thread. // Debugger agent main thread.
void DebuggerAgent::Run() { void DebuggerAgent::Run() {
@ -105,7 +106,7 @@ void DebuggerAgent::CreateSession(Socket* client) {
// Create a new session and hook up the debug message handler. // Create a new session and hook up the debug message handler.
session_ = new DebuggerAgentSession(this, client); session_ = new DebuggerAgentSession(this, client);
v8::Debug::SetMessageHandler(DebuggerAgentMessageHandler, this); v8::Debug::SetMessageHandler2(DebuggerAgentMessageHandler);
session_->Start(); session_->Start();
} }
@ -123,13 +124,14 @@ void DebuggerAgent::CloseSession() {
} }
void DebuggerAgent::DebuggerMessage(const uint16_t* message, int length) { void DebuggerAgent::DebuggerMessage(const v8::Debug::Message& message) {
ScopedLock with(session_access_); ScopedLock with(session_access_);
// Forward the message handling to the session. // Forward the message handling to the session.
if (session_ != NULL) { if (session_ != NULL) {
session_->DebuggerMessage(Vector<uint16_t>(const_cast<uint16_t*>(message), v8::String::Value val(message.GetJSON());
length)); session_->DebuggerMessage(Vector<uint16_t>(const_cast<uint16_t*>(*val),
val.length()));
} }
} }
@ -410,5 +412,6 @@ int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
return total_received; return total_received;
} }
} } // namespace v8::internal } } // namespace v8::internal
#endif // ENABLE_DEBUGGER_SUPPORT

28
deps/v8/src/debug-agent.h

@ -25,15 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_V8_DEBUG_AGENT_H_ #ifndef V8_DEBUG_AGENT_H_
#define V8_V8_DEBUG_AGENT_H_ #define V8_DEBUG_AGENT_H_
#ifdef ENABLE_DEBUGGER_SUPPORT
#include "../include/v8-debug.h" #include "../include/v8-debug.h"
#include "platform.h" #include "platform.h"
namespace v8 { namespace internal { namespace v8 { namespace internal {
// Forward declarations. // Forward declarations.
class DebuggerAgentSession; class DebuggerAgentSession;
@ -46,15 +46,21 @@ class DebuggerAgent: public Thread {
: name_(StrDup(name)), port_(port), : name_(StrDup(name)), port_(port),
server_(OS::CreateSocket()), terminate_(false), server_(OS::CreateSocket()), terminate_(false),
session_access_(OS::CreateMutex()), session_(NULL), session_access_(OS::CreateMutex()), session_(NULL),
terminate_now_(OS::CreateSemaphore(0)) {} terminate_now_(OS::CreateSemaphore(0)) {
~DebuggerAgent() { delete server_; } ASSERT(instance_ == NULL);
instance_ = this;
}
~DebuggerAgent() {
instance_ = NULL;
delete server_;
}
void Shutdown(); void Shutdown();
private: private:
void Run(); void Run();
void CreateSession(Socket* socket); void CreateSession(Socket* socket);
void DebuggerMessage(const uint16_t* message, int length); void DebuggerMessage(const v8::Debug::Message& message);
void CloseSession(); void CloseSession();
void OnSessionClosed(DebuggerAgentSession* session); void OnSessionClosed(DebuggerAgentSession* session);
@ -66,9 +72,10 @@ class DebuggerAgent: public Thread {
DebuggerAgentSession* session_; // Current active session if any. DebuggerAgentSession* session_; // Current active session if any.
Semaphore* terminate_now_; // Semaphore to signal termination. Semaphore* terminate_now_; // Semaphore to signal termination.
static DebuggerAgent* instance_;
friend class DebuggerAgentSession; friend class DebuggerAgentSession;
friend void DebuggerAgentMessageHandler(const uint16_t* message, int length, friend void DebuggerAgentMessageHandler(const v8::Debug::Message& message);
void *data);
DISALLOW_COPY_AND_ASSIGN(DebuggerAgent); DISALLOW_COPY_AND_ASSIGN(DebuggerAgent);
}; };
@ -111,7 +118,8 @@ class DebuggerAgentUtil {
static int ReceiveAll(const Socket* conn, char* data, int len); static int ReceiveAll(const Socket* conn, char* data, int len);
}; };
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_V8_DEBUG_AGENT_H_ #endif // ENABLE_DEBUGGER_SUPPORT
#endif // V8_DEBUG_AGENT_H_

71
deps/v8/src/debug-delay.js

@ -977,6 +977,7 @@ CompileEvent.prototype.script = function() {
CompileEvent.prototype.toJSONProtocol = function() { CompileEvent.prototype.toJSONProtocol = function() {
var o = new ProtocolMessage(); var o = new ProtocolMessage();
o.running = true;
if (this.before_) { if (this.before_) {
o.event = "beforeCompile"; o.event = "beforeCompile";
} else { } else {
@ -1021,6 +1022,9 @@ function MakeScriptObject_(script, include_source) {
columnOffset: script.columnOffset(), columnOffset: script.columnOffset(),
lineCount: script.lineCount(), lineCount: script.lineCount(),
}; };
if (!IS_UNDEFINED(script.data())) {
o.data = script.data();
}
if (include_source) { if (include_source) {
o.source = script.source(); o.source = script.source();
} }
@ -1058,6 +1062,14 @@ function ProtocolMessage(request) {
} }
ProtocolMessage.prototype.setOption = function(name, value) {
if (!this.options_) {
this.options_ = {};
}
this.options_[name] = value;
}
ProtocolMessage.prototype.failed = function(message) { ProtocolMessage.prototype.failed = function(message) {
this.success = false; this.success = false;
this.message = message; this.message = message;
@ -1086,7 +1098,7 @@ ProtocolMessage.prototype.toJSONProtocol = function() {
if (this.body) { if (this.body) {
json += ',"body":'; json += ',"body":';
// Encode the body part. // Encode the body part.
var serializer = MakeMirrorSerializer(true); var serializer = MakeMirrorSerializer(true, this.options_);
if (this.body instanceof Mirror) { if (this.body instanceof Mirror) {
json += serializer.serializeValue(this.body); json += serializer.serializeValue(this.body);
} else if (this.body instanceof Array) { } else if (this.body instanceof Array) {
@ -1130,7 +1142,7 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
try { try {
try { try {
// Convert the JSON string to an object. // Convert the JSON string to an object.
request = %CompileString('(' + json_request + ')', 0)(); request = %CompileString('(' + json_request + ')', 0, false)();
// Create an initial response. // Create an initial response.
response = this.createResponse(request); response = this.createResponse(request);
@ -1270,11 +1282,12 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
var ignoreCount = request.arguments.ignoreCount; var ignoreCount = request.arguments.ignoreCount;
// Check for legal arguments. // Check for legal arguments.
if (!type || !target) { if (!type || IS_UNDEFINED(target)) {
response.failed('Missing argument "type" or "target"'); response.failed('Missing argument "type" or "target"');
return; return;
} }
if (type != 'function' && type != 'script' && type != 'scriptId') { if (type != 'function' && type != 'handle' &&
type != 'script' && type != 'scriptId') {
response.failed('Illegal type "' + type + '"'); response.failed('Illegal type "' + type + '"');
return; return;
} }
@ -1303,6 +1316,20 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
// Set function break point. // Set function break point.
break_point_number = Debug.setBreakPoint(f, line, column, condition); break_point_number = Debug.setBreakPoint(f, line, column, condition);
} else if (type == 'handle') {
// Find the object pointed by the specified handle.
var handle = parseInt(target, 10);
var mirror = LookupMirror(handle);
if (!mirror) {
return response.failed('Object #' + handle + '# not found');
}
if (!mirror.isFunction()) {
return response.failed('Object #' + handle + '# is not a function');
}
// Set function break point.
break_point_number = Debug.setBreakPoint(mirror.value(),
line, column, condition);
} else if (type == 'script') { } else if (type == 'script') {
// set script break point. // set script break point.
break_point_number = break_point_number =
@ -1547,20 +1574,24 @@ DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
} }
// Pull out arguments. // Pull out arguments.
var handle = request.arguments.handle; var handles = request.arguments.handles;
// Check for legal arguments. // Check for legal arguments.
if (IS_UNDEFINED(handle)) { if (IS_UNDEFINED(handles)) {
return response.failed('Argument "handle" missing'); return response.failed('Argument "handles" missing');
} }
// Lookup handle. // Lookup handles.
var mirrors = {};
for (var i = 0; i < handles.length; i++) {
var handle = handles[i];
var mirror = LookupMirror(handle); var mirror = LookupMirror(handle);
if (mirror) { if (!mirror) {
response.body = mirror;
} else {
return response.failed('Object #' + handle + '# not found'); return response.failed('Object #' + handle + '# not found');
} }
mirrors[handle] = mirror;
}
response.body = mirrors;
}; };
@ -1657,6 +1688,7 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
if (!IS_UNDEFINED(request.arguments.includeSource)) { if (!IS_UNDEFINED(request.arguments.includeSource)) {
includeSource = %ToBoolean(request.arguments.includeSource); includeSource = %ToBoolean(request.arguments.includeSource);
response.setOption('includeSource', includeSource);
} }
} }
@ -1667,22 +1699,7 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
for (var i = 0; i < scripts.length; i++) { for (var i = 0; i < scripts.length; i++) {
if (types & ScriptTypeFlag(scripts[i].type)) { if (types & ScriptTypeFlag(scripts[i].type)) {
var script = {}; response.body.push(MakeMirror(scripts[i]));
if (scripts[i].name) {
script.name = scripts[i].name;
}
script.id = scripts[i].id;
script.lineOffset = scripts[i].line_offset;
script.columnOffset = scripts[i].column_offset;
script.lineCount = scripts[i].lineCount();
if (includeSource) {
script.source = scripts[i].source;
} else {
script.sourceStart = scripts[i].source.substring(0, 80);
}
script.sourceLength = scripts[i].source.length;
script.type = scripts[i].type;
response.body.push(script);
} }
} }
}; };
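The protocol changes above replace the single 'handle' argument of the lookup request with a 'handles' array and key the response body by handle, while the scripts request now returns script mirrors (with the includeSource option recorded on the response). A sketch of the new request shape; envelope fields and handle numbers are illustrative:

    // Old: arguments carried a single handle.
    var oldLookup = { seq: 1, type: 'request', command: 'lookup',
                      arguments: { handle: 7 } };
    // New: several handles at once; the response body maps handle -> mirror.
    var newLookup = { seq: 2, type: 'request', command: 'lookup',
                      arguments: { handles: [7, 12] } };
    // Expected response body shape: { "7": { ...mirror... }, "12": { ...mirror... } }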

489
deps/v8/src/debug.cc

@ -35,12 +35,17 @@
#include "debug.h" #include "debug.h"
#include "execution.h" #include "execution.h"
#include "global-handles.h" #include "global-handles.h"
#include "ic.h"
#include "ic-inl.h"
#include "natives.h" #include "natives.h"
#include "stub-cache.h" #include "stub-cache.h"
#include "log.h" #include "log.h"
#include "../include/v8-debug.h"
namespace v8 { namespace internal { namespace v8 { namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
static void PrintLn(v8::Local<v8::Value> value) { static void PrintLn(v8::Local<v8::Value> value) {
v8::Local<v8::String> s = value->ToString(); v8::Local<v8::String> s = value->ToString();
char* data = NewArray<char>(s->Length() + 1); char* data = NewArray<char>(s->Length() + 1);
@ -288,14 +293,8 @@ void BreakLocationIterator::SetDebugBreak() {
// Patch the frame exit code with a break point. // Patch the frame exit code with a break point.
SetDebugBreakAtReturn(); SetDebugBreakAtReturn();
} else { } else {
// Patch the original code with the current address as the current address // Patch the IC call.
// might have changed by the inline caching since the code was copied. SetDebugBreakAtIC();
original_rinfo()->set_target_address(rinfo()->target_address());
// Patch the code to invoke the builtin debug break function matching the
// calling convention used by the call site.
Handle<Code> dbgbrk_code(Debug::FindDebugBreak(rinfo()));
rinfo()->set_target_address(dbgbrk_code->entry());
} }
ASSERT(IsDebugBreak()); ASSERT(IsDebugBreak());
} }
@ -306,8 +305,8 @@ void BreakLocationIterator::ClearDebugBreak() {
// Restore the frame exit code. // Restore the frame exit code.
ClearDebugBreakAtReturn(); ClearDebugBreakAtReturn();
} else { } else {
// Patch the code to the original invoke. // Patch the IC call.
rinfo()->set_target_address(original_rinfo()->target_address()); ClearDebugBreakAtIC();
} }
ASSERT(!IsDebugBreak()); ASSERT(!IsDebugBreak());
} }
@ -360,6 +359,37 @@ bool BreakLocationIterator::IsDebugBreak() {
} }
void BreakLocationIterator::SetDebugBreakAtIC() {
// Patch the original code with the current address as the current address
// might have changed by the inline caching since the code was copied.
original_rinfo()->set_target_address(rinfo()->target_address());
RelocInfo::Mode mode = rmode();
if (RelocInfo::IsCodeTarget(mode)) {
Address target = rinfo()->target_address();
Handle<Code> code(Code::GetCodeFromTargetAddress(target));
// Patch the code to invoke the builtin debug break function matching the
// calling convention used by the call site.
Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
rinfo()->set_target_address(dbgbrk_code->entry());
// For stubs that refer back to an inlined version clear the cached map for
// the inlined case to always go through the IC. As long as the break point
// is set the patching performed by the runtime system will take place in
// the code copy and will therefore have no effect on the running code
// keeping it from using the inlined code.
if (code->is_keyed_load_stub()) KeyedLoadIC::ClearInlinedVersion(pc());
}
}
void BreakLocationIterator::ClearDebugBreakAtIC() {
// Patch the code to the original invoke.
rinfo()->set_target_address(original_rinfo()->target_address());
}
Object* BreakLocationIterator::BreakPointObjects() { Object* BreakLocationIterator::BreakPointObjects() {
return debug_info_->GetBreakPointObjects(code_position()); return debug_info_->GetBreakPointObjects(code_position());
} }
@ -1055,14 +1085,9 @@ bool Debug::IsBreakStub(Code* code) {
// Find the builtin to use for invoking the debug break // Find the builtin to use for invoking the debug break
Handle<Code> Debug::FindDebugBreak(RelocInfo* rinfo) { Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
// Find the builtin debug break function matching the calling convention // Find the builtin debug break function matching the calling convention
// used by the call site. // used by the call site.
RelocInfo::Mode mode = rinfo->rmode();
if (RelocInfo::IsCodeTarget(mode)) {
Address target = rinfo->target_address();
Code* code = Code::GetCodeFromTargetAddress(target);
if (code->is_inline_cache_stub()) { if (code->is_inline_cache_stub()) {
if (code->is_call_stub()) { if (code->is_call_stub()) {
return ComputeCallDebugBreak(code->arguments_count()); return ComputeCallDebugBreak(code->arguments_count());
@ -1096,7 +1121,6 @@ Handle<Code> Debug::FindDebugBreak(RelocInfo* rinfo) {
Handle<Code>(Builtins::builtin(Builtins::StubNoRegisters_DebugBreak)); Handle<Code>(Builtins::builtin(Builtins::StubNoRegisters_DebugBreak));
return result; return result;
} }
}
UNREACHABLE(); UNREACHABLE();
return Handle<Code>::null(); return Handle<Code>::null();
@ -1396,17 +1420,13 @@ Handle<Object> Debugger::event_listener_data_ = Handle<Object>();
bool Debugger::compiling_natives_ = false; bool Debugger::compiling_natives_ = false;
bool Debugger::is_loading_debugger_ = false; bool Debugger::is_loading_debugger_ = false;
bool Debugger::never_unload_debugger_ = false; bool Debugger::never_unload_debugger_ = false;
DebugMessageThread* Debugger::message_thread_ = NULL; v8::Debug::MessageHandler2 Debugger::message_handler_ = NULL;
v8::DebugMessageHandler Debugger::message_handler_ = NULL;
bool Debugger::message_handler_cleared_ = false; bool Debugger::message_handler_cleared_ = false;
void* Debugger::message_handler_data_ = NULL; v8::Debug::HostDispatchHandler Debugger::host_dispatch_handler_ = NULL;
v8::DebugHostDispatchHandler Debugger::host_dispatch_handler_ = NULL; int Debugger::host_dispatch_micros_ = 100 * 1000;
void* Debugger::host_dispatch_handler_data_ = NULL;
DebuggerAgent* Debugger::agent_ = NULL; DebuggerAgent* Debugger::agent_ = NULL;
LockingMessageQueue Debugger::command_queue_(kQueueInitialSize); LockingCommandMessageQueue Debugger::command_queue_(kQueueInitialSize);
LockingMessageQueue Debugger::message_queue_(kQueueInitialSize);
Semaphore* Debugger::command_received_ = OS::CreateSemaphore(0); Semaphore* Debugger::command_received_ = OS::CreateSemaphore(0);
Semaphore* Debugger::message_received_ = OS::CreateSemaphore(0);
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name, Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
@ -1534,8 +1554,8 @@ void Debugger::OnException(Handle<Object> exception, bool uncaught) {
return; return;
} }
// Process debug event // Process debug event.
ProcessDebugEvent(v8::Exception, event_data, false); ProcessDebugEvent(v8::Exception, Handle<JSObject>::cast(event_data), false);
// Return to continue execution from where the exception was thrown. // Return to continue execution from where the exception was thrown.
} }
@ -1566,8 +1586,10 @@ void Debugger::OnDebugBreak(Handle<Object> break_points_hit,
return; return;
} }
// Process debug event // Process debug event.
ProcessDebugEvent(v8::Break, event_data, auto_continue); ProcessDebugEvent(v8::Break,
Handle<JSObject>::cast(event_data),
auto_continue);
} }
@ -1591,8 +1613,10 @@ void Debugger::OnBeforeCompile(Handle<Script> script) {
return; return;
} }
// Process debug event // Process debug event.
ProcessDebugEvent(v8::BeforeCompile, event_data, false); ProcessDebugEvent(v8::BeforeCompile,
Handle<JSObject>::cast(event_data),
true);
} }
@ -1652,8 +1676,10 @@ void Debugger::OnAfterCompile(Handle<Script> script, Handle<JSFunction> fun) {
if (caught_exception) { if (caught_exception) {
return; return;
} }
// Process debug event // Process debug event.
ProcessDebugEvent(v8::AfterCompile, event_data, false); ProcessDebugEvent(v8::AfterCompile,
Handle<JSObject>::cast(event_data),
true);
} }
@ -1678,12 +1704,12 @@ void Debugger::OnNewFunction(Handle<JSFunction> function) {
return; return;
} }
// Process debug event. // Process debug event.
ProcessDebugEvent(v8::NewFunction, event_data, false); ProcessDebugEvent(v8::NewFunction, Handle<JSObject>::cast(event_data), true);
} }
void Debugger::ProcessDebugEvent(v8::DebugEvent event, void Debugger::ProcessDebugEvent(v8::DebugEvent event,
Handle<Object> event_data, Handle<JSObject> event_data,
bool auto_continue) { bool auto_continue) {
HandleScope scope; HandleScope scope;
@ -1695,7 +1721,10 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
} }
// First notify the message handler if any. // First notify the message handler if any.
if (message_handler_ != NULL) { if (message_handler_ != NULL) {
NotifyMessageHandler(event, exec_state, event_data, auto_continue); NotifyMessageHandler(event,
Handle<JSObject>::cast(exec_state),
event_data,
auto_continue);
} }
// Notify registered debug event listener. This can be either a C or a // Notify registered debug event listener. This can be either a C or a
// JavaScript function. // JavaScript function.
@ -1703,11 +1732,11 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
if (event_listener_->IsProxy()) { if (event_listener_->IsProxy()) {
// C debug event listener. // C debug event listener.
Handle<Proxy> callback_obj(Handle<Proxy>::cast(event_listener_)); Handle<Proxy> callback_obj(Handle<Proxy>::cast(event_listener_));
v8::DebugEventCallback callback = v8::Debug::EventCallback callback =
FUNCTION_CAST<v8::DebugEventCallback>(callback_obj->proxy()); FUNCTION_CAST<v8::Debug::EventCallback>(callback_obj->proxy());
callback(event, callback(event,
v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)), v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)),
v8::Utils::ToLocal(Handle<JSObject>::cast(event_data)), v8::Utils::ToLocal(event_data),
v8::Utils::ToLocal(Handle<Object>::cast(event_listener_data_))); v8::Utils::ToLocal(Handle<Object>::cast(event_listener_data_)));
} else { } else {
// JavaScript debug event listener. // JavaScript debug event listener.
@ -1718,7 +1747,7 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
const int argc = 4; const int argc = 4;
Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(), Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
exec_state.location(), exec_state.location(),
event_data.location(), Handle<Object>::cast(event_data).location(),
event_listener_data_.location() }; event_listener_data_.location() };
Handle<Object> result = Execution::TryCall(fun, Top::global(), Handle<Object> result = Execution::TryCall(fun, Top::global(),
argc, argv, &caught_exception); argc, argv, &caught_exception);
@ -1748,25 +1777,26 @@ void Debugger::UnloadDebugger() {
void Debugger::NotifyMessageHandler(v8::DebugEvent event, void Debugger::NotifyMessageHandler(v8::DebugEvent event,
Handle<Object> exec_state, Handle<JSObject> exec_state,
Handle<Object> event_data, Handle<JSObject> event_data,
bool auto_continue) { bool auto_continue) {
HandleScope scope; HandleScope scope;
if (!Debug::Load()) return; if (!Debug::Load()) return;
// Process the individual events. // Process the individual events.
bool interactive = false; bool sendEventMessage = false;
switch (event) { switch (event) {
case v8::Break: case v8::Break:
interactive = true; // Break event is always interactive sendEventMessage = !auto_continue;
break; break;
case v8::Exception: case v8::Exception:
interactive = true; // Exception event is always interactive sendEventMessage = true;
break; break;
case v8::BeforeCompile: case v8::BeforeCompile:
break; break;
case v8::AfterCompile: case v8::AfterCompile:
sendEventMessage = true;
break; break;
case v8::NewFunction: case v8::NewFunction:
break; break;
@ -1774,8 +1804,25 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
UNREACHABLE(); UNREACHABLE();
} }
// Done if not interactive. // The debug command interrupt flag might have been set when the command was
if (!interactive) return; // added. It should be enough to clear the flag only once while we are in the
// debugger.
ASSERT(Debug::InDebugger());
StackGuard::Continue(DEBUGCOMMAND);
// Notify the debugger that a debug event has occurred unless auto continue is
// active in which case no event is sent.
if (sendEventMessage) {
MessageImpl message = MessageImpl::NewEvent(
event,
auto_continue,
Handle<JSObject>::cast(exec_state),
Handle<JSObject>::cast(event_data));
InvokeMessageHandler(message);
}
if (auto_continue && !HasCommands()) {
return;
}
// Get the DebugCommandProcessor. // Get the DebugCommandProcessor.
v8::Local<v8::Object> api_exec_state = v8::Local<v8::Object> api_exec_state =
@ -1792,45 +1839,30 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
return; return;
} }
// Notify the debugger that a debug event has occurred unless auto continue is
// active in which case no event is send.
if (!auto_continue) {
bool success = SendEventMessage(event_data);
if (!success) {
// If failed to notify debugger just continue running.
return;
}
}
// Process requests from the debugger. // Process requests from the debugger.
while (true) { while (true) {
// Wait for new command in the queue. // Wait for new command in the queue.
if (Debugger::host_dispatch_handler_) {
// In case there is a host dispatch - do periodic dispatches.
if (!command_received_->Wait(host_dispatch_micros_)) {
// Timeout expired, do the dispatch.
Debugger::host_dispatch_handler_();
continue;
}
} else {
// In case there is no host dispatch - just wait.
command_received_->Wait(); command_received_->Wait();
}
// The debug command interrupt flag might have been set when the command was
// added.
StackGuard::Continue(DEBUGCOMMAND);
// Get the command from the queue. // Get the command from the queue.
Vector<uint16_t> command = command_queue_.Get(); CommandMessage command = command_queue_.Get();
Logger::DebugTag("Got request from command queue, in interactive loop."); Logger::DebugTag("Got request from command queue, in interactive loop.");
if (!Debugger::IsDebuggerActive()) { if (!Debugger::IsDebuggerActive()) {
// Delete command text and user data.
command.Dispose();
return; return;
} }
// Check if the command is a host dispatch.
if (command[0] == 0) {
if (Debugger::host_dispatch_handler_) {
int32_t dispatch = (command[1] << 16) | command[2];
Debugger::host_dispatch_handler_(reinterpret_cast<void*>(dispatch),
Debugger::host_dispatch_handler_data_);
}
if (auto_continue && !HasCommands()) {
return;
}
continue;
}
// Invoke JavaScript to process the debug request. // Invoke JavaScript to process the debug request.
v8::Local<v8::String> fun_name; v8::Local<v8::String> fun_name;
v8::Local<v8::Function> fun; v8::Local<v8::Function> fun;
@ -1838,8 +1870,9 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
v8::TryCatch try_catch; v8::TryCatch try_catch;
fun_name = v8::String::New("processDebugRequest"); fun_name = v8::String::New("processDebugRequest");
fun = v8::Function::Cast(*cmd_processor->Get(fun_name)); fun = v8::Function::Cast(*cmd_processor->Get(fun_name));
request = v8::String::New(reinterpret_cast<uint16_t*>(command.start()),
command.length()); request = v8::String::New(command.text().start(),
command.text().length());
static const int kArgc = 1; static const int kArgc = 1;
v8::Handle<Value> argv[kArgc] = { request }; v8::Handle<Value> argv[kArgc] = { request };
v8::Local<v8::Value> response_val = fun->Call(cmd_processor, kArgc, argv); v8::Local<v8::Value> response_val = fun->Call(cmd_processor, kArgc, argv);
@ -1875,13 +1908,16 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
response = try_catch.Exception()->ToString(); response = try_catch.Exception()->ToString();
} }
// Convert text result to C string.
v8::String::Value val(response);
Vector<uint16_t> str(reinterpret_cast<uint16_t*>(*val),
response->Length());
// Return the result. // Return the result.
SendMessage(str); MessageImpl message = MessageImpl::NewResponse(
event,
running,
Handle<JSObject>::cast(exec_state),
Handle<JSObject>::cast(event_data),
Handle<String>(Utils::OpenHandle(*response)),
command.client_data());
InvokeMessageHandler(message);
command.Dispose();
// Return from debug event processing if either the VM is put into the // Return from debug event processing if either the VM is put into the
// running state (through a continue command) or auto continue is active // running state (through a continue command) or auto continue is active
@ -1927,18 +1963,11 @@ void Debugger::SetEventListener(Handle<Object> callback,
} }
void Debugger::SetMessageHandler(v8::DebugMessageHandler handler, void* data, void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
bool message_handler_thread) {
ScopedLock with(debugger_access_); ScopedLock with(debugger_access_);
message_handler_ = handler; message_handler_ = handler;
message_handler_data_ = data; if (handler == NULL) {
if (handler != NULL) {
if (!message_thread_ && message_handler_thread) {
message_thread_ = new DebugMessageThread();
message_thread_->Start();
}
} else {
// Indicate that the message handler was recently cleared. // Indicate that the message handler was recently cleared.
message_handler_cleared_ = true; message_handler_cleared_ = true;
@ -1951,87 +1980,37 @@ void Debugger::SetMessageHandler(v8::DebugMessageHandler handler, void* data,
} }
void Debugger::SetHostDispatchHandler(v8::DebugHostDispatchHandler handler, void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
void* data) { int period) {
host_dispatch_handler_ = handler; host_dispatch_handler_ = handler;
host_dispatch_handler_data_ = data; host_dispatch_micros_ = period * 1000;
} }
// Calls the registered debug message handler. This callback is part of the // Calls the registered debug message handler. This callback is part of the
// public API. Messages are kept internally as Vector<uint16_t> strings, which // public API.
// are allocated in various places and deallocated by the calling function void Debugger::InvokeMessageHandler(MessageImpl message) {
// sometime after this call.
void Debugger::InvokeMessageHandler(Vector<uint16_t> message) {
ScopedLock with(debugger_access_); ScopedLock with(debugger_access_);
if (message_handler_ != NULL) { if (message_handler_ != NULL) {
message_handler_(message.start(), message.length(), message_handler_data_); message_handler_(message);
}
}
void Debugger::SendMessage(Vector<uint16_t> message) {
if (message_thread_ == NULL) {
// If there is no message thread just invoke the message handler from the
// V8 thread.
InvokeMessageHandler(message);
} else {
// Put a copy of the message coming from V8 on the queue. The new copy of
// the event string is destroyed by the message thread.
Vector<uint16_t> message_copy = message.Clone();
Logger::DebugTag("Put message on event message_queue.");
message_queue_.Put(message_copy);
message_received_->Signal();
}
}
bool Debugger::SendEventMessage(Handle<Object> event_data) {
v8::HandleScope scope;
// Call toJSONProtocol on the debug event object.
v8::Local<v8::Object> api_event_data =
v8::Utils::ToLocal(Handle<JSObject>::cast(event_data));
v8::Local<v8::String> fun_name = v8::String::New("toJSONProtocol");
v8::Local<v8::Function> fun =
v8::Function::Cast(*api_event_data->Get(fun_name));
v8::TryCatch try_catch;
v8::Local<v8::Value> json_event = *fun->Call(api_event_data, 0, NULL);
v8::Local<v8::String> json_event_string;
if (!try_catch.HasCaught()) {
if (!json_event->IsUndefined()) {
json_event_string = json_event->ToString();
if (FLAG_trace_debug_json) {
PrintLn(json_event_string);
}
v8::String::Value val(json_event_string);
Vector<uint16_t> str(reinterpret_cast<uint16_t*>(*val),
json_event_string->Length());
SendMessage(str);
} else {
SendMessage(Vector<uint16_t>::empty());
} }
} else {
PrintLn(try_catch.Exception());
return false;
}
return true;
} }
// Puts a command coming from the public API on the queue. Creates // Puts a command coming from the public API on the queue. Creates
// a copy of the command string managed by the debugger. Up to this // a copy of the command string managed by the debugger. Up to this
// point, the command data was managed by the API client. Called // point, the command data was managed by the API client. Called
// by the API client thread. This is where the API client hands off // by the API client thread.
// processing of the command to the DebugMessageThread thread. void Debugger::ProcessCommand(Vector<const uint16_t> command,
// The new copy of the command is destroyed in HandleCommand(). v8::Debug::ClientData* client_data) {
void Debugger::ProcessCommand(Vector<const uint16_t> command) { // Need to cast away const.
// Make a copy of the command. Need to cast away const for Clone to work. CommandMessage message = CommandMessage::New(
Vector<uint16_t> command_copy =
Vector<uint16_t>(const_cast<uint16_t*>(command.start()), Vector<uint16_t>(const_cast<uint16_t*>(command.start()),
command.length()).Clone(); command.length()),
client_data);
Logger::DebugTag("Put command on command_queue."); Logger::DebugTag("Put command on command_queue.");
command_queue_.Put(command_copy); command_queue_.Put(message);
command_received_->Signal(); command_received_->Signal();
// Set the debug command break flag to have the command processed. // Set the debug command break flag to have the command processed.
@ -2046,23 +2025,6 @@ bool Debugger::HasCommands() {
} }
void Debugger::ProcessHostDispatch(void* dispatch) {
// Puts a host dispatch comming from the public API on the queue.
uint16_t hack[3];
hack[0] = 0;
hack[1] = reinterpret_cast<uint32_t>(dispatch) >> 16;
hack[2] = reinterpret_cast<uint32_t>(dispatch) & 0xFFFF;
Logger::DebugTag("Put dispatch on command_queue.");
command_queue_.Put(Vector<uint16_t>(hack, 3).Clone());
command_received_->Signal();
// Set the debug command break flag to have the host dispatch processed.
if (!Debug::InDebugger()) {
StackGuard::DebugCommand();
}
}
bool Debugger::IsDebuggerActive() { bool Debugger::IsDebuggerActive() {
ScopedLock with(debugger_access_); ScopedLock with(debugger_access_);
@ -2118,47 +2080,152 @@ void Debugger::StopAgent() {
} }
void Debugger::TearDown() { MessageImpl MessageImpl::NewEvent(DebugEvent event,
if (message_thread_ != NULL) { bool running,
message_thread_->Stop(); Handle<JSObject> exec_state,
delete message_thread_; Handle<JSObject> event_data) {
message_thread_ = NULL; MessageImpl message(true, event, running,
} exec_state, event_data, Handle<String>(), NULL);
return message;
}
MessageImpl MessageImpl::NewResponse(DebugEvent event,
bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
Handle<String> response_json,
v8::Debug::ClientData* client_data) {
MessageImpl message(false, event, running,
exec_state, event_data, response_json, client_data);
return message;
}
MessageImpl::MessageImpl(bool is_event,
DebugEvent event,
bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
Handle<String> response_json,
v8::Debug::ClientData* client_data)
: is_event_(is_event),
event_(event),
running_(running),
exec_state_(exec_state),
event_data_(event_data),
response_json_(response_json),
client_data_(client_data) {}
bool MessageImpl::IsEvent() const {
return is_event_;
}
bool MessageImpl::IsResponse() const {
return !is_event_;
} }
void DebugMessageThread::Run() { DebugEvent MessageImpl::GetEvent() const {
// Sends debug events to an installed debugger message callback. return event_;
while (keep_running_) { }
// Wait and Get are paired so that semaphore count equals queue length.
Debugger::message_received_->Wait();
Logger::DebugTag("Get message from event message_queue."); bool MessageImpl::WillStartRunning() const {
Vector<uint16_t> message = Debugger::message_queue_.Get(); return running_;
if (message.length() > 0) { }
Debugger::InvokeMessageHandler(message);
v8::Handle<v8::Object> MessageImpl::GetExecutionState() const {
return v8::Utils::ToLocal(exec_state_);
}
v8::Handle<v8::Object> MessageImpl::GetEventData() const {
return v8::Utils::ToLocal(event_data_);
}
v8::Handle<v8::String> MessageImpl::GetJSON() const {
v8::HandleScope scope;
if (IsEvent()) {
// Call toJSONProtocol on the debug event object.
Handle<Object> fun = GetProperty(event_data_, "toJSONProtocol");
if (!fun->IsJSFunction()) {
return v8::Handle<v8::String>();
} }
bool caught_exception;
Handle<Object> json = Execution::TryCall(Handle<JSFunction>::cast(fun),
event_data_,
0, NULL, &caught_exception);
if (caught_exception || !json->IsString()) {
return v8::Handle<v8::String>();
}
return scope.Close(v8::Utils::ToLocal(Handle<String>::cast(json)));
} else {
return v8::Utils::ToLocal(response_json_);
} }
} }
void DebugMessageThread::Stop() { v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
keep_running_ = false; return v8::Utils::ToLocal(Debug::debugger_entry()->GetContext());
Debugger::SendMessage(Vector<uint16_t>(NULL, 0));
Join();
} }
MessageQueue::MessageQueue(int size) : start_(0), end_(0), size_(size) { v8::Debug::ClientData* MessageImpl::GetClientData() const {
messages_ = NewArray<Vector<uint16_t> >(size); return client_data_;
} }
MessageQueue::~MessageQueue() { CommandMessage::CommandMessage() : text_(Vector<uint16_t>::empty()),
client_data_(NULL) {
}
CommandMessage::CommandMessage(const Vector<uint16_t>& text,
v8::Debug::ClientData* data)
: text_(text),
client_data_(data) {
}
CommandMessage::~CommandMessage() {
}
void CommandMessage::Dispose() {
text_.Dispose();
delete client_data_;
client_data_ = NULL;
}
CommandMessage CommandMessage::New(const Vector<uint16_t>& command,
v8::Debug::ClientData* data) {
return CommandMessage(command.Clone(), data);
}
CommandMessageQueue::CommandMessageQueue(int size) : start_(0), end_(0),
size_(size) {
messages_ = NewArray<CommandMessage>(size);
}
CommandMessageQueue::~CommandMessageQueue() {
while (!IsEmpty()) {
CommandMessage m = Get();
m.Dispose();
}
DeleteArray(messages_); DeleteArray(messages_);
} }
Vector<uint16_t> MessageQueue::Get() { CommandMessage CommandMessageQueue::Get() {
ASSERT(!IsEmpty()); ASSERT(!IsEmpty());
int result = start_; int result = start_;
start_ = (start_ + 1) % size_; start_ = (start_ + 1) % size_;
@ -2166,7 +2233,7 @@ Vector<uint16_t> MessageQueue::Get() {
} }
void MessageQueue::Put(const Vector<uint16_t>& message) { void CommandMessageQueue::Put(const CommandMessage& message) {
if ((end_ + 1) % size_ == start_) { if ((end_ + 1) % size_ == start_) {
Expand(); Expand();
} }
@ -2175,53 +2242,57 @@ void MessageQueue::Put(const Vector<uint16_t>& message) {
} }
void MessageQueue::Expand() { void CommandMessageQueue::Expand() {
MessageQueue new_queue(size_ * 2); CommandMessageQueue new_queue(size_ * 2);
while (!IsEmpty()) { while (!IsEmpty()) {
new_queue.Put(Get()); new_queue.Put(Get());
} }
Vector<uint16_t>* array_to_free = messages_; CommandMessage* array_to_free = messages_;
*this = new_queue; *this = new_queue;
new_queue.messages_ = array_to_free; new_queue.messages_ = array_to_free;
// Make the new_queue empty so that it doesn't call Dispose on any messages.
new_queue.start_ = new_queue.end_;
// Automatic destructor called on new_queue, freeing array_to_free. // Automatic destructor called on new_queue, freeing array_to_free.
} }
LockingMessageQueue::LockingMessageQueue(int size) : queue_(size) { LockingCommandMessageQueue::LockingCommandMessageQueue(int size)
: queue_(size) {
lock_ = OS::CreateMutex(); lock_ = OS::CreateMutex();
} }
LockingMessageQueue::~LockingMessageQueue() { LockingCommandMessageQueue::~LockingCommandMessageQueue() {
delete lock_; delete lock_;
} }
bool LockingMessageQueue::IsEmpty() const { bool LockingCommandMessageQueue::IsEmpty() const {
ScopedLock sl(lock_); ScopedLock sl(lock_);
return queue_.IsEmpty(); return queue_.IsEmpty();
} }
Vector<uint16_t> LockingMessageQueue::Get() { CommandMessage LockingCommandMessageQueue::Get() {
ScopedLock sl(lock_); ScopedLock sl(lock_);
Vector<uint16_t> result = queue_.Get(); CommandMessage result = queue_.Get();
Logger::DebugEvent("Get", result); Logger::DebugEvent("Get", result.text());
return result; return result;
} }
void LockingMessageQueue::Put(const Vector<uint16_t>& message) { void LockingCommandMessageQueue::Put(const CommandMessage& message) {
ScopedLock sl(lock_); ScopedLock sl(lock_);
queue_.Put(message); queue_.Put(message);
Logger::DebugEvent("Put", message); Logger::DebugEvent("Put", message.text());
} }
void LockingMessageQueue::Clear() { void LockingCommandMessageQueue::Clear() {
ScopedLock sl(lock_); ScopedLock sl(lock_);
queue_.Clear(); queue_.Clear();
} }
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal } } // namespace v8::internal
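With the switch to MessageHandler2, the embedder receives Message objects instead of raw JSON strings, and GetJSON() still yields protocol text. A sketch of the two message kinds as a JavaScript front end would see them (field values are illustrative; 'running' mirrors WillStartRunning()):

    // Debug event pushed by the VM (MessageImpl::NewEvent).
    var eventMessage = { seq: 17, type: 'event', event: 'break',
                         body: { /* execution state / event data mirrors */ },
                         running: false };
    // Reply to a client command (MessageImpl::NewResponse), carrying the
    // client data the embedder attached to the original request.
    var responseMessage = { seq: 18, request_seq: 5, type: 'response',
                            command: 'lookup', success: true,
                            body: { /* ... */ }, running: true };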

188
deps/v8/src/debug.h

@ -25,10 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_V8_DEBUG_H_ #ifndef V8_DEBUG_H_
#define V8_V8_DEBUG_H_ #define V8_DEBUG_H_
#include "../include/v8-debug.h"
#include "assembler.h" #include "assembler.h"
#include "code-stubs.h" #include "code-stubs.h"
#include "debug-agent.h" #include "debug-agent.h"
@ -38,6 +37,8 @@
#include "string-stream.h" #include "string-stream.h"
#include "v8threads.h" #include "v8threads.h"
#ifdef ENABLE_DEBUGGER_SUPPORT
#include "../include/v8-debug.h"
namespace v8 { namespace internal { namespace v8 { namespace internal {
@ -131,6 +132,10 @@ class BreakLocationIterator {
private: private:
void SetDebugBreak(); void SetDebugBreak();
void ClearDebugBreak(); void ClearDebugBreak();
void SetDebugBreakAtIC();
void ClearDebugBreakAtIC();
bool IsDebugBreakAtReturn(); bool IsDebugBreakAtReturn();
void SetDebugBreakAtReturn(); void SetDebugBreakAtReturn();
void ClearDebugBreakAtReturn(); void ClearDebugBreakAtReturn();
@ -204,7 +209,7 @@ class Debug {
static bool IsBreakStub(Code* code); static bool IsBreakStub(Code* code);
// Find the builtin to use for invoking the debug break // Find the builtin to use for invoking the debug break
static Handle<Code> FindDebugBreak(RelocInfo* rinfo); static Handle<Code> FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode);
static Handle<Object> GetSourceBreakLocations( static Handle<Object> GetSourceBreakLocations(
Handle<SharedFunctionInfo> shared); Handle<SharedFunctionInfo> shared);
@ -396,48 +401,117 @@ class Debug {
}; };
// A Queue of Vector<uint16_t> objects. A thread-safe version is // Message delivered to the message handler callback. This is either a debugger
// LockingMessageQueue, based on this class. // event or the response to a command.
class MessageQueue BASE_EMBEDDED { class MessageImpl: public v8::Debug::Message {
public:
// Create a message object for a debug event.
static MessageImpl NewEvent(DebugEvent event,
bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data);
// Create a message object for the response to a debug command.
static MessageImpl NewResponse(DebugEvent event,
bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
Handle<String> response_json,
v8::Debug::ClientData* client_data);
// Implementation of interface v8::Debug::Message.
virtual bool IsEvent() const;
virtual bool IsResponse() const;
virtual DebugEvent GetEvent() const;
virtual bool WillStartRunning() const;
virtual v8::Handle<v8::Object> GetExecutionState() const;
virtual v8::Handle<v8::Object> GetEventData() const;
virtual v8::Handle<v8::String> GetJSON() const;
virtual v8::Handle<v8::Context> GetEventContext() const;
virtual v8::Debug::ClientData* GetClientData() const;
private:
MessageImpl(bool is_event,
DebugEvent event,
bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
Handle<String> response_json,
v8::Debug::ClientData* client_data);
bool is_event_; // Does this message represent a debug event?
DebugEvent event_; // Debug event causing the break.
bool running_; // Will the VM start running after this event?
Handle<JSObject> exec_state_; // Current execution state.
Handle<JSObject> event_data_; // Data associated with the event.
Handle<String> response_json_; // Response JSON if message holds a response.
v8::Debug::ClientData* client_data_; // Client data passed with the request.
};
// Message sent by user to v8 debugger or debugger output message.
// In addition to command text it may contain a pointer to some user data
// which is expected to be passed along with the command response to the
// message handler.
class CommandMessage {
public: public:
explicit MessageQueue(int size); static CommandMessage New(const Vector<uint16_t>& command,
~MessageQueue(); v8::Debug::ClientData* data);
CommandMessage();
~CommandMessage();
// Deletes user data and disposes of the text.
void Dispose();
Vector<uint16_t> text() const { return text_; }
v8::Debug::ClientData* client_data() const { return client_data_; }
private:
CommandMessage(const Vector<uint16_t>& text,
v8::Debug::ClientData* data);
Vector<uint16_t> text_;
v8::Debug::ClientData* client_data_;
};
// A Queue of CommandMessage objects. A thread-safe version is
// LockingCommandMessageQueue, based on this class.
class CommandMessageQueue BASE_EMBEDDED {
public:
explicit CommandMessageQueue(int size);
~CommandMessageQueue();
bool IsEmpty() const { return start_ == end_; } bool IsEmpty() const { return start_ == end_; }
Vector<uint16_t> Get(); CommandMessage Get();
void Put(const Vector<uint16_t>& message); void Put(const CommandMessage& message);
void Clear() { start_ = end_ = 0; } // Queue is empty after Clear(). void Clear() { start_ = end_ = 0; } // Queue is empty after Clear().
private: private:
// Doubles the size of the message queue, and copies the messages. // Doubles the size of the message queue, and copies the messages.
void Expand(); void Expand();
Vector<uint16_t>* messages_; CommandMessage* messages_;
int start_; int start_;
int end_; int end_;
int size_; // The size of the queue buffer. Queue can hold size-1 messages. int size_; // The size of the queue buffer. Queue can hold size-1 messages.
}; };
// LockingMessageQueue is a thread-safe circular buffer of Vector<uint16_t> // LockingCommandMessageQueue is a thread-safe circular buffer of CommandMessage
// messages. The message data is not managed by LockingMessageQueue. // messages. The message data is not managed by LockingCommandMessageQueue.
// Pointers to the data are passed in and out. Implemented by adding a // Pointers to the data are passed in and out. Implemented by adding a
// Mutex to MessageQueue. Includes logging of all puts and gets. // Mutex to CommandMessageQueue. Includes logging of all puts and gets.
class LockingMessageQueue BASE_EMBEDDED { class LockingCommandMessageQueue BASE_EMBEDDED {
public: public:
explicit LockingMessageQueue(int size); explicit LockingCommandMessageQueue(int size);
~LockingMessageQueue(); ~LockingCommandMessageQueue();
bool IsEmpty() const; bool IsEmpty() const;
Vector<uint16_t> Get(); CommandMessage Get();
void Put(const Vector<uint16_t>& message); void Put(const CommandMessage& message);
void Clear(); void Clear();
private: private:
MessageQueue queue_; CommandMessageQueue queue_;
Mutex* lock_; Mutex* lock_;
DISALLOW_COPY_AND_ASSIGN(LockingMessageQueue); DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
}; };
class DebugMessageThread;
class Debugger { class Debugger {
public: public:
static void DebugRequest(const uint16_t* json_request, int length); static void DebugRequest(const uint16_t* json_request, int length);
@ -465,36 +539,27 @@ class Debugger {
Handle<JSFunction> fun); Handle<JSFunction> fun);
static void OnNewFunction(Handle<JSFunction> fun); static void OnNewFunction(Handle<JSFunction> fun);
static void ProcessDebugEvent(v8::DebugEvent event, static void ProcessDebugEvent(v8::DebugEvent event,
Handle<Object> event_data, Handle<JSObject> event_data,
bool auto_continue); bool auto_continue);
static void NotifyMessageHandler(v8::DebugEvent event, static void NotifyMessageHandler(v8::DebugEvent event,
-                             Handle<Object> exec_state,
-                             Handle<Object> event_data,
+                             Handle<JSObject> exec_state,
+                             Handle<JSObject> event_data,
                              bool auto_continue);
  static void SetEventListener(Handle<Object> callback, Handle<Object> data);
- static void SetMessageHandler(v8::DebugMessageHandler handler, void* data,
-                               bool message_handler_thread);
- static void TearDown();
- static void SetHostDispatchHandler(v8::DebugHostDispatchHandler handler,
-                                    void* data);
+ static void SetMessageHandler(v8::Debug::MessageHandler2 handler);
+ static void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
+                                    int period);

  // Invoke the message handler function.
- static void InvokeMessageHandler(Vector<uint16_t> message);
+ static void InvokeMessageHandler(MessageImpl message);
+ // Send a message to the message handler eiher through the message thread or
+ // directly.
+ static void SendMessage(Vector<uint16_t> message);
+ // Send the JSON message for a debug event.
+ static bool SendEventMessage(Handle<Object> event_data);
  // Add a debugger command to the command queue.
- static void ProcessCommand(Vector<const uint16_t> command);
+ static void ProcessCommand(Vector<const uint16_t> command,
+                            v8::Debug::ClientData* client_data = NULL);
  // Check whether there are commands in the command queue.
  static bool HasCommands();
- static void ProcessHostDispatch(void* dispatch);
  static Handle<Object> Call(Handle<JSFunction> fun,
                             Handle<Object> data,
                             bool* pending_exception);
@@ -537,42 +602,18 @@ class Debugger {
  static bool compiling_natives_;  // Are we compiling natives?
  static bool is_loading_debugger_;  // Are we loading the debugger?
  static bool never_unload_debugger_;  // Can we unload the debugger?
- static DebugMessageThread* message_thread_;
- static v8::DebugMessageHandler message_handler_;
+ static v8::Debug::MessageHandler2 message_handler_;
  static bool message_handler_cleared_;  // Was message handler cleared?
- static void* message_handler_data_;
- static v8::DebugHostDispatchHandler host_dispatch_handler_;
- static void* host_dispatch_handler_data_;
+ static v8::Debug::HostDispatchHandler host_dispatch_handler_;
+ static int host_dispatch_micros_;
  static DebuggerAgent* agent_;
  static const int kQueueInitialSize = 4;
- static LockingMessageQueue command_queue_;
- static LockingMessageQueue message_queue_;
+ static LockingCommandMessageQueue command_queue_;
  static Semaphore* command_received_;  // Signaled for each command received.
- static Semaphore* message_received_;  // Signalled for each message send.
  friend class EnterDebugger;
- friend class DebugMessageThread;
 };

-
-// Thread to read messages from the message queue and invoke the debug message
-// handler in another thread as the V8 thread. This thread is started if the
-// registration of the debug message handler requested to be called in a thread
-// seperate from the V8 thread.
-class DebugMessageThread: public Thread {
- public:
-  DebugMessageThread() : keep_running_(true) {}
-  virtual ~DebugMessageThread() {}
-  // Main function of DebugMessageThread thread.
-  void Run();
-  void Stop();
- private:
-  bool keep_running_;
-  DISALLOW_COPY_AND_ASSIGN(DebugMessageThread);
-};

@@ -646,6 +687,9 @@ class EnterDebugger BASE_EMBEDDED {
  // Check whether there are any JavaScript frames on the stack.
  inline bool HasJavaScriptFrames() { return has_js_frames_; }

+ // Get the active context from before entering the debugger.
+ inline Handle<Context> GetContext() { return save_.context(); }
+
 private:
  EnterDebugger* prev_;  // Previous debugger entry if entered recursively.
  JavaScriptFrameIterator it_;

@@ -719,4 +763,6 @@ class Debug_Address {
} }  // namespace v8::internal

-#endif  // V8_V8_DEBUG_H_
+#endif  // ENABLE_DEBUGGER_SUPPORT
+
+#endif  // V8_DEBUG_H_
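The interface changes above drop the dedicated DebugMessageThread in favor of a plain message_handler_ callback plus a LockingCommandMessageQueue of debugger commands, where each command may now carry opaque client data. As a rough sketch of the shape such a queue needs (the class name, the ClientData type and the std::u16string payload below are inventions for illustration, not V8's implementation), a mutex-guarded FIFO is enough:

    #include <mutex>
    #include <queue>
    #include <string>
    #include <utility>

    // Hypothetical stand-in for v8::Debug::ClientData: opaque data the embedder
    // attaches to a command and wants back when the response is delivered.
    struct ClientData {
      virtual ~ClientData() {}
    };

    // A minimal mutex-guarded FIFO of (command text, client data) pairs.
    class LockingCommandQueue {
     public:
      void Put(std::u16string command, ClientData* data) {
        std::lock_guard<std::mutex> guard(mutex_);
        queue_.emplace(std::move(command), data);
      }

      // Pops the oldest entry; returns false if the queue is empty.
      bool Get(std::u16string* command, ClientData** data) {
        std::lock_guard<std::mutex> guard(mutex_);
        if (queue_.empty()) return false;
        *command = std::move(queue_.front().first);
        *data = queue_.front().second;
        queue_.pop();
        return true;
      }

      bool IsEmpty() {
        std::lock_guard<std::mutex> guard(mutex_);
        return queue_.empty();
      }

     private:
      std::mutex mutex_;
      std::queue<std::pair<std::u16string, ClientData*> > queue_;
    };

In the declaration above, the queue is paired with the command_received_ semaphore, signaled once per enqueued command, so the VM thread knows when there is work to drain.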

26  deps/v8/src/execution.cc

@@ -32,10 +32,12 @@
 #include "api.h"
 #include "codegen-inl.h"

-#ifdef ARM
-#include "simulator-arm.h"
-#else  // ia32
-#include "simulator-ia32.h"
+#if V8_TARGET_ARCH_IA32
+#include "ia32/simulator-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/simulator-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/simulator-arm.h"
 #endif

 #include "debug.h"
@@ -305,6 +307,7 @@ void StackGuard::Preempt() {
 }

+#ifdef ENABLE_DEBUGGER_SUPPORT
 bool StackGuard::IsDebugBreak() {
   ExecutionAccess access;
   return thread_local_.interrupt_flags_ & DEBUGBREAK;
@@ -331,7 +334,7 @@ void StackGuard::DebugCommand() {
     set_limits(kInterruptLimit, access);
   }
 }
+#endif

 void StackGuard::Continue(InterruptFlag after_what) {
   ExecutionAccess access;
@@ -539,6 +542,7 @@ static Object* RuntimePreempt() {
   ContextSwitcher::PreemptionReceived();

+#ifdef ENABLE_DEBUGGER_SUPPORT
   if (Debug::InDebugger()) {
     // If currently in the debugger don't do any actual preemption but record
     // that preemption occoured while in the debugger.
@@ -548,11 +552,17 @@ static Object* RuntimePreempt() {
     v8::Unlocker unlocker;
     Thread::YieldCPU();
   }
+#else
+  // Perform preemption.
+  v8::Unlocker unlocker;
+  Thread::YieldCPU();
+#endif

   return Heap::undefined_value();
 }

+#ifdef ENABLE_DEBUGGER_SUPPORT
 Object* Execution::DebugBreakHelper() {
   // Just continue if breaks are disabled.
   if (Debug::disable_break()) {
@@ -598,12 +608,14 @@ Object* Execution::DebugBreakHelper() {
   // Return to continue execution.
   return Heap::undefined_value();
 }
+#endif

 Object* Execution::HandleStackGuardInterrupt() {
+#ifdef ENABLE_DEBUGGER_SUPPORT
   if (StackGuard::IsDebugBreak() || StackGuard::IsDebugCommand()) {
     DebugBreakHelper();
   }
+#endif
   if (StackGuard::IsPreempted()) RuntimePreempt();
   if (StackGuard::IsInterrupted()) {
     // interrupt
@@ -626,7 +638,7 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
 v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
   // All allocation spaces other than NEW_SPACE have the same effect.
-  Heap::CollectGarbage(0, OLD_DATA_SPACE);
+  Heap::CollectAllGarbage();
   return v8::Undefined();
 }

11  deps/v8/src/execution.h

@@ -118,8 +118,9 @@ class Execution : public AllStatic {
                                       Handle<JSFunction> fun,
                                       Handle<Object> pos,
                                       Handle<Object> is_global);
+#ifdef ENABLE_DEBUGGER_SUPPORT
   static Object* DebugBreakHelper();
+#endif

   // If the stack guard is triggered, but it is not an actual
   // stack overflow, then handle the interruption accordingly.
@@ -158,11 +159,13 @@ class StackGuard BASE_EMBEDDED {
   static void Preempt();
   static bool IsInterrupted();
   static void Interrupt();
-  static bool IsDebugBreak();
+  static void Continue(InterruptFlag after_what);
+#ifdef ENABLE_DEBUGGER_SUPPORT
   static void DebugBreak();
-  static bool IsDebugCommand();
   static void DebugCommand();
-  static void Continue(InterruptFlag after_what);
+  static bool IsDebugBreak();
+  static bool IsDebugCommand();
+#endif

  private:
   // You should hold the ExecutionAccess lock when calling this method.

13  deps/v8/src/factory.cc

@@ -167,14 +167,17 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
   Heap::SetLastScriptId(Smi::FromInt(id));

   // Create and initialize script object.
+  Handle<Proxy> wrapper = Factory::NewProxy(0, TENURED);
   Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
   script->set_source(*source);
   script->set_name(Heap::undefined_value());
   script->set_id(Heap::last_script_id());
   script->set_line_offset(Smi::FromInt(0));
   script->set_column_offset(Smi::FromInt(0));
+  script->set_data(Heap::undefined_value());
+  script->set_context_data(Heap::undefined_value());
   script->set_type(Smi::FromInt(SCRIPT_TYPE_NORMAL));
-  script->set_wrapper(*Factory::NewProxy(0, TENURED));
+  script->set_wrapper(*wrapper);
   script->set_line_ends(Heap::undefined_value());

   return script;
@@ -207,14 +210,14 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
 }

-Handle<Map> Factory::CopyMap(Handle<Map> src) {
-  CALL_HEAP_FUNCTION(src->Copy(), Map);
+Handle<Map> Factory::CopyMapDropDescriptors(Handle<Map> src) {
+  CALL_HEAP_FUNCTION(src->CopyDropDescriptors(), Map);
 }

 Handle<Map> Factory::CopyMap(Handle<Map> src,
                              int extra_inobject_properties) {
-  Handle<Map> copy = CopyMap(src);
+  Handle<Map> copy = CopyMapDropDescriptors(src);
   // Check that we do not overflow the instance size when adding the
   // extra inobject properties.
   int instance_size_delta = extra_inobject_properties * kPointerSize;
@@ -671,6 +674,7 @@ Handle<Object> Factory::ToObject(Handle<Object> object,
 }

+#ifdef ENABLE_DEBUGGER_SUPPORT
 Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
   // Get the original code of the function.
   Handle<Code> code(shared->code());
@@ -700,6 +704,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
   return debug_info;
 }
+#endif

 Handle<JSObject> Factory::NewArgumentsObject(Handle<Object> callee,

5  deps/v8/src/factory.h

@@ -153,7 +153,7 @@ class Factory : public AllStatic {
   static Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);

-  static Handle<Map> CopyMap(Handle<Map> map);
+  static Handle<Map> CopyMapDropDescriptors(Handle<Map> map);

   // Copy the map adding more inobject properties if possible without
   // overflowing the instance size.
@@ -310,8 +310,9 @@
                                         uint32_t key,
                                         Handle<Object> value);

+#ifdef ENABLE_DEBUGGER_SUPPORT
   static Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
+#endif

   // Return a map using the map cache in the global context.
   // The key the an ordered set of property names.

12  deps/v8/src/frames-inl.h

@@ -29,12 +29,14 @@
 #define V8_FRAMES_INL_H_

 #include "frames.h"
-#ifdef ARM
-#include "frames-arm.h"
-#else
-#include "frames-ia32.h"
-#endif
+
+#if V8_TARGET_ARCH_IA32
+#include "ia32/frames-ia32.h"
+#elif V8_TARGET_ARCH_X64
+#include "x64/frames-x64.h"
+#elif V8_TARGET_ARCH_ARM
+#include "arm/frames-arm.h"
+#endif

 namespace v8 { namespace internal {

4  deps/v8/src/frames.cc

@@ -647,10 +647,10 @@ void EntryFrame::Iterate(ObjectVisitor* v) const {
   handler->Iterate(v);
   // Make sure that there's the entry frame does not contain more than
   // one stack handler.
-  if (kDebug) {
-    it.Advance();
-    ASSERT(it.done());
-  }
+#ifdef DEBUG
+  it.Advance();
+  ASSERT(it.done());
+#endif
 }

9  deps/v8/src/func-name-inferrer.cc

@@ -63,11 +63,12 @@ Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos,
 }

-void FuncNameInferrer::MaybeInferFunctionName() {
-  if (func_to_infer_ != NULL) {
-    func_to_infer_->set_inferred_name(MakeNameFromStack());
-    func_to_infer_ = NULL;
+void FuncNameInferrer::InferFunctionsNames() {
+  Handle<String> func_name = MakeNameFromStack();
+  for (int i = 0; i < funcs_to_infer_.length(); ++i) {
+    funcs_to_infer_[i]->set_inferred_name(func_name);
   }
+  funcs_to_infer_.Rewind(0);
 }

23  deps/v8/src/func-name-inferrer.h

@@ -45,7 +45,7 @@ class FuncNameInferrer BASE_EMBEDDED {
   FuncNameInferrer() :
       entries_stack_(10),
       names_stack_(5),
-      func_to_infer_(NULL),
+      funcs_to_infer_(4),
       dot_(Factory::NewStringFromAscii(CStrVector("."))) {
   }
@@ -57,39 +57,34 @@
     entries_stack_.Add(names_stack_.length());
   }

-  void Leave() {
-    ASSERT(IsOpen());
-    names_stack_.Rewind(entries_stack_.RemoveLast());
-  }
-
   void PushName(Handle<String> name) {
     if (IsOpen()) {
       names_stack_.Add(name);
     }
   }

-  void SetFuncToInfer(FunctionLiteral* func_to_infer) {
+  void AddFunction(FunctionLiteral* func_to_infer) {
     if (IsOpen()) {
-      // If we encounter another function literal after already having
-      // encountered one, the second one replaces the first.
-      func_to_infer_ = func_to_infer;
+      funcs_to_infer_.Add(func_to_infer);
     }
   }

   void InferAndLeave() {
     ASSERT(IsOpen());
-    MaybeInferFunctionName();
-    Leave();
+    if (!funcs_to_infer_.is_empty()) {
+      InferFunctionsNames();
+    }
+    names_stack_.Rewind(entries_stack_.RemoveLast());
   }

  private:
   Handle<String> MakeNameFromStack();
   Handle<String> MakeNameFromStackHelper(int pos, Handle<String> prev);
-  void MaybeInferFunctionName();
+  void InferFunctionsNames();

   List<int> entries_stack_;
   List<Handle<String> > names_stack_;
-  FunctionLiteral* func_to_infer_;
+  List<FunctionLiteral*> funcs_to_infer_;
   Handle<String> dot_;

   DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
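The header change above replaces the single func_to_infer_ slot with a funcs_to_infer_ list, so every anonymous literal collected inside one open scope receives the same inferred name when InferAndLeave() runs. A toy, self-contained version of that collect-then-assign pattern (FunctionLit, NameInferrer and the dot-joining rule below are simplifications invented for this sketch, not the parser's real types):

    #include <cstddef>
    #include <iostream>
    #include <string>
    #include <vector>

    struct FunctionLit {
      std::string inferred_name;  // Filled in when the enclosing scope closes.
    };

    class NameInferrer {
     public:
      void Enter() { entries_.push_back(names_.size()); }

      void PushName(const std::string& name) {
        if (!entries_.empty()) names_.push_back(name);
      }

      void AddFunction(FunctionLit* fn) {
        if (!entries_.empty()) funcs_.push_back(fn);
      }

      // Build "a.b.c" from the name stack, hand it to every collected literal,
      // then pop this scope's names.
      void InferAndLeave() {
        if (!funcs_.empty()) {
          std::string joined;
          for (std::size_t i = 0; i < names_.size(); ++i) {
            if (i > 0) joined += ".";
            joined += names_[i];
          }
          for (FunctionLit* fn : funcs_) fn->inferred_name = joined;
          funcs_.clear();
        }
        names_.resize(entries_.back());
        entries_.pop_back();
      }

     private:
      std::vector<std::size_t> entries_;   // Name-stack heights at each Enter().
      std::vector<std::string> names_;     // e.g. {"obj", "method"}.
      std::vector<FunctionLit*> funcs_;    // Literals awaiting a name.
    };

    int main() {
      NameInferrer inferrer;
      FunctionLit f1, f2;
      inferrer.Enter();
      inferrer.PushName("MyClass");
      inferrer.PushName("method");
      inferrer.AddFunction(&f1);  // With a list, a second literal no longer
      inferrer.AddFunction(&f2);  // replaces the first; both get the name.
      inferrer.InferAndLeave();
      std::cout << f1.inferred_name << " / " << f2.inferred_name << "\n";
      // Prints: MyClass.method / MyClass.method
    }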

2  deps/v8/src/global-handles.cc

@@ -258,7 +258,7 @@ void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
 }

-void GlobalHandles::MarkWeakRoots(WeakSlotCallback f) {
+void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
   for (Node* current = head_; current != NULL; current = current->next()) {
     if (current->state_ == Node::WEAK) {
       if (f(&current->object_)) {

5  deps/v8/src/global-handles.h

@@ -98,8 +98,9 @@ class GlobalHandles : public AllStatic {
   // Iterates over all weak roots in heap.
   static void IterateWeakRoots(ObjectVisitor* v);

-  // Mark the weak pointers based on the callback.
-  static void MarkWeakRoots(WeakSlotCallback f);
+  // Find all weak handles satisfying the callback predicate, mark
+  // them as pending.
+  static void IdentifyWeakHandles(WeakSlotCallback f);

   // Add an object group.
   // Should only used in GC callback function before a collection.
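The rename to IdentifyWeakHandles matches what the loop in global-handles.cc does: scan the node list and move weak nodes whose slot the caller-supplied predicate selects into a pending state, so a later phase can run their weak callbacks. A minimal stand-alone sketch of that shape (the Node layout, the PENDING state and the vector container are assumptions of this sketch, not V8's internals):

    #include <iostream>
    #include <vector>

    struct Object {};  // Stand-in for a heap object.

    using WeakSlotCallback = bool (*)(Object** slot);

    struct Node {
      enum State { NORMAL, WEAK, PENDING };
      Object* object = nullptr;
      State state = NORMAL;
    };

    // Weak nodes whose slot the predicate selects become PENDING; everything
    // else is left alone.
    void IdentifyWeakHandles(std::vector<Node>& nodes, WeakSlotCallback f) {
      for (Node& node : nodes) {
        if (node.state == Node::WEAK && f(&node.object)) {
          node.state = Node::PENDING;
        }
      }
    }

    int main() {
      Object a;
      std::vector<Node> nodes(2);
      nodes[0].object = &a;
      nodes[0].state = Node::WEAK;
      // nodes[1] stays NORMAL and is ignored by the pass.
      IdentifyWeakHandles(nodes, [](Object**) { return true; });
      std::cout << (nodes[0].state == Node::PENDING) << "\n";  // prints 1
    }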

110  deps/v8/src/globals.h

@@ -28,27 +28,27 @@
 #ifndef V8_GLOBALS_H_
 #define V8_GLOBALS_H_

-// -----------------------------------------------------------------------------
-// Types
-// Visual Studio C++ is missing the stdint.h header file. Instead we define
-// standard integer types for Windows here.
-#ifdef _MSC_VER
-typedef signed char int8_t;
-typedef unsigned char uint8_t;
-typedef short int16_t;  // NOLINT
-typedef unsigned short uint16_t;  // NOLINT
-typedef int int32_t;
-typedef unsigned int uint32_t;
-typedef __int64 int64_t;
-typedef unsigned __int64 uint64_t;
-#else  // _MSC_VER
-#include <stdint.h>  // for intptr_t
-#endif  // _MSC_VER
-
 namespace v8 { namespace internal {

+// Processor architecture detection. For more info on what's defined, see:
+//   http://msdn.microsoft.com/en-us/library/b0084kay.aspx
+//   http://www.agner.org/optimize/calling_conventions.pdf
+//   or with gcc, run: "echo | gcc -E -dM -"
+#if defined(_M_X64) || defined(__x86_64__)
+#define V8_HOST_ARCH_X64 1
+#define V8_HOST_ARCH_64_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#elif defined(_M_IX86) || defined(__i386__)
+#define V8_HOST_ARCH_IA32 1
+#define V8_HOST_ARCH_32_BIT 1
+#define V8_HOST_CAN_READ_UNALIGNED 1
+#elif defined(__ARMEL__)
+#define V8_HOST_ARCH_ARM 1
+#define V8_HOST_ARCH_32_BIT 1
+#else
+#error Your architecture was not detected as supported by v8
+#endif
+
 // Support for alternative bool type. This is only enabled if the code is
 // compiled with USE_MYBOOL defined. This catches some nasty type bugs.
 // For instance, 'bool b = "false";' results in b == true! This is a hidden
@@ -69,58 +69,73 @@ typedef unsigned int __my_bool__;
 typedef uint8_t byte;
 typedef byte* Address;

+// Define our own macros for writing 64-bit constants. This is less fragile
+// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
+// works on compilers that don't have it (like MSVC).
+#if V8_HOST_ARCH_64_BIT
+#ifdef _MSC_VER
+#define V8_UINT64_C(x)  (x ## UI64)
+#define V8_INT64_C(x)   (x ## I64)
+#define V8_PTR_PREFIX "ll"
+#else
+#define V8_UINT64_C(x)  (x ## UL)
+#define V8_INT64_C(x)   (x ## L)
+#define V8_PTR_PREFIX "l"
+#endif
+#else  // V8_HOST_ARCH_64_BIT
+#define V8_PTR_PREFIX ""
+#endif
+
+#define V8PRIp V8_PTR_PREFIX "x"
+
 // Code-point values in Unicode 4.0 are 21 bits wide.
 typedef uint16_t uc16;
-typedef signed int uc32;
-
-#ifndef ARM
-#define CAN_READ_UNALIGNED 1
-#endif
+typedef int32_t uc32;

 // -----------------------------------------------------------------------------
 // Constants

-#ifdef DEBUG
-const bool kDebug = true;
-#else
-const bool kDebug = false;
-#endif  // DEBUG
-
 const int KB = 1024;
 const int MB = KB * KB;
 const int GB = KB * KB * KB;
 const int kMaxInt = 0x7FFFFFFF;
 const int kMinInt = -kMaxInt - 1;
+const uint32_t kMaxUInt32 = 0xFFFFFFFFu;

 const int kCharSize = sizeof(char);  // NOLINT
 const int kShortSize = sizeof(short);  // NOLINT
 const int kIntSize = sizeof(int);  // NOLINT
 const int kDoubleSize = sizeof(double);  // NOLINT
 const int kPointerSize = sizeof(void*);  // NOLINT

+#if V8_HOST_ARCH_64_BIT
+const int kPointerSizeLog2 = 3;
+#else
 const int kPointerSizeLog2 = 2;
+#endif

-const int kObjectAlignmentBits = 2;
-const int kObjectAlignmentMask = (1 << kObjectAlignmentBits) - 1;
-const int kObjectAlignment = 1 << kObjectAlignmentBits;
+const int kObjectAlignmentBits = kPointerSizeLog2;
+const intptr_t kObjectAlignmentMask = (1 << kObjectAlignmentBits) - 1;
+const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;

 // Tag information for HeapObject.
 const int kHeapObjectTag = 1;
 const int kHeapObjectTagSize = 2;
-const int kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
+const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;

 // Tag information for Smi.
 const int kSmiTag = 0;
 const int kSmiTagSize = 1;
-const int kSmiTagMask = (1 << kSmiTagSize) - 1;
+const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;

 // Tag information for Failure.
 const int kFailureTag = 3;
 const int kFailureTagSize = 2;
-const int kFailureTagMask = (1 << kFailureTagSize) - 1;
+const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;

 const int kBitsPerByte = 8;
@@ -129,11 +144,21 @@ const int kBitsPerPointer = kPointerSize * kBitsPerByte;
 const int kBitsPerInt = kIntSize * kBitsPerByte;

-// Zap-value: The value used for zapping dead objects. Should be a recognizable
-// illegal heap object pointer.
+// Zap-value: The value used for zapping dead objects.
+// Should be a recognizable hex value tagged as a heap object pointer.
+#ifdef V8_HOST_ARCH_64_BIT
+const Address kZapValue =
+    reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeed));
+const Address kHandleZapValue =
+    reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
+const Address kFromSpaceZapValue =
+    reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
+#else
 const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
 const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
 const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
+#endif

 // -----------------------------------------------------------------------------
 // Forward declarations for frequently used classes
@@ -146,7 +171,6 @@ class Assembler;
 class BreakableStatement;
 class Code;
 class CodeGenerator;
-class CodeRegion;
 class CodeStub;
 class Context;
 class Debug;
@@ -377,13 +401,13 @@ enum StateTag {
 // Testers for test.
 #define HAS_SMI_TAG(value) \
-  ((reinterpret_cast<int>(value) & kSmiTagMask) == kSmiTag)
+  ((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)

 #define HAS_FAILURE_TAG(value) \
-  ((reinterpret_cast<int>(value) & kFailureTagMask) == kFailureTag)
+  ((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)

 #define HAS_HEAP_OBJECT_TAG(value) \
-  ((reinterpret_cast<int>(value) & kHeapObjectTagMask) == kHeapObjectTag)
+  ((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) == kHeapObjectTag)

 // OBJECT_SIZE_ALIGN returns the value aligned HeapObject size
 #define OBJECT_SIZE_ALIGN(value) \
@@ -492,7 +516,7 @@ F FUNCTION_CAST(Address addr) {
 // exception'.
 //
 // Bit_cast uses the memcpy exception to move the bits from a variable of one
-// type o a variable of another type. Of course the end result is likely to
+// type of a variable of another type. Of course the end result is likely to
 // be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
 // will completely optimize bit_cast away.
 //
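Most of the globals.h churn above is about surviving 64-bit hosts: tag masks and the reinterpret_casts in HAS_*_TAG move from int to intptr_t, object alignment is derived from kPointerSizeLog2, and the zap values gain 64-bit variants. A small self-contained illustration of the tagging checks those masks support (the constants mirror the ones above; the helper names are this sketch's own, not V8's):

    #include <cstdint>
    #include <iostream>

    const int kSmiTag = 0;
    const int kSmiTagSize = 1;
    const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;

    const int kHeapObjectTag = 1;
    const int kHeapObjectTagSize = 2;
    const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;

    // A small integer is stored shifted left by one with a 0 tag bit; a heap
    // pointer keeps its (aligned) address with the low two bits set to 01.
    inline intptr_t MakeSmi(intptr_t value) { return value << kSmiTagSize; }
    inline void* TagHeapObject(void* aligned_address) {
      return reinterpret_cast<void*>(
          reinterpret_cast<intptr_t>(aligned_address) + kHeapObjectTag);
    }

    inline bool HasSmiTag(const void* value) {
      return (reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag;
    }
    inline bool HasHeapObjectTag(const void* value) {
      return (reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) ==
             kHeapObjectTag;
    }

    int main() {
      alignas(8) int slot = 0;  // Any sufficiently aligned address will do.
      void* smi = reinterpret_cast<void*>(MakeSmi(42));
      void* obj = TagHeapObject(&slot);
      std::cout << HasSmiTag(smi) << HasHeapObjectTag(obj) << "\n";  // 11
      // With the old int-sized casts and masks a 64-bit pointer would not fit,
      // which is why these moved to intptr_t alongside the x64 port.
    }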

66  deps/v8/src/handles.cc

@@ -212,7 +212,17 @@ Handle<Object> SetProperty(Handle<Object> object,
 }

-Handle<Object> IgnoreAttributesAndSetLocalProperty(Handle<JSObject> object,
+Handle<Object> ForceSetProperty(Handle<JSObject> object,
+                                Handle<Object> key,
+                                Handle<Object> value,
+                                PropertyAttributes attributes) {
+  CALL_HEAP_FUNCTION(
+      Runtime::ForceSetObjectProperty(object, key, value, attributes), Object);
+}
+
+
+Handle<Object> IgnoreAttributesAndSetLocalProperty(
+    Handle<JSObject> object,
     Handle<String> key,
     Handle<Object> value,
     PropertyAttributes attributes) {
@@ -491,17 +501,6 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object) {
         break;
       }

-      // Compute the property keys.
-      content = UnionOfKeys(content, GetEnumPropertyKeys(current));
-
-      // Add the property keys from the interceptor.
-      if (current->HasNamedInterceptor()) {
-        v8::Handle<v8::Array> result =
-            GetKeysForNamedInterceptor(object, current);
-        if (!result.IsEmpty())
-          content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
-      }
-
       // Compute the element keys.
       Handle<FixedArray> element_keys =
           Factory::NewFixedArray(current->NumberOfEnumElements());
@@ -515,6 +514,17 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object) {
         if (!result.IsEmpty())
          content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
       }

+      // Compute the property keys.
+      content = UnionOfKeys(content, GetEnumPropertyKeys(current));
+
+      // Add the property keys from the interceptor.
+      if (current->HasNamedInterceptor()) {
+        v8::Handle<v8::Array> result =
+            GetKeysForNamedInterceptor(object, current);
+        if (!result.IsEmpty())
+          content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
+      }
     }
   }
   return content;
@@ -549,7 +559,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object) {
       index++;
     }
   }
-  (*storage)->SortPairs(*sort_array);
+  (*storage)->SortPairs(*sort_array, sort_array->length());
   Handle<FixedArray> bridge_storage =
       Factory::NewFixedArray(DescriptorArray::kEnumCacheBridgeLength);
   DescriptorArray* desc = object->map()->instance_descriptors();
@@ -617,9 +627,9 @@ OptimizedObjectForAddingMultipleProperties::
 }

-void LoadLazy(Handle<JSFunction> fun, bool* pending_exception) {
+void LoadLazy(Handle<JSObject> obj, bool* pending_exception) {
   HandleScope scope;
-  Handle<FixedArray> info(FixedArray::cast(fun->shared()->lazy_load_data()));
+  Handle<FixedArray> info(FixedArray::cast(obj->map()->constructor()));
   int index = Smi::cast(info->get(0))->value();
   ASSERT(index >= 0);
   Handle<Context> compile_context(Context::cast(info->get(1)));
@@ -651,6 +661,7 @@ void LoadLazy(Handle<JSFunction> fun, bool* pending_exception) {
   // We shouldn't get here if compiling the script failed.
   ASSERT(!boilerplate.is_null());

+#ifdef ENABLE_DEBUGGER_SUPPORT
   // When the debugger running in its own context touches lazy loaded
   // functions loading can be triggered. In that case ensure that the
   // execution of the boilerplate is in the correct context.
@@ -659,30 +670,43 @@ void LoadLazy(Handle<JSFunction> fun, bool* pending_exception) {
       Top::context() == *Debug::debug_context()) {
     Top::set_context(*compile_context);
   }
+#endif

   // Reset the lazy load data before running the script to make sure
   // not to get recursive lazy loading.
-  fun->shared()->set_lazy_load_data(Heap::undefined_value());
+  obj->map()->set_needs_loading(false);
+  obj->map()->set_constructor(info->get(3));

   // Run the script.
   Handle<JSFunction> script_fun(
       Factory::NewFunctionFromBoilerplate(boilerplate, function_context));
   Execution::Call(script_fun, receiver, 0, NULL, pending_exception);

-  // If lazy loading failed, restore the unloaded state of fun.
-  if (*pending_exception) fun->shared()->set_lazy_load_data(*info);
+  // If lazy loading failed, restore the unloaded state of obj.
+  if (*pending_exception) {
+    obj->map()->set_needs_loading(true);
+    obj->map()->set_constructor(*info);
+  }
 }

-void SetupLazy(Handle<JSFunction> fun,
+void SetupLazy(Handle<JSObject> obj,
               int index,
               Handle<Context> compile_context,
               Handle<Context> function_context) {
-  Handle<FixedArray> arr = Factory::NewFixedArray(3);
+  Handle<FixedArray> arr = Factory::NewFixedArray(4);
   arr->set(0, Smi::FromInt(index));
   arr->set(1, *compile_context);  // Compile in this context
   arr->set(2, *function_context);  // Set function context to this
-  fun->shared()->set_lazy_load_data(*arr);
+  arr->set(3, obj->map()->constructor());  // Remember the constructor
+  Handle<Map> old_map(obj->map());
+  Handle<Map> new_map = Factory::CopyMapDropTransitions(old_map);
+  obj->set_map(*new_map);
+  new_map->set_needs_loading(true);
+  // Store the lazy loading info in the constructor field.  We'll
+  // reestablish the constructor from the fixed array after loading.
+  new_map->set_constructor(*arr);
+  ASSERT(!obj->IsLoaded());
 }

 } }  // namespace v8::internal
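The SetupLazy/LoadLazy rewrite above moves the "how to load me" record off the function's SharedFunctionInfo and onto a copied map: the record is parked in the map's constructor field, needs_loading is set, and a failed load puts the original state back. Reduced to its stash-run-restore shape (LazyInfo, ObjectState and the loader callback below are stand-ins invented for this sketch, not V8's heap types):

    #include <functional>
    #include <iostream>

    struct LazyInfo {
      int script_index;               // Which delayed script to compile and run.
      const void* saved_constructor;  // The real constructor, restored later.
    };

    struct ObjectState {
      bool needs_loading = false;
      const void* constructor = nullptr;  // Either the real ctor or a LazyInfo*.
    };

    // Mark obj as lazily loaded: remember the real constructor inside the
    // LazyInfo record and park the record where the constructor used to be.
    void SetupLazy(ObjectState* obj, int index, LazyInfo* info) {
      info->script_index = index;
      info->saved_constructor = obj->constructor;
      obj->constructor = info;
      obj->needs_loading = true;
    }

    // Clear the lazy state *before* running the loader (so loading cannot
    // recurse); on failure put the object back into its unloaded state.
    bool LoadLazy(ObjectState* obj, const std::function<bool(int)>& run_script) {
      const LazyInfo* info = static_cast<const LazyInfo*>(obj->constructor);
      obj->needs_loading = false;
      obj->constructor = info->saved_constructor;
      if (!run_script(info->script_index)) {
        obj->needs_loading = true;  // Loading failed: restore unloaded state.
        obj->constructor = info;
        return false;
      }
      return true;
    }

    int main() {
      ObjectState obj;
      obj.constructor = "RealConstructor";
      LazyInfo info;
      SetupLazy(&obj, 7, &info);
      bool ok = LoadLazy(&obj, [](int index) { return index == 7; });
      std::cout << ok << " " << obj.needs_loading << "\n";  // 1 0
    }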

9  deps/v8/src/handles.h

@@ -196,6 +196,11 @@ Handle<Object> SetProperty(Handle<Object> object,
                            Handle<Object> value,
                            PropertyAttributes attributes);

+Handle<Object> ForceSetProperty(Handle<JSObject> object,
+                                Handle<Object> key,
+                                Handle<Object> value,
+                                PropertyAttributes attributes);
+
 Handle<Object> IgnoreAttributesAndSetLocalProperty(Handle<JSObject> object,
                                                    Handle<String> key,
                                                    Handle<Object> value,
@@ -296,11 +301,11 @@ bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
 bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);

 // These deal with lazily loaded properties.
-void SetupLazy(Handle<JSFunction> fun,
+void SetupLazy(Handle<JSObject> obj,
               int index,
               Handle<Context> compile_context,
               Handle<Context> function_context);
-void LoadLazy(Handle<JSFunction> fun, bool* pending_exception);
+void LoadLazy(Handle<JSObject> obj, bool* pending_exception);

 class NoHandleAllocation BASE_EMBEDDED {
  public:

4  deps/v8/src/heap-inl.h

@@ -251,11 +251,11 @@ void Heap::SetLastScriptId(Object* last_script_id) {
       __object__ = FUNCTION_CALL;                                \
     }                                                            \
     if (!__object__->IsFailure()) RETURN_VALUE;                  \
-    if (__object__->IsOutOfMemoryFailure()) {                    \
+    if (__object__->IsOutOfMemoryFailure() ||                    \
+        __object__->IsRetryAfterGC()) {                          \
       /* TODO(1181417): Fix this. */                             \
       v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2"); \
     }                                                            \
-    ASSERT(!__object__->IsRetryAfterGC());                       \
     RETURN_EMPTY;                                                \
   } while (false)
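The CALL_AND_RETRY_2 change above means a result that still reports "retry after GC" on the fallback path is now treated like running out of memory instead of merely tripping a debug-only assert. A toy rendering of that attempt / collect / retry / give-up flow (AllocResult, TryAllocate and CollectAllGarbage below are stand-ins for the sketch, not the real heap entry points):

    #include <cstdio>
    #include <cstdlib>

    enum class AllocResult { kSuccess, kRetryAfterGC, kOutOfMemory };

    // Toy allocator: the first call asks for a GC, the retry succeeds.
    static int gc_count = 0;
    AllocResult TryAllocate() {
      return (gc_count == 0) ? AllocResult::kRetryAfterGC : AllocResult::kSuccess;
    }
    void CollectAllGarbage() { ++gc_count; }

    [[noreturn]] void FatalProcessOutOfMemory(const char* where) {
      std::fprintf(stderr, "fatal OOM in %s\n", where);
      std::abort();
    }

    // Mirrors the call-and-retry shape: attempt, GC, attempt again, and treat
    // any remaining failure (OOM or another GC request) as fatal.
    bool CallAndRetry() {
      AllocResult result = TryAllocate();
      if (result == AllocResult::kSuccess) return true;
      if (result == AllocResult::kOutOfMemory) {
        FatalProcessOutOfMemory("first attempt");
      }
      CollectAllGarbage();    // kRetryAfterGC: free some space first.
      result = TryAllocate(); // Second and last attempt.
      if (result == AllocResult::kSuccess) return true;
      FatalProcessOutOfMemory("retry");  // OOM or still asking for a GC.
    }

    int main() {
      bool ok = CallAndRetry();
      std::printf("allocated: %d after %d GC(s)\n", ok, gc_count);
    }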

151  deps/v8/src/heap.cc

@@ -538,7 +538,7 @@ class ScavengeVisitor: public ObjectVisitor {

 // Shared state read by the scavenge collector and set by ScavengeObject.
-static Address promoted_top = NULL;
+static Address promoted_rear = NULL;

 #ifdef DEBUG
@@ -554,24 +554,34 @@ class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
     }
   }
 };
-#endif
-
-
-void Heap::Scavenge() {
-#ifdef DEBUG
-  if (FLAG_enable_slow_asserts) {
-    VerifyNonPointerSpacePointersVisitor v;
-    HeapObjectIterator it(code_space_);
-    while (it.has_next()) {
-      HeapObject* object = it.next();
-      if (object->IsCode()) {
-        Code::cast(object)->ConvertICTargetsFromAddressToObject();
-      }
-      object->Iterate(&v);
-      if (object->IsCode()) {
-        Code::cast(object)->ConvertICTargetsFromObjectToAddress();
-      }
-    }
-  }
+
+
+static void VerifyNonPointerSpacePointers() {
+  // Verify that there are no pointers to new space in spaces where we
+  // do not expect them.
+  VerifyNonPointerSpacePointersVisitor v;
+  HeapObjectIterator code_it(Heap::code_space());
+  while (code_it.has_next()) {
+    HeapObject* object = code_it.next();
+    if (object->IsCode()) {
+      Code::cast(object)->ConvertICTargetsFromAddressToObject();
+      object->Iterate(&v);
+      Code::cast(object)->ConvertICTargetsFromObjectToAddress();
+    } else {
+      // If we find non-code objects in code space (e.g., free list
+      // nodes) we want to verify them as well.
+      object->Iterate(&v);
+    }
+  }
+
+  HeapObjectIterator data_it(Heap::old_data_space());
+  while (data_it.has_next()) data_it.next()->Iterate(&v);
+}
+#endif
+
+
+void Heap::Scavenge() {
+#ifdef DEBUG
+  if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
 #endif

   gc_state_ = SCAVENGE;
@@ -596,72 +606,70 @@ void Heap::Scavenge() {
   new_space_.Flip();
   new_space_.ResetAllocationInfo();

-  // We need to sweep newly copied objects which can be in either the to space
-  // or the old space.  For to space objects, we use a mark.  Newly copied
-  // objects lie between the mark and the allocation top.  For objects
-  // promoted to old space, we write their addresses downward from the top of
-  // the new space.  Sweeping newly promoted objects requires an allocation
-  // pointer and a mark.  Note that the allocation pointer 'top' actually
-  // moves downward from the high address in the to space.
-  //
-  // There is guaranteed to be enough room at the top of the to space for the
-  // addresses of promoted objects: every object promoted frees up its size in
-  // bytes from the top of the new space, and objects are at least one pointer
-  // in size.  Using the new space to record promoted addresses makes the
-  // scavenge collector agnostic to the allocation strategy (eg, linear or
-  // free-list) used in old space.
-  Address new_mark = new_space_.ToSpaceLow();
-  Address promoted_mark = new_space_.ToSpaceHigh();
-  promoted_top = new_space_.ToSpaceHigh();
+  // We need to sweep newly copied objects which can be either in the
+  // to space or promoted to the old generation.  For to-space
+  // objects, we treat the bottom of the to space as a queue.  Newly
+  // copied and unswept objects lie between a 'front' mark and the
+  // allocation pointer.
+  //
+  // Promoted objects can go into various old-generation spaces, and
+  // can be allocated internally in the spaces (from the free list).
+  // We treat the top of the to space as a queue of addresses of
+  // promoted objects.  The addresses of newly promoted and unswept
+  // objects lie between a 'front' mark and a 'rear' mark that is
+  // updated as a side effect of promoting an object.
+  //
+  // There is guaranteed to be enough room at the top of the to space
+  // for the addresses of promoted objects: every object promoted
+  // frees up its size in bytes from the top of the new space, and
+  // objects are at least one pointer in size.
+  Address new_space_front = new_space_.ToSpaceLow();
+  Address promoted_front = new_space_.ToSpaceHigh();
+  promoted_rear = new_space_.ToSpaceHigh();

   ScavengeVisitor scavenge_visitor;
   // Copy roots.
   IterateRoots(&scavenge_visitor);

-  // Copy objects reachable from the old generation.  By definition, there
-  // are no intergenerational pointers in code or data spaces.
+  // Copy objects reachable from weak pointers.
+  GlobalHandles::IterateWeakRoots(&scavenge_visitor);
+
+  // Copy objects reachable from the old generation.  By definition,
+  // there are no intergenerational pointers in code or data spaces.
   IterateRSet(old_pointer_space_, &ScavengePointer);
   IterateRSet(map_space_, &ScavengePointer);
   lo_space_->IterateRSet(&ScavengePointer);

-  bool has_processed_weak_pointers = false;
-
-  while (true) {
-    ASSERT(new_mark <= new_space_.top());
-    ASSERT(promoted_mark >= promoted_top);
-
-    // Copy objects reachable from newly copied objects.
-    while (new_mark < new_space_.top() || promoted_mark > promoted_top) {
-      // Sweep newly copied objects in the to space.  The allocation pointer
-      // can change during sweeping.
-      Address previous_top = new_space_.top();
-      SemiSpaceIterator new_it(new_space(), new_mark);
-      while (new_it.has_next()) {
-        new_it.next()->Iterate(&scavenge_visitor);
-      }
-      new_mark = previous_top;
-
-      // Sweep newly copied objects in the old space.  The promotion 'top'
-      // pointer could change during sweeping.
-      previous_top = promoted_top;
-      for (Address current = promoted_mark - kPointerSize;
-           current >= previous_top;
-           current -= kPointerSize) {
-        HeapObject* object = HeapObject::cast(Memory::Object_at(current));
-        object->Iterate(&scavenge_visitor);
-        UpdateRSet(object);
-      }
-      promoted_mark = previous_top;
-    }
-
-    if (has_processed_weak_pointers) break;  // We are done.
-    // Copy objects reachable from weak pointers.
-    GlobalHandles::IterateWeakRoots(&scavenge_visitor);
-    has_processed_weak_pointers = true;
-  }
+  do {
+    ASSERT(new_space_front <= new_space_.top());
+    ASSERT(promoted_front >= promoted_rear);
+
+    // The addresses new_space_front and new_space_.top() define a
+    // queue of unprocessed copied objects.  Process them until the
+    // queue is empty.
+    while (new_space_front < new_space_.top()) {
+      HeapObject* object = HeapObject::FromAddress(new_space_front);
+      object->Iterate(&scavenge_visitor);
+      new_space_front += object->Size();
+    }
+
+    // The addresses promoted_front and promoted_rear define a queue
+    // of unprocessed addresses of promoted objects.  Process them
+    // until the queue is empty.
+    while (promoted_front > promoted_rear) {
+      promoted_front -= kPointerSize;
+      HeapObject* object =
+          HeapObject::cast(Memory::Object_at(promoted_front));
+      object->Iterate(&scavenge_visitor);
+      UpdateRSet(object);
+    }
+
+    // Take another spin if there are now unswept objects in new space
+    // (there are currently no more unswept promoted objects).
+  } while (new_space_front < new_space_.top());

   // Set age mark.
-  new_space_.set_age_mark(new_mark);
+  new_space_.set_age_mark(new_space_.top());

   LOG(ResourceEvent("scavenge", "end"));
@@ -882,8 +890,8 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
   if (target_space == Heap::old_pointer_space_) {
     // Record the object's address at the top of the to space, to allow
     // it to be swept by the scavenger.
-    promoted_top -= kPointerSize;
-    Memory::Object_at(promoted_top) = *p;
+    promoted_rear -= kPointerSize;
+    Memory::Object_at(promoted_rear) = *p;
   } else {
 #ifdef DEBUG
     // Objects promoted to the data space should not have pointers to
@@ -939,6 +947,7 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
   map->set_code_cache(empty_fixed_array());
   map->set_unused_property_fields(0);
   map->set_bit_field(0);
+  map->set_bit_field2(0);
   return map;
 }
@@ -1409,7 +1418,6 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
   share->set_formal_parameter_count(0);
   share->set_instance_class_name(Object_symbol());
   share->set_function_data(undefined_value());
-  share->set_lazy_load_data(undefined_value());
   share->set_script(undefined_value());
   share->set_start_position_and_type(0);
   share->set_debug_info(undefined_value());
@@ -1423,8 +1431,8 @@ Object* Heap::AllocateConsString(String* first,
   int first_length = first->length();
   int second_length = second->length();
   int length = first_length + second_length;
-  bool is_ascii = StringShape(first).IsAsciiRepresentation()
-      && StringShape(second).IsAsciiRepresentation();
+  bool is_ascii = first->IsAsciiRepresentation()
+      && second->IsAsciiRepresentation();

   // If the resulting string is small make a flat string.
   if (length < String::kMinNonFlatLength) {
@@ -1484,15 +1492,15 @@ Object* Heap::AllocateSlicedString(String* buffer,
   Map* map;
   if (length <= String::kMaxShortStringSize) {
-    map = StringShape(buffer).IsAsciiRepresentation() ?
+    map = buffer->IsAsciiRepresentation() ?
       short_sliced_ascii_string_map() :
       short_sliced_string_map();
   } else if (length <= String::kMaxMediumStringSize) {
-    map = StringShape(buffer).IsAsciiRepresentation() ?
+    map = buffer->IsAsciiRepresentation() ?
       medium_sliced_ascii_string_map() :
       medium_sliced_string_map();
   } else {
-    map = StringShape(buffer).IsAsciiRepresentation() ?
+    map = buffer->IsAsciiRepresentation() ?
       long_sliced_ascii_string_map() :
       long_sliced_string_map();
   }
@@ -1524,7 +1532,7 @@ Object* Heap::AllocateSubString(String* buffer,
     buffer->TryFlatten();
   }

-  Object* result = StringShape(buffer).IsAsciiRepresentation()
+  Object* result = buffer->IsAsciiRepresentation()
       ? AllocateRawAsciiString(length)
       : AllocateRawTwoByteString(length);
   if (result->IsFailure()) return result;
@@ -2679,7 +2687,10 @@ void Heap::IterateStrongRoots(ObjectVisitor* v) {
   SYNCHRONIZE_TAG("bootstrapper");
   Top::Iterate(v);
   SYNCHRONIZE_TAG("top");
+#ifdef ENABLE_DEBUGGER_SUPPORT
   Debug::Iterate(v);
+#endif
   SYNCHRONIZE_TAG("debug");
   CompilationCache::Iterate(v);
   SYNCHRONIZE_TAG("compilationcache");
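The rewritten Scavenge above drains two queues in alternation: newly copied objects sit between new_space_front and the allocation top of the to space, while addresses of promoted objects are written downward between promoted_front and promoted_rear, and the outer loop spins until nothing new shows up in the to space. A heap-free simulation of that double-queue drain (the Obj graph and the promote-every-other-object rule are inventions of this sketch, not the real promotion policy):

    #include <cstdio>
    #include <deque>
    #include <vector>

    // A tiny object graph: each object just points at its children.
    struct Obj {
      std::vector<Obj*> children;
      bool visited = false;
    };

    // Mirrors the structure of the scavenge loop: a queue of newly copied
    // to-space objects and a second queue of promoted objects, processed in
    // alternation until both are empty.
    void Scavenge(const std::vector<Obj*>& roots) {
      std::deque<Obj*> to_space_queue;   // new_space_front .. new_space_.top()
      std::deque<Obj*> promoted_queue;   // promoted_front .. promoted_rear

      int promote_counter = 0;
      auto scavenge_object = [&](Obj* o) {
        if (o == nullptr || o->visited) return;
        o->visited = true;               // "Copy" the object exactly once.
        // Every other object is "promoted" to the old generation; the rest
        // stay in new space.  Both still need their slots visited later.
        if (++promote_counter % 2 == 0) {
          promoted_queue.push_back(o);
        } else {
          to_space_queue.push_back(o);
        }
      };

      for (Obj* root : roots) scavenge_object(root);   // IterateRoots.

      do {
        // Drain the to-space queue; visiting an object may enqueue more work.
        while (!to_space_queue.empty()) {
          Obj* o = to_space_queue.front();
          to_space_queue.pop_front();
          for (Obj* child : o->children) scavenge_object(child);
        }
        // Drain the promoted queue the same way.
        while (!promoted_queue.empty()) {
          Obj* o = promoted_queue.front();
          promoted_queue.pop_front();
          for (Obj* child : o->children) scavenge_object(child);
        }
        // Promoted objects may have re-filled the to-space queue; loop again.
      } while (!to_space_queue.empty());
    }

    int main() {
      Obj a, b, c, d;
      a.children = {&b, &c};
      c.children = {&d, &a};  // A cycle is handled by the visited flag.
      Scavenge({&a});
      std::printf("%d %d %d %d\n", a.visited, b.visited, c.visited, d.visited);
      // Prints: 1 1 1 1
    }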

6  deps/v8/src/assembler-ia32-inl.h → deps/v8/src/ia32/assembler-ia32-inl.h

@@ -34,8 +34,8 @@
 // A light-weight IA32 Assembler.

-#ifndef V8_ASSEMBLER_IA32_INL_H_
-#define V8_ASSEMBLER_IA32_INL_H_
+#ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
+#define V8_IA32_ASSEMBLER_IA32_INL_H_

 #include "cpu.h"
@@ -299,4 +299,4 @@ Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {

 } }  // namespace v8::internal

-#endif  // V8_ASSEMBLER_IA32_INL_H_
+#endif  // V8_IA32_ASSEMBLER_IA32_INL_H_

41  deps/v8/src/assembler-ia32.cc → deps/v8/src/ia32/assembler-ia32.cc

@@ -283,6 +283,10 @@ bool Operand::is_reg(Register reg) const {
   *pc_++ = (x)

+#ifdef GENERATED_CODE_COVERAGE
+static void InitCoverageLog();
+#endif
+
 // spare_buffer_
 static byte* spare_buffer_ = NULL;
@@ -315,9 +319,11 @@ Assembler::Assembler(void* buffer, int buffer_size) {
   // Clear the buffer in debug mode unless it was provided by the
   // caller in which case we can't be sure it's okay to overwrite
   // existing code in it; see CodePatcher::CodePatcher(...).
-  if (kDebug && own_buffer_) {
+#ifdef DEBUG
+  if (own_buffer_) {
     memset(buffer_, 0xCC, buffer_size);  // int3
   }
+#endif

   // setup buffer pointers
   ASSERT(buffer_ != NULL);
@@ -329,6 +335,9 @@ Assembler::Assembler(void* buffer, int buffer_size) {
   current_position_ = RelocInfo::kNoPosition;
   written_statement_position_ = current_statement_position_;
   written_position_ = current_position_;
+#ifdef GENERATED_CODE_COVERAGE
+  InitCoverageLog();
+#endif
 }
@@ -2073,9 +2082,9 @@ void Assembler::GrowBuffer() {
   // Clear the buffer in debug mode. Use 'int3' instructions to make
   // sure to get into problems if we ever run uninitialized code.
-  if (kDebug) {
+#ifdef DEBUG
     memset(desc.buffer, 0xCC, desc.buffer_size);
-  }
+#endif

   // copy the data
   int pc_delta = desc.buffer - buffer_;
@@ -2202,4 +2211,30 @@ void Assembler::WriteInternalReference(int position, const Label& bound_label) {
   long_at_put(position, label_loc);
 }

+#ifdef GENERATED_CODE_COVERAGE
+static FILE* coverage_log = NULL;
+
+
+static void InitCoverageLog() {
+  char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
+  if (file_name != NULL) {
+    coverage_log = fopen(file_name, "aw+");
+  }
+}
+
+
+void LogGeneratedCodeCoverage(const char* file_line) {
+  const char* return_address = (&file_line)[-1];
+  char* push_insn = const_cast<char*>(return_address - 12);
+  push_insn[0] = 0xeb;  // Relative branch insn.
+  push_insn[1] = 13;    // Skip over coverage insns.
+  if (coverage_log != NULL) {
+    fprintf(coverage_log, "%s\n", file_line);
+    fflush(coverage_log);
+  }
+}
+#endif
+
 } }  // namespace v8::internal

6  deps/v8/src/assembler-ia32.h → deps/v8/src/ia32/assembler-ia32.h

@@ -34,8 +34,8 @@
 // A light-weight IA32 Assembler.

-#ifndef V8_ASSEMBLER_IA32_H_
-#define V8_ASSEMBLER_IA32_H_
+#ifndef V8_IA32_ASSEMBLER_IA32_H_
+#define V8_IA32_ASSEMBLER_IA32_H_

 namespace v8 { namespace internal {
@@ -860,4 +860,4 @@ class EnsureSpace BASE_EMBEDDED {

 } }  // namespace v8::internal

-#endif  // V8_ASSEMBLER_IA32_H_
+#endif  // V8_IA32_ASSEMBLER_IA32_H_

28  deps/v8/src/builtins-ia32.cc → deps/v8/src/ia32/builtins-ia32.cc

@@ -32,7 +32,7 @@
 namespace v8 { namespace internal {

-#define __ masm->
+#define __ ACCESS_MASM(masm)

 void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
@@ -54,6 +54,14 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
   //  -- edi: constructor function
   // -----------------------------------

+  Label non_function_call;
+  // Check that function is not a smi.
+  __ test(edi, Immediate(kSmiTagMask));
+  __ j(zero, &non_function_call);
+  // Check that function is a JSFunction.
+  __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
+  __ j(not_equal, &non_function_call);
+
   // Enter a construct frame.
   __ EnterConstructFrame();
@@ -69,16 +77,12 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
   Label rt_call, allocated;
   if (FLAG_inline_new) {
     Label undo_allocation;
+#ifdef ENABLE_DEBUGGER_SUPPORT
     ExternalReference debug_step_in_fp =
         ExternalReference::debug_step_in_fp_address();
     __ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
     __ j(not_equal, &rt_call);
-    // Check that function is not a Smi.
-    __ test(edi, Immediate(kSmiTagMask));
-    __ j(zero, &rt_call);
-    // Check that function is a JSFunction
-    __ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
-    __ j(not_equal, &rt_call);
+#endif

     // Verified that the constructor is a JSFunction.
     // Load the initial map and verify that it is in fact a map.
@@ -300,6 +304,16 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
   __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize));  // 1 ~ receiver
   __ push(ecx);
   __ ret(0);
+
+  // edi: called object
+  // eax: number of arguments
+  __ bind(&non_function_call);
+
+  // Set expected number of arguments to zero (not changing eax).
+  __ Set(ebx, Immediate(0));
+  __ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
+  __ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
+         RelocInfo::CODE_TARGET);
 }

216
deps/v8/src/codegen-ia32.cc → deps/v8/src/ia32/codegen-ia32.cc

@ -30,6 +30,7 @@
#include "bootstrapper.h" #include "bootstrapper.h"
#include "codegen-inl.h" #include "codegen-inl.h"
#include "debug.h" #include "debug.h"
#include "ic-inl.h"
#include "parser.h" #include "parser.h"
#include "register-allocator-inl.h" #include "register-allocator-inl.h"
#include "runtime.h" #include "runtime.h"
@ -37,7 +38,7 @@
namespace v8 { namespace internal { namespace v8 { namespace internal {
#define __ masm_-> #define __ ACCESS_MASM(masm_)
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// CodeGenState implementation. // CodeGenState implementation.
@ -1274,12 +1275,9 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
smi_value, smi_value,
overwrite_mode); overwrite_mode);
__ Set(answer.reg(), Immediate(value)); __ Set(answer.reg(), Immediate(value));
if (operand->is_register()) { // We are in the reversed case so they can't both be Smi constants.
ASSERT(operand->is_register());
__ sub(answer.reg(), Operand(operand->reg())); __ sub(answer.reg(), Operand(operand->reg()));
} else {
ASSERT(operand->is_constant());
__ sub(Operand(answer.reg()), Immediate(operand->handle()));
}
} else { } else {
operand->ToRegister(); operand->ToRegister();
frame_->Spill(operand->reg()); frame_->Spill(operand->reg());
@ -1374,14 +1372,13 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->ToRegister(); operand->ToRegister();
__ test(operand->reg(), Immediate(kSmiTagMask)); __ test(operand->reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken); deferred->enter()->Branch(not_zero, operand, not_taken);
if (shift_value != 0) {
Result answer = allocator()->Allocate(); Result answer = allocator()->Allocate();
ASSERT(answer.is_valid()); ASSERT(answer.is_valid());
__ mov(answer.reg(), operand->reg()); __ mov(answer.reg(), operand->reg());
ASSERT(kSmiTag == 0); // adjust code if not the case ASSERT(kSmiTag == 0); // adjust code if not the case
// We do no shifts, only the Smi conversion, if shift_value is 1. // We do no shifts, only the Smi conversion, if shift_value is 1.
if (shift_value == 0) { if (shift_value > 1) {
__ sar(answer.reg(), kSmiTagSize);
} else if (shift_value > 1) {
__ shl(answer.reg(), shift_value - 1); __ shl(answer.reg(), shift_value - 1);
} }
// Convert int result to Smi, checking that it is in int range. // Convert int result to Smi, checking that it is in int range.
@ -1391,6 +1388,10 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->Unuse(); operand->Unuse();
deferred->BindExit(&answer); deferred->BindExit(&answer);
frame_->Push(&answer); frame_->Push(&answer);
} else {
deferred->BindExit(operand);
frame_->Push(operand);
}
} }
break; break;
} }
@ -1411,11 +1412,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
deferred->enter()->Branch(not_zero, operand, not_taken); deferred->enter()->Branch(not_zero, operand, not_taken);
frame_->Spill(operand->reg()); frame_->Spill(operand->reg());
if (op == Token::BIT_AND) { if (op == Token::BIT_AND) {
if (int_value == 0) {
__ xor_(Operand(operand->reg()), operand->reg());
} else {
__ and_(Operand(operand->reg()), Immediate(value)); __ and_(Operand(operand->reg()), Immediate(value));
}
} else if (op == Token::BIT_XOR) { } else if (op == Token::BIT_XOR) {
if (int_value != 0) { if (int_value != 0) {
__ xor_(Operand(operand->reg()), Immediate(value)); __ xor_(Operand(operand->reg()), Immediate(value));
@ -2009,18 +2006,18 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
// Add a label for checking the size of the code used for returning. // Add a label for checking the size of the code used for returning.
Label check_exit_codesize; Label check_exit_codesize;
__ bind(&check_exit_codesize); masm_->bind(&check_exit_codesize);
// Leave the frame and return popping the arguments and the // Leave the frame and return popping the arguments and the
// receiver. // receiver.
frame_->Exit(); frame_->Exit();
__ ret((scope_->num_parameters() + 1) * kPointerSize); masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
DeleteFrame(); DeleteFrame();
// Check that the size of the code used for returning matches what is // Check that the size of the code used for returning matches what is
// expected by the debugger. // expected by the debugger.
ASSERT_EQ(Debug::kIa32JSReturnSequenceLength, ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
__ SizeOfCodeGeneratedSince(&check_exit_codesize)); masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
} }
@ -2143,7 +2140,7 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(
times_1, 0x0, RelocInfo::INTERNAL_REFERENCE)); times_1, 0x0, RelocInfo::INTERNAL_REFERENCE));
smi_value.Unuse(); smi_value.Unuse();
// Calculate address to overwrite later with actual address of table. // Calculate address to overwrite later with actual address of table.
int32_t jump_table_ref = __ pc_offset() - sizeof(int32_t); int32_t jump_table_ref = masm_->pc_offset() - sizeof(int32_t);
__ Align(4); __ Align(4);
Label table_start; Label table_start;
__ bind(&table_start); __ bind(&table_start);
@ -3179,10 +3176,12 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
ASSERT(!in_spilled_code()); ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ DebuggerStatement"); Comment cmnt(masm_, "[ DebuggerStatement");
CodeForStatementPosition(node); CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Spill everything, even constants, to the frame. // Spill everything, even constants, to the frame.
frame_->SpillAll(); frame_->SpillAll();
frame_->CallRuntime(Runtime::kDebugBreak, 0); frame_->CallRuntime(Runtime::kDebugBreak, 0);
// Ignore the return value. // Ignore the return value.
#endif
} }
@ -3384,7 +3383,9 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
// Loop up the context chain. There is no frame effect so it is // Loop up the context chain. There is no frame effect so it is
// safe to use raw labels here. // safe to use raw labels here.
Label next, fast; Label next, fast;
if (!context.reg().is(tmp.reg())) __ mov(tmp.reg(), context.reg()); if (!context.reg().is(tmp.reg())) {
__ mov(tmp.reg(), context.reg());
}
__ bind(&next); __ bind(&next);
// Terminate at global context. // Terminate at global context.
__ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset), __ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
@ -3410,7 +3411,10 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
? RelocInfo::CODE_TARGET ? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT; : RelocInfo::CODE_TARGET_CONTEXT;
Result answer = frame_->CallLoadIC(mode); Result answer = frame_->CallLoadIC(mode);
// A test eax instruction following the call signals that the inobject
// property case was inlined. Ensure that there is not a test eax
// instruction here.
__ nop();
// Discard the global object. The result is in answer. // Discard the global object. The result is in answer.
frame_->Drop(); frame_->Drop();
return answer; return answer;
@ -3933,6 +3937,9 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
} else { } else {
Literal* literal = node->value()->AsLiteral(); Literal* literal = node->value()->AsLiteral();
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
Variable* right_var = node->value()->AsVariableProxy()->AsVariable(); Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
// There are two cases where the target is not read in the right hand // There are two cases where the target is not read in the right hand
// side, that are easy to test for: the right hand side is a literal, // side, that are easy to test for: the right hand side is a literal,
@ -3945,7 +3952,9 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
target.GetValue(NOT_INSIDE_TYPEOF); target.GetValue(NOT_INSIDE_TYPEOF);
} }
Load(node->value()); Load(node->value());
GenericBinaryOperation(node->binary_op(), node->type()); GenericBinaryOperation(node->binary_op(),
node->type(),
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
} }
if (var != NULL && if (var != NULL &&
@ -4535,6 +4544,17 @@ void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
} }
void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
Result ebp_as_smi = allocator_->Allocate();
ASSERT(ebp_as_smi.is_valid());
__ mov(ebp_as_smi.reg(), Operand(ebp));
__ shr(ebp_as_smi.reg(), kSmiTagSize);
frame_->Push(&ebp_as_smi);
}
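The GenerateGetFramePointer code above leans on the smi encoding asserted on entry (kSmiTag == 0, kSmiTagSize == 1): because ebp is word aligned, shifting it right once leaves the tag bit clear, so the pushed value looks like a smi and the GC will not try to follow it as a pointer. A minimal C++ sketch of that arithmetic, under those assumptions (not V8 code):

#include <cassert>
#include <cstdint>

const uint32_t kSmiTag = 0;
const int kSmiTagSize = 1;

// ebp is at least 4-byte aligned, so ebp >> 1 still has a clear low (tag) bit
// and is therefore a valid smi.
uint32_t FramePointerAsSmi(uint32_t ebp) {
  assert((ebp & 3u) == 0);                              // word-aligned frame pointer
  uint32_t smi = ebp >> kSmiTagSize;
  assert((smi & ((1u << kSmiTagSize) - 1)) == kSmiTag);
  return smi;
}

// The consumer recovers the raw frame pointer by undoing the shift.
uint32_t SmiToFramePointer(uint32_t smi) { return smi << kSmiTagSize; }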
void CodeGenerator::VisitCallRuntime(CallRuntime* node) { void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (CheckForInlineRuntimeCall(node)) { if (CheckForInlineRuntimeCall(node)) {
return; return;
@ -5228,6 +5248,48 @@ bool CodeGenerator::HasValidEntryRegisters() {
#endif #endif
class DeferredReferenceGetNamedValue: public DeferredCode {
public:
DeferredReferenceGetNamedValue(CodeGenerator* cgen, Handle<String> name)
: DeferredCode(cgen), name_(name) {
set_comment("[ DeferredReferenceGetNamedValue");
}
virtual void Generate();
Label* patch_site() { return &patch_site_; }
private:
Label patch_site_;
Handle<String> name_;
};
void DeferredReferenceGetNamedValue::Generate() {
CodeGenerator* cgen = generator();
Result receiver(cgen);
enter()->Bind(&receiver);
cgen->frame()->Push(&receiver);
cgen->frame()->Push(name_);
Result answer = cgen->frame()->CallLoadIC(RelocInfo::CODE_TARGET);
// The call must be followed by a test eax instruction to indicate
// that the inobject property case was inlined.
ASSERT(answer.is_register() && answer.reg().is(eax));
// Store the delta to the map check instruction here in the test instruction.
// Use masm_-> instead of the double underscore macro since the latter can't
// return a value.
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
// Here we use masm_-> instead of the double underscore macro because
// this is the instruction that gets patched and coverage code gets in
// the way.
masm_->test(answer.reg(), Immediate(-delta_to_patch_site));
__ IncrementCounter(&Counters::named_load_inline_miss, 1);
receiver = cgen->frame()->Pop();
exit_.Jump(&receiver, &answer);
}
class DeferredReferenceGetKeyedValue: public DeferredCode { class DeferredReferenceGetKeyedValue: public DeferredCode {
public: public:
DeferredReferenceGetKeyedValue(CodeGenerator* generator, bool is_global) DeferredReferenceGetKeyedValue(CodeGenerator* generator, bool is_global)
@ -5268,9 +5330,14 @@ void DeferredReferenceGetKeyedValue::Generate() {
// instruction. // instruction.
ASSERT(value.is_register() && value.reg().is(eax)); ASSERT(value.is_register() && value.reg().is(eax));
// The delta from the start of the map-compare instruction to the // The delta from the start of the map-compare instruction to the
// test eax instruction. // test instruction. We use masm_ directly here instead of the
int delta_to_patch_site = __ SizeOfCodeGeneratedSince(patch_site()); // double underscore macro because the macro sometimes uses macro
__ test(value.reg(), Immediate(-delta_to_patch_site)); // expansion to turn into something that can't return a value. This
// is encountered when doing generated code coverage tests.
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
// Here we use masm_-> instead of the double underscore macro because this
// is the instruction that gets patched and coverage code gets in the way.
masm_->test(value.reg(), Immediate(-delta_to_patch_site));
__ IncrementCounter(&Counters::keyed_load_inline_miss, 1); __ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
// The receiver and key were spilled by the call, so their state as // The receiver and key were spilled by the call, so their state as
@ -5284,7 +5351,7 @@ void DeferredReferenceGetKeyedValue::Generate() {
#undef __ #undef __
#define __ masm-> #define __ ACCESS_MASM(masm)
Handle<String> Reference::GetName() { Handle<String> Reference::GetName() {
ASSERT(type_ == NAMED); ASSERT(type_ == NAMED);
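Several hunks above switch patched instructions from the double-underscore macro to direct masm_-> calls and redefine __ as ACCESS_MASM. The comments give the reason: in generated-code-coverage builds the macro can expand into extra statements, so it cannot be used where a return value is needed, and the extra emitted bytes change the distance between a patch-site label and the instruction that must follow it at a fixed offset. A hypothetical toy illustration of that effect (ToyMasm and both macros are invented for this example; they are not V8's definitions):

#include <cstdio>

struct ToyMasm {
  int pc = 0;                                  // pretend pc offset
  void coverage_probe() { pc += 1; }           // extra byte a coverage build might emit
  void cmp_map() { pc += 7; }                  // stand-in for the 7-byte map compare
  int SizeOfCodeGeneratedSince(int label_pc) { return pc - label_pc; }
};

#define PLAIN(masm)    masm->
#define COVERAGE(masm) masm->coverage_probe(); masm->

int main() {
  ToyMasm a, b;
  int label_a = a.pc, label_b = b.pc;
  PLAIN((&a)) cmp_map();       // expands to (&a)->cmp_map(); delta stays 7
  COVERAGE((&b)) cmp_map();    // probe + cmp: two statements and extra bytes
  std::printf("plain delta=%d, coverage delta=%d\n",
              a.SizeOfCodeGeneratedSince(label_a),     // 7: patchable
              b.SizeOfCodeGeneratedSince(label_b));    // 8: patching would miss
}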
@ -5324,16 +5391,66 @@ void Reference::GetValue(TypeofState typeof_state) {
// thrown below, we must distinguish between the two kinds of // thrown below, we must distinguish between the two kinds of
// loads (typeof expression loads must not throw a reference // loads (typeof expression loads must not throw a reference
// error). // error).
Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
if (is_global || cgen_->scope()->is_global_scope()) {
// Do not inline the inobject property case for loads from the
// global object or loads in toplevel code.
Comment cmnt(masm, "[ Load from named Property"); Comment cmnt(masm, "[ Load from named Property");
cgen_->frame()->Push(GetName()); cgen_->frame()->Push(GetName());
Variable* var = expression_->AsVariableProxy()->AsVariable(); RelocInfo::Mode mode = is_global
ASSERT(var == NULL || var->is_global()); ? RelocInfo::CODE_TARGET_CONTEXT
RelocInfo::Mode mode = (var == NULL) : RelocInfo::CODE_TARGET;
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Result answer = cgen_->frame()->CallLoadIC(mode); Result answer = cgen_->frame()->CallLoadIC(mode);
// A test eax instruction following the call signals that the
// inobject property case was inlined. Ensure that there is not
// a test eax instruction here.
__ nop();
cgen_->frame()->Push(&answer); cgen_->frame()->Push(&answer);
} else {
// Inline the inobject property case.
Comment cmnt(masm, "[ Inlined named property load");
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(cgen_, GetName());
Result receiver = cgen_->frame()->Pop();
receiver.ToRegister();
// Check that the receiver is a heap object.
__ test(receiver.reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(zero, &receiver, not_taken);
// Preallocate the value register to ensure that there is no
// spill emitted between the patch site label and the offset in
// the load instruction.
Result value = cgen_->allocator()->Allocate();
ASSERT(value.is_valid());
__ bind(deferred->patch_site());
// This is the map check instruction that will be patched (so we can't
// use the double underscore macro that may insert instructions).
// Initially use an invalid map to force a failure.
masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
Immediate(Factory::null_value()));
// This branch is always a forwards branch so it's always a fixed
// size which allows the assert below to succeed and patching to work.
deferred->enter()->Branch(not_equal, &receiver, not_taken);
// The delta from the patch label to the load offset must be
// statically known.
ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
LoadIC::kOffsetToLoadInstruction);
// The initial (invalid) offset has to be large enough to force
// a 32-bit instruction encoding to allow patching with an
// arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
int offset = kMaxInt;
masm->mov(value.reg(), FieldOperand(receiver.reg(), offset));
__ IncrementCounter(&Counters::named_load_inline, 1);
deferred->BindExit(&receiver, &value);
cgen_->frame()->Push(&receiver);
cgen_->frame()->Push(&value);
}
break; break;
} }
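The inlined load above relies on a small signalling convention: the byte immediately after the IC call is either a nop (no inlined fast case) or the one-byte opcode of test eax, imm32 (inlined fast case, with the immediate encoding the distance back to the patch site). The ic-ia32.cc changes later in this diff name that opcode kTestEaxByte (0xA9); a sketch of the check, assuming that layout:

#include <cstdint>

const uint8_t kTestEaxByte = 0xA9;   // one-byte opcode of "test eax, imm32"

// "instruction_after_call" points at the byte right after the IC call site.
bool LoadWasInlined(const uint8_t* instruction_after_call) {
  return *instruction_after_call == kTestEaxByte;
}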
@ -5369,7 +5486,9 @@ void Reference::GetValue(TypeofState typeof_state) {
// Initially, use an invalid map. The map is patched in the IC // Initially, use an invalid map. The map is patched in the IC
// initialization code. // initialization code.
__ bind(deferred->patch_site()); __ bind(deferred->patch_site());
__ cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset), // Use masm-> here instead of the double underscore macro since extra
// coverage code can interfere with the patching.
masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
Immediate(Factory::null_value())); Immediate(Factory::null_value()));
deferred->enter()->Branch(not_equal, &receiver, &key, not_taken); deferred->enter()->Branch(not_equal, &receiver, &key, not_taken);
@ -5566,7 +5685,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
#undef __ #undef __
#define __ masm_-> #define __ ACCESS_MASM(masm_)
Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left, Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
Result* right) { Result* right) {
@ -5794,10 +5913,10 @@ Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
// Right operand must be in register cl because x86 likes it that way. // Right operand must be in register cl because x86 likes it that way.
if (right->reg().is(ecx)) { if (right->reg().is(ecx)) {
// Right is already in the right place. Left may be in the // Right is already in the right place. Left may be in the
// same register, which causes problems. Use answer instead. // same register, which causes problems. Always use answer
if (left->reg().is(ecx)) { // instead of left, even if left is not ecx, since this avoids
// spilling left.
*left = answer; *left = answer;
}
} else if (left->reg().is(ecx)) { } else if (left->reg().is(ecx)) {
generator()->frame()->Spill(left->reg()); generator()->frame()->Spill(left->reg());
__ mov(left->reg(), right->reg()); __ mov(left->reg(), right->reg());
@ -5811,6 +5930,9 @@ Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
ASSERT(reg_ecx.is_valid()); ASSERT(reg_ecx.is_valid());
__ mov(ecx, right->reg()); __ mov(ecx, right->reg());
*right = reg_ecx; *right = reg_ecx;
// Answer and left both contain the left operand. Use answer, so
// left is not spilled.
*left = answer;
} }
ASSERT(left->reg().is_valid()); ASSERT(left->reg().is_valid());
ASSERT(!left->reg().is(ecx)); ASSERT(!left->reg().is(ecx));
@ -5860,16 +5982,10 @@ Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
case Token::SHL: { case Token::SHL: {
__ shl(left->reg()); __ shl(left->reg());
// Check that the *signed* result fits in a smi. // Check that the *signed* result fits in a smi.
//
// TODO(207): Can reduce registers from 4 to 3 by
// preallocating ecx.
JumpTarget result_ok(generator()); JumpTarget result_ok(generator());
Result smi_test_reg = generator()->allocator()->Allocate(); __ cmp(left->reg(), 0xc0000000);
ASSERT(smi_test_reg.is_valid()); result_ok.Branch(positive, left, taken);
__ lea(smi_test_reg.reg(), Operand(left->reg(), 0x40000000));
__ test(smi_test_reg.reg(), Immediate(0x80000000));
smi_test_reg.Unuse();
result_ok.Branch(zero, left, taken);
__ shr(left->reg()); __ shr(left->reg());
ASSERT(kSmiTag == 0); ASSERT(kSmiTag == 0);
__ shl(left->reg(), kSmiTagSize); __ shl(left->reg(), kSmiTagSize);
@ -5900,7 +6016,7 @@ Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
#undef __ #undef __
#define __ masm-> #define __ ACCESS_MASM(masm)
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform fast-case smi code for the operation (eax <op> ebx) and // Perform fast-case smi code for the operation (eax <op> ebx) and
@ -6169,11 +6285,15 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SHR: __ shr(eax); break; case Token::SHR: __ shr(eax); break;
default: UNREACHABLE(); default: UNREACHABLE();
} }
if (op_ == Token::SHR) {
// Check if result is non-negative and fits in a smi. // Check if result is non-negative and fits in a smi.
__ test(eax, Immediate(0xc0000000)); __ test(eax, Immediate(0xc0000000));
__ j(not_zero, &non_smi_result); __ j(not_zero, &non_smi_result);
} else {
// Check if result fits in a smi.
__ cmp(eax, 0xc0000000);
__ j(negative, &non_smi_result);
}
// Tag smi result and return. // Tag smi result and return.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(eax, Operand(eax, eax, times_1, kSmiTag)); __ lea(eax, Operand(eax, eax, times_1, kSmiTag));
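Both branches above answer the same question: does the 32-bit shift result still fit in a smi, whose payload is a signed 31-bit value in [-2^30, 2^30)? For SHR the result is unsigned, so the top two bits must be clear; for the signed shifts the comparison against 0xc0000000 accepts exactly that range. A small sketch of the two predicates and the final tagging step, assuming the usual one-bit smi tag:

#include <cstdint>

// Unsigned case (Token::SHR): matches "test eax, 0xc0000000".
bool UnsignedFitsInSmi(uint32_t v) { return (v & 0xc0000000u) == 0; }

// Signed case: "cmp eax, 0xc0000000" followed by a branch on the sign flag
// accepts exactly the values v with -2^30 <= v < 2^30.
bool SignedFitsInSmi(int32_t v) { return v >= -(1 << 30) && v < (1 << 30); }

// Tagging doubles the value, which is what "lea eax, [eax + eax*1 + kSmiTag]"
// does with kSmiTag == 0.
int32_t TagSmi(int32_t v) { return v * 2; }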
@ -6225,7 +6345,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
} }
// SHR should return uint32 - go to runtime for non-smi/negative result. // SHR should return uint32 - go to runtime for non-smi/negative result.
if (op_ == Token::SHR) __ bind(&non_smi_result); if (op_ == Token::SHR) {
__ bind(&non_smi_result);
}
__ mov(eax, Operand(esp, 1 * kPointerSize)); __ mov(eax, Operand(esp, 1 * kPointerSize));
__ mov(edx, Operand(esp, 2 * kPointerSize)); __ mov(edx, Operand(esp, 2 * kPointerSize));
break; break;

633
deps/v8/src/ia32/codegen-ia32.h

@ -0,0 +1,633 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_IA32_CODEGEN_IA32_H_
#define V8_IA32_CODEGEN_IA32_H_
namespace v8 { namespace internal {
// Forward declarations
class DeferredCode;
class RegisterAllocator;
class RegisterFile;
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Reference support
// A reference is a C++ stack-allocated object that keeps an ECMA
// reference on the execution stack while in scope. For variables
// the reference is empty, indicating that it isn't necessary to
// store state on the stack for keeping track of references to those.
// For properties, we keep either one (named) or two (indexed) values
// on the execution stack to represent the reference.
class Reference BASE_EMBEDDED {
public:
// The values of the types are important, see size().
enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
Reference(CodeGenerator* cgen, Expression* expression);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
ASSERT(type_ == ILLEGAL);
type_ = value;
}
// The size the reference takes up on the stack.
int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is left in place with its value above it.
void GetValue(TypeofState typeof_state);
// Like GetValue except that the slot is expected to be written to before
// being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
void TakeValue(TypeofState typeof_state);
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
// on the expression stack. The stored value is left in place (with the
// reference intact below it) to support chained assignments.
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
};
// -------------------------------------------------------------------------
// Control destinations.
// A control destination encapsulates a pair of jump targets and a
// flag indicating which one is the preferred fall-through. The
// preferred fall-through must be unbound, the other may be already
// bound (ie, a backward target).
//
// The true and false targets may be jumped to unconditionally or
// control may split conditionally. Unconditional jumping and
// splitting should be emitted in tail position (as the last thing
// when compiling an expression) because they can cause either label
// to be bound or the non-fall through to be jumped to leaving an
// invalid virtual frame.
//
// The labels in the control destination can be extracted and
// manipulated normally without affecting the state of the
// destination.
class ControlDestination BASE_EMBEDDED {
public:
ControlDestination(JumpTarget* true_target,
JumpTarget* false_target,
bool true_is_fall_through)
: true_target_(true_target),
false_target_(false_target),
true_is_fall_through_(true_is_fall_through),
is_used_(false) {
ASSERT(true_is_fall_through ? !true_target->is_bound()
: !false_target->is_bound());
}
// Accessors for the jump targets. Directly jumping or branching to
// or binding the targets will not update the destination's state.
JumpTarget* true_target() const { return true_target_; }
JumpTarget* false_target() const { return false_target_; }
// True if the destination has been jumped to unconditionally or
// control has been split to both targets. This predicate does not
// test whether the targets have been extracted and manipulated as
// raw jump targets.
bool is_used() const { return is_used_; }
// True if the destination is used and the true target (respectively
// false target) was the fall through. If the target is backward,
// "fall through" included jumping unconditionally to it.
bool true_was_fall_through() const {
return is_used_ && true_is_fall_through_;
}
bool false_was_fall_through() const {
return is_used_ && !true_is_fall_through_;
}
// Emit a branch to one of the true or false targets, and bind the
// other target. Because this binds the fall-through target, it
// should be emitted in tail position (as the last thing when
// compiling an expression).
void Split(Condition cc) {
ASSERT(!is_used_);
if (true_is_fall_through_) {
false_target_->Branch(NegateCondition(cc));
true_target_->Bind();
} else {
true_target_->Branch(cc);
false_target_->Bind();
}
is_used_ = true;
}
// Emit an unconditional jump in tail position, to the true target
// (if the argument is true) or the false target. The "jump" will
// actually bind the jump target if it is forward, jump to it if it
// is backward.
void Goto(bool where) {
ASSERT(!is_used_);
JumpTarget* target = where ? true_target_ : false_target_;
if (target->is_bound()) {
target->Jump();
} else {
target->Bind();
}
is_used_ = true;
true_is_fall_through_ = where;
}
// Mark this jump target as used as if Goto had been called, but
// without generating a jump or binding a label (the control effect
// should have already happened). This is used when the left
// subexpression of a short-circuit boolean operator is
// compiled.
void Use(bool where) {
ASSERT(!is_used_);
ASSERT((where ? true_target_ : false_target_)->is_bound());
is_used_ = true;
true_is_fall_through_ = where;
}
// Swap the true and false targets but keep the same actual label as
// the fall through. This is used when compiling negated
// expressions, where we want to swap the targets but preserve the
// state.
void Invert() {
JumpTarget* temp_target = true_target_;
true_target_ = false_target_;
false_target_ = temp_target;
true_is_fall_through_ = !true_is_fall_through_;
}
private:
// True and false jump targets.
JumpTarget* true_target_;
JumpTarget* false_target_;
// Before using the destination: true if the true target is the
// preferred fall through, false if the false target is. After
// using the destination: true if the true target was actually used
// as the fall through, false if the false target was.
bool true_is_fall_through_;
// True if the Split or Goto functions have been called.
bool is_used_;
};
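As a plain-C++ toy model (not the V8 classes), the fall-through bookkeeping described above can be pictured like this: Split() branches away on one condition, binds the other target so that code falls through into it, and records which target ended up as the fall-through.

#include <cstdio>
#include <string>

struct ToyTarget {                       // stand-in for JumpTarget
  std::string name;
  void Branch(const char* cc) { std::printf("  j%s %s\n", cc, name.c_str()); }
  void Bind() { std::printf("%s:\n", name.c_str()); }
};

struct ToyDestination {                  // stand-in for ControlDestination
  ToyTarget* true_target;
  ToyTarget* false_target;
  bool true_is_fall_through;
  bool is_used = false;

  // Branch to one target and bind the other so the preferred target is the
  // fall-through -- mirroring Split() above.
  void Split(const char* cc, const char* negated_cc) {
    if (true_is_fall_through) {
      false_target->Branch(negated_cc);
      true_target->Bind();
    } else {
      true_target->Branch(cc);
      false_target->Bind();
    }
    is_used = true;
  }
};

int main() {
  ToyTarget then_block{"then"}, else_block{"else"};
  ToyDestination dest{&then_block, &else_block, /*true_is_fall_through=*/true};
  std::printf("  cmp eax, ebx\n");
  dest.Split("l", "ge");                 // "if (a < b)": jge else; fall into then
}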
// -------------------------------------------------------------------------
// Code generation state
// The state is passed down the AST by the code generator (and back up, in
// the form of the state of the jump target pair). It is threaded through
// the call stack. Constructing a state implicitly pushes it on the owning
// code generator's stack of states, and destroying one implicitly pops it.
//
// The code generator state is only used for expressions, so statements have
// the initial state.
class CodeGenState BASE_EMBEDDED {
public:
// Create an initial code generator state. Destroying the initial state
// leaves the code generator with a NULL state.
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
// state. The new state may or may not be inside a typeof, and has its
// own control destination.
CodeGenState(CodeGenerator* owner,
TypeofState typeof_state,
ControlDestination* destination);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
// Accessors for the state.
TypeofState typeof_state() const { return typeof_state_; }
ControlDestination* destination() const { return destination_; }
private:
// The owning code generator.
CodeGenerator* owner_;
// A flag indicating whether we are compiling the immediate subexpression
// of a typeof expression.
TypeofState typeof_state_;
// A control destination in case the expression has a control-flow
// effect.
ControlDestination* destination_;
// The previous state of the owning code generator, restored when
// this state is destroyed.
CodeGenState* previous_;
};
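The implicit push/pop contract described in the comments above is ordinary RAII; a minimal stand-alone sketch (again a toy, not the V8 classes):

struct ToyCodeGenerator;

struct ToyCodeGenState {
  ToyCodeGenerator* owner;
  ToyCodeGenState* previous;
  bool inside_typeof;
  ToyCodeGenState(ToyCodeGenerator* owner, bool inside_typeof);
  ~ToyCodeGenState();
};

struct ToyCodeGenerator {
  ToyCodeGenState* state = nullptr;      // current (innermost) state
};

ToyCodeGenState::ToyCodeGenState(ToyCodeGenerator* owner, bool inside_typeof)
    : owner(owner), previous(owner->state), inside_typeof(inside_typeof) {
  owner->state = this;                   // constructing pushes the state
}

ToyCodeGenState::~ToyCodeGenState() {
  owner->state = previous;               // destroying pops back to the previous one
}

// Usage: the state is installed only for the dynamic extent of the visit.
void VisitSubexpression(ToyCodeGenerator* cgen) {
  ToyCodeGenState scoped(cgen, /*inside_typeof=*/true);
  // ... visit the subexpression; cgen->state == &scoped in here ...
}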
// -------------------------------------------------------------------------
// CodeGenerator
class CodeGenerator: public AstVisitor {
public:
// Takes a function literal and generates code for it. This function should only
// be called by compiler.cc.
static Handle<Code> MakeCode(FunctionLiteral* fun,
Handle<Script> script,
bool is_eval);
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
static void SetFunctionInfo(Handle<JSFunction> fun,
int length,
int function_token_position,
int start_position,
int end_position,
bool is_expression,
bool is_toplevel,
Handle<Script> script,
Handle<String> inferred_name);
// Accessors
MacroAssembler* masm() { return masm_; }
VirtualFrame* frame() const { return frame_; }
bool has_valid_frame() const { return frame_ != NULL; }
// Set the virtual frame to be new_frame, with non-frame register
// reference counts given by non_frame_registers. The non-frame
// register reference counts of the old frame are returned in
// non_frame_registers.
void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
void DeleteFrame();
RegisterAllocator* allocator() const { return allocator_; }
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
bool in_spilled_code() const { return in_spilled_code_; }
void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
private:
// Construction/Destruction
CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
virtual ~CodeGenerator() { delete masm_; }
// Accessors
Scope* scope() const { return scope_; }
// Clearing and generating deferred code.
void ClearDeferred();
void ProcessDeferred();
bool is_eval() { return is_eval_; }
// State
TypeofState typeof_state() const { return state_->typeof_state(); }
ControlDestination* destination() const { return state_->destination(); }
// Track loop nesting level.
int loop_nesting() const { return loop_nesting_; }
void IncrementLoopNesting() { loop_nesting_++; }
void DecrementLoopNesting() { loop_nesting_--; }
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
#define DEF_VISIT(type) \
void Visit##type(type* node);
NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Visit a statement and then spill the virtual frame if control flow can
// reach the end of the statement (ie, it does not exit via break,
// continue, return, or throw). This function is used temporarily while
// the code generator is being transformed.
void VisitAndSpill(Statement* statement);
// Visit a list of statements and then spill the virtual frame if control
// flow can reach the end of the list.
void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
void GenCode(FunctionLiteral* fun);
// Generate the return sequence code. Should be called no more than
// once per compiled function, immediately after binding the return
// target (which can not be done more than once).
void GenerateReturnSequence(Result* return_value);
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
Operand ContextOperand(Register context, int index) const {
return Operand(context, Context::SlotOffset(index));
}
Operand SlotOperand(Slot* slot, Register tmp);
Operand ContextSlotOperandCheckExtensions(Slot* slot,
Result tmp,
JumpTarget* slow);
// Expressions
Operand GlobalObject() const {
return ContextOperand(esi, Context::GLOBAL_INDEX);
}
void LoadCondition(Expression* x,
TypeofState typeof_state,
ControlDestination* destination,
bool force_control);
void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
void LoadGlobal();
void LoadGlobalReceiver();
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
void LoadAndSpill(Expression* expression,
TypeofState typeof_state = NOT_INSIDE_TYPEOF);
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow);
// Store the value on top of the expression stack into a slot, leaving the
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
// non-existing properties of the global object, so we must make it
// look like an explicit property access, instead of an access
// through the context chain.
void LoadTypeofExpression(Expression* x);
// Translate the value on top of the frame into control flow to the
// control destination.
void ToBoolean(ControlDestination* destination);
void GenericBinaryOperation(
Token::Value op,
SmiAnalysis* type,
OverwriteMode overwrite_mode);
// If possible, combine two constant smi values using op to produce
// a smi result, and push it on the virtual frame, all at compile time.
// Returns true if it succeeds. Otherwise it has no effect.
bool FoldConstantSmis(Token::Value op, int left, int right);
// Emit code to perform a binary operation on a constant
// smi and a likely smi. Consumes the Result *operand.
void ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> constant_operand,
SmiAnalysis* type,
bool reversed,
OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
// Consumes the Results *left and *right.
void LikelySmiBinaryOperation(Token::Value op,
Result* left,
Result* right,
OverwriteMode overwrite_mode);
void Comparison(Condition cc,
bool strict,
ControlDestination* destination);
// To prevent long attacker-controlled byte sequences, integer constants
// from the JavaScript source are loaded in two parts if they are larger
// than 16 bits.
static const int kMaxSmiInlinedBits = 16;
bool IsUnsafeSmi(Handle<Object> value);
// Load an integer constant x into a register target using
// at most 16 bits of user-controlled data per assembly operation.
void LoadUnsafeSmi(Register target, Handle<Object> value);
void CallWithArguments(ZoneList<Expression*>* arguments, int position);
void CheckStack();
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
};
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
static bool PatchInlineRuntimeEntry(Handle<String> name,
const InlineRuntimeLUT& new_entry,
InlineRuntimeLUT* old_entry);
Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
Handle<Code> ComputeCallInitialize(int argc);
Handle<Code> ComputeCallInitializeInLoop(int argc);
// Declare global variables and functions in the given array of
// name/value pairs.
void DeclareGlobals(Handle<FixedArray> pairs);
// Instantiate the function boilerplate.
void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
// Support for type checks.
void GenerateIsSmi(ZoneList<Expression*>* args);
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
// Support for accessing the value field of an object (used by Date).
void GenerateValueOf(ZoneList<Expression*>* args);
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
void GenerateLog(ZoneList<Expression*>* args);
void GenerateGetFramePointer(ZoneList<Expression*>* args);
// Methods and constants for fast case switch statement support.
//
// Only allow fast-case switch if the range of labels is at most
// this factor times the number of case labels.
// Value is derived from comparing the size of code generated by the normal
// switch code for Smi-labels to the size of a single pointer. If code
// quality increases this number should be decreased to match.
static const int kFastSwitchMaxOverheadFactor = 5;
// Minimal number of switch cases required before we allow jump-table
// optimization.
static const int kFastSwitchMinCaseCount = 5;
// The limit of the range of a fast-case switch, as a factor of the number
// of cases of the switch. Each platform should return a value that
// is optimal compared to the default code generated for a switch statement
// on that platform.
int FastCaseSwitchMaxOverheadFactor();
// The minimal number of cases in a switch before the fast-case switch
// optimization is enabled. Each platform should return a value that
// is optimal compared to the default code generated for a switch statement
// on that platform.
int FastCaseSwitchMinCaseCount();
// Allocate a jump table and create code to jump through it.
// Should call GenerateFastCaseSwitchCases to generate the code for
// all the cases at the appropriate point.
void GenerateFastCaseSwitchJumpTable(SwitchStatement* node,
int min_index,
int range,
Label* fail_label,
Vector<Label*> case_targets,
Vector<Label> case_labels);
// Generate the code for cases for the fast case switch.
// Called by GenerateFastCaseSwitchJumpTable.
void GenerateFastCaseSwitchCases(SwitchStatement* node,
Vector<Label> case_labels,
VirtualFrame* start_frame);
// Fast support for constant-Smi switches.
void GenerateFastCaseSwitchStatement(SwitchStatement* node,
int min_index,
int range,
int default_index);
// Fast support for constant-Smi switches. Tests whether switch statement
// permits optimization and calls GenerateFastCaseSwitch if it does.
// Returns true if the fast-case switch was generated, and false if not.
bool TryGenerateFastCaseSwitchStatement(SwitchStatement* node);
// Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Node* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
// True if the registers are valid for entry to a block. There should be
// no frame-external references to eax, ebx, ecx, edx, or edi.
bool HasValidEntryRegisters();
#endif
bool is_eval_; // Tells whether code is generated for eval.
Handle<Script> script_;
List<DeferredCode*> deferred_;
// Assembler
MacroAssembler* masm_; // to generate code
// Code generation state
Scope* scope_;
VirtualFrame* frame_;
RegisterAllocator* allocator_;
CodeGenState* state_;
int loop_nesting_;
// Jump targets.
// The target of the return from the function.
BreakTarget function_return_;
// True if the function return is shadowed (ie, jumping to the target
// function_return_ does not jump to the true function return, but rather
// to some unlinking code).
bool function_return_is_shadowed_;
// True when we are in code that expects the virtual frame to be fully
// spilled. Some virtual frame functions are disabled in DEBUG builds when
// called from spilled code, because they do not leave the virtual frame
// in a spilled state.
bool in_spilled_code_;
static InlineRuntimeLUT kInlineRuntimeLUT[];
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
friend class Result;
friend class CodeGeneratorPatcher; // Used in test-log-ia32.cc
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
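The kMaxSmiInlinedBits / LoadUnsafeSmi members declared in the class above limit every emitted immediate to 16 attacker-chosen bits. One way to meet that constraint, written as ordinary C++ arithmetic (an illustration of the idea, not necessarily the exact instruction sequence LoadUnsafeSmi emits):

#include <cstdint>

// Rebuild a 32-bit user-supplied constant from two 16-bit halves so that no
// single immediate operand carries more than 16 bits of user-controlled data.
uint32_t LoadUnsafeSmiSketch(uint32_t user_constant) {
  uint32_t high = user_constant >> 16;       // first 16-bit immediate
  uint32_t low  = user_constant & 0xffffu;   // second 16-bit immediate
  uint32_t reg = high;                       // mov reg, high
  reg <<= 16;                                // shl reg, 16
  reg |= low;                                // or  reg, low
  return reg;                                // reg == user_constant
}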
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_
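The fast-case switch constants declared in CodeGenerator above imply a simple admissibility test: require at least kFastSwitchMinCaseCount cases and a label range no larger than kFastSwitchMaxOverheadFactor times the case count. A sketch of that test (TryGenerateFastCaseSwitchStatement presumably also requires every label to be a constant smi; only the density heuristic is shown here):

// Density heuristic implied by the constants in codegen-ia32.h above.
bool AllowFastCaseSwitch(int case_count, int min_label, int max_label) {
  const int kFastSwitchMaxOverheadFactor = 5;
  const int kFastSwitchMinCaseCount = 5;
  int range = max_label - min_label + 1;     // size of the jump table
  return case_count >= kFastSwitchMinCaseCount &&
         range <= kFastSwitchMaxOverheadFactor * case_count;
}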

0
deps/v8/src/cpu-ia32.cc → deps/v8/src/ia32/cpu-ia32.cc

4
deps/v8/src/debug-ia32.cc → deps/v8/src/ia32/debug-ia32.cc

@ -33,6 +33,7 @@
namespace v8 { namespace internal { namespace v8 { namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
// A debug break in the frame exit code is identified by a call instruction. // A debug break in the frame exit code is identified by a call instruction.
bool BreakLocationIterator::IsDebugBreakAtReturn() { bool BreakLocationIterator::IsDebugBreakAtReturn() {
@ -67,7 +68,7 @@ bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
} }
#define __ masm-> #define __ ACCESS_MASM(masm)
static void Generate_DebugBreakCallHelper(MacroAssembler* masm, static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
@ -214,5 +215,6 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
#undef __ #undef __
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal } } // namespace v8::internal

0
deps/v8/src/disasm-ia32.cc → deps/v8/src/ia32/disasm-ia32.cc

0
deps/v8/src/frames-ia32.cc → deps/v8/src/ia32/frames-ia32.cc

6
deps/v8/src/frames-ia32.h → deps/v8/src/ia32/frames-ia32.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_FRAMES_IA32_H_ #ifndef V8_IA32_FRAMES_IA32_H_
#define V8_FRAMES_IA32_H_ #define V8_IA32_FRAMES_IA32_H_
namespace v8 { namespace internal { namespace v8 { namespace internal {
@ -288,4 +288,4 @@ inline Object* JavaScriptFrame::function_slot_object() const {
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_FRAMES_IA32_H_ #endif // V8_IA32_FRAMES_IA32_H_

112
deps/v8/src/ic-ia32.cc → deps/v8/src/ia32/ic-ia32.cc

@ -38,7 +38,7 @@ namespace v8 { namespace internal {
// Static IC stub generators. // Static IC stub generators.
// //
#define __ masm-> #define __ ACCESS_MASM(masm)
// Helper function used to load a property from a dictionary backing storage. // Helper function used to load a property from a dictionary backing storage.
@ -91,7 +91,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
// Compute the masked index: (hash + i + i * i) & mask. // Compute the masked index: (hash + i + i * i) & mask.
__ mov(r1, FieldOperand(name, String::kLengthOffset)); __ mov(r1, FieldOperand(name, String::kLengthOffset));
__ shr(r1, String::kHashShift); __ shr(r1, String::kHashShift);
if (i > 0) __ add(Operand(r1), Immediate(Dictionary::GetProbeOffset(i))); if (i > 0) {
__ add(Operand(r1), Immediate(Dictionary::GetProbeOffset(i)));
}
__ and_(r1, Operand(r2)); __ and_(r1, Operand(r2));
// Scale the index by multiplying by the element size. // Scale the index by multiplying by the element size.
@ -121,23 +123,19 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
} }
// Helper function used to check that a value is either not a function // Helper function used to check that a value is either not an object
// or is loaded if it is a function. // or is loaded if it is an object.
static void GenerateCheckNonFunctionOrLoaded(MacroAssembler* masm, Label* miss, static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
Register value, Register scratch) { Register value, Register scratch) {
Label done; Label done;
// Check if the value is a Smi. // Check if the value is a Smi.
__ test(value, Immediate(kSmiTagMask)); __ test(value, Immediate(kSmiTagMask));
__ j(zero, &done, not_taken); __ j(zero, &done, not_taken);
// Check if the value is a function. // Check if the object has been loaded.
__ CmpObjectType(value, JS_FUNCTION_TYPE, scratch); __ mov(scratch, FieldOperand(value, JSFunction::kMapOffset));
__ j(not_equal, &done, taken); __ mov(scratch, FieldOperand(scratch, Map::kBitField2Offset));
// Check if the function has been loaded. __ test(scratch, Immediate(1 << Map::kNeedsLoading));
__ mov(scratch, FieldOperand(value, JSFunction::kSharedFunctionInfoOffset)); __ j(not_zero, miss, not_taken);
__ mov(scratch,
FieldOperand(scratch, SharedFunctionInfo::kLazyLoadDataOffset));
__ cmp(scratch, Factory::undefined_value());
__ j(not_equal, miss, not_taken);
__ bind(&done); __ bind(&done);
} }
@ -266,7 +264,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ j(not_zero, &slow, not_taken); __ j(not_zero, &slow, not_taken);
// Probe the dictionary leaving result in ecx. // Probe the dictionary leaving result in ecx.
GenerateDictionaryLoad(masm, &slow, ebx, ecx, edx, eax); GenerateDictionaryLoad(masm, &slow, ebx, ecx, edx, eax);
GenerateCheckNonFunctionOrLoaded(masm, &slow, ecx, edx); GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
__ mov(eax, Operand(ecx)); __ mov(eax, Operand(ecx));
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1); __ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0); __ ret(0);
@ -491,10 +489,10 @@ static void GenerateNormalHelper(MacroAssembler* masm,
__ j(not_equal, miss, not_taken); __ j(not_equal, miss, not_taken);
// Check that the function has been loaded. // Check that the function has been loaded.
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset)); __ mov(edx, FieldOperand(edi, JSFunction::kMapOffset));
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kLazyLoadDataOffset)); __ mov(edx, FieldOperand(edx, Map::kBitField2Offset));
__ cmp(edx, Factory::undefined_value()); __ test(edx, Immediate(1 << Map::kNeedsLoading));
__ j(not_equal, miss, not_taken); __ j(not_zero, miss, not_taken);
// Patch the receiver with the global proxy if necessary. // Patch the receiver with the global proxy if necessary.
if (is_global_object) { if (is_global_object) {
@ -681,7 +679,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Search the dictionary placing the result in eax. // Search the dictionary placing the result in eax.
__ bind(&probe); __ bind(&probe);
GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx); GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx);
GenerateCheckNonFunctionOrLoaded(masm, &miss, eax, edx); GenerateCheckNonObjectOrLoaded(masm, &miss, eax, edx);
__ ret(0); __ ret(0);
// Global object access: Check access rights. // Global object access: Check access rights.
@ -727,24 +725,70 @@ void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
} }
void KeyedLoadIC::PatchInlinedMapCheck(Address address, Object* value) { // One byte opcode for test eax,0xXXXXXXXX.
static const byte kTestEaxByte = 0xA9; static const byte kTestEaxByte = 0xA9;
void LoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined inobject property load (if
// present) to guarantee failure by holding an invalid map (the null
// value). The offset can be patched to anything.
PatchInlinedLoad(address, Heap::null_value(), kMaxInt);
}
void KeyedLoadIC::ClearInlinedVersion(Address address) {
// Insert null as the map to check for to make sure the map check fails
// sending control flow to the IC instead of the inlined version.
PatchInlinedLoad(address, Heap::null_value());
}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// The address of the instruction following the call.
Address test_instruction_address = address + 4;
// If the instruction following the call is not a test eax, nothing
// was inlined.
if (*test_instruction_address != kTestEaxByte) return false;
Address delta_address = test_instruction_address + 1;
// The delta to the start of the map check instruction.
int delta = *reinterpret_cast<int*>(delta_address);
// The map address is the last 4 bytes of the 7-byte
// operand-immediate compare instruction, so we add 3 to get the
// offset to the last 4 bytes.
Address map_address = test_instruction_address + delta + 3;
*(reinterpret_cast<Object**>(map_address)) = map;
// The offset is in the last 4 bytes of a six byte
// memory-to-register move instruction, so we add 2 to get the
// offset to the last 4 bytes.
Address offset_address =
test_instruction_address + delta + kOffsetToLoadInstruction + 2;
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
return true;
}
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
Address test_instruction_address = address + 4; // 4 = stub address Address test_instruction_address = address + 4; // 4 = stub address
// The keyed load has a fast inlined case if the IC call instruction // The keyed load has a fast inlined case if the IC call instruction
// is immediately followed by a test instruction. // is immediately followed by a test instruction.
if (*test_instruction_address == kTestEaxByte) { if (*test_instruction_address != kTestEaxByte) return false;
// Fetch the offset from the test instruction to the map cmp // Fetch the offset from the test instruction to the map cmp
// instruction. This offset is stored in the last 4 bytes of the // instruction. This offset is stored in the last 4 bytes of the 5
// 5 byte test instruction. // byte test instruction.
Address offset_address = test_instruction_address + 1; Address delta_address = test_instruction_address + 1;
int offset_value = *(reinterpret_cast<int*>(offset_address)); int delta = *reinterpret_cast<int*>(delta_address);
// Compute the map address. The map address is in the last 4 // Compute the map address. The map address is in the last 4 bytes
// bytes of the 7-byte operand-immediate compare instruction, so // of the 7-byte operand-immediate compare instruction, so we add 3
// we add 3 to the offset to get the map address. // to the offset to get the map address.
Address map_address = test_instruction_address + offset_value + 3; Address map_address = test_instruction_address + delta + 3;
// patch the map check. // Patch the map check.
(*(reinterpret_cast<Object**>(map_address))) = value; *(reinterpret_cast<Object**>(map_address)) = map;
} return true;
} }
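The two PatchInlinedLoad functions above navigate raw instruction bytes: a negative delta stored in the immediate of the 5-byte test eax instruction points back at the patch site, the map word occupies the last 4 bytes of the 7-byte cmp, and (for named loads) the field offset occupies the last 4 bytes of the 6-byte mov. A toy walk-through of that address arithmetic (operating on an ordinary byte buffer, not a real code object; the function and parameter names are invented):

#include <cstdint>
#include <cstring>

const uint8_t kTestEaxByte = 0xA9;

// "test_instruction_address" is the instruction following the IC call, i.e.
// address + 4 in the code above.  Returns false when nothing was inlined.
bool PatchInlinedMapCheckSketch(uint8_t* test_instruction_address,
                                uint32_t new_map_word) {
  if (*test_instruction_address != kTestEaxByte) return false;
  // The delta lives in the last 4 bytes of the 5-byte test instruction and is
  // negative: the inlined map check precedes the call.
  int32_t delta;
  std::memcpy(&delta, test_instruction_address + 1, sizeof(delta));
  // The map word is the last 4 bytes of the 7-byte operand-immediate compare.
  uint8_t* map_address = test_instruction_address + delta + 3;
  std::memcpy(map_address, &new_map_word, sizeof(new_map_word));
  return true;
}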

12
deps/v8/src/jump-target-ia32.cc → deps/v8/src/ia32/jump-target-ia32.cc

@ -35,7 +35,7 @@ namespace v8 { namespace internal {
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// JumpTarget implementation. // JumpTarget implementation.
#define __ masm_-> #define __ ACCESS_MASM(masm_)
void JumpTarget::DoJump() { void JumpTarget::DoJump() {
ASSERT(cgen_ != NULL); ASSERT(cgen_ != NULL);
@ -115,11 +115,13 @@ void JumpTarget::DoBranch(Condition cc, Hint hint) {
__ bind(&original_fall_through); __ bind(&original_fall_through);
} else { } else {
// Forward branch. A copy of the current frame is added to the end // Forward branch. A copy of the current frame is added to the end of the
// of the list of frames reaching the target block and a branch to // list of frames reaching the target block and a branch to the merge code
// the merge code is emitted. // is emitted. Use masm_-> instead of __ as forward branches are expected
// to be a fixed size (no inserted coverage-checking instructions please).
// This is used in Reference::GetValue.
AddReachingFrame(new VirtualFrame(cgen_->frame())); AddReachingFrame(new VirtualFrame(cgen_->frame()));
__ j(cc, &merge_labels_.last(), hint); masm_->j(cc, &merge_labels_.last(), hint);
is_linked_ = true; is_linked_ = true;
} }
} }

11
deps/v8/src/macro-assembler-ia32.cc → deps/v8/src/ia32/macro-assembler-ia32.cc

@ -216,6 +216,7 @@ void MacroAssembler::RecordWrite(Register object, int offset,
} }
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::SaveRegistersToMemory(RegList regs) { void MacroAssembler::SaveRegistersToMemory(RegList regs) {
ASSERT((regs & ~kJSCallerSaved) == 0); ASSERT((regs & ~kJSCallerSaved) == 0);
// Copy the content of registers to memory location. // Copy the content of registers to memory location.
@ -290,7 +291,7 @@ void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
} }
} }
} }
#endif
void MacroAssembler::Set(Register dst, const Immediate& x) { void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) { if (x.is_zero()) {
@ -378,6 +379,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
mov(edi, Operand(eax)); mov(edi, Operand(eax));
lea(esi, Operand(ebp, eax, times_4, offset)); lea(esi, Operand(ebp, eax, times_4, offset));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory // Save the state of all registers to the stack from the memory
// location. This is needed to allow nested break points. // location. This is needed to allow nested break points.
if (type == StackFrame::EXIT_DEBUG) { if (type == StackFrame::EXIT_DEBUG) {
@ -389,6 +391,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// associated with this issue). // associated with this issue).
PushRegistersFromMemory(kJSCallerSaved); PushRegistersFromMemory(kJSCallerSaved);
} }
#endif
// Reserve space for two arguments: argc and argv. // Reserve space for two arguments: argc and argv.
sub(Operand(esp), Immediate(2 * kPointerSize)); sub(Operand(esp), Immediate(2 * kPointerSize));
@ -406,6 +409,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
void MacroAssembler::LeaveExitFrame(StackFrame::Type type) { void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from // Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points. // the stack. This is needed to allow nested break points.
if (type == StackFrame::EXIT_DEBUG) { if (type == StackFrame::EXIT_DEBUG) {
@ -416,6 +420,7 @@ void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
lea(ebx, Operand(ebp, kOffset)); lea(ebx, Operand(ebp, kOffset));
CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved); CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved);
} }
#endif
// Get the return address from the stack and restore the frame pointer. // Get the return address from the stack and restore the frame pointer.
mov(ecx, Operand(ebp, 1 * kPointerSize)); mov(ecx, Operand(ebp, 1 * kPointerSize));
@ -427,9 +432,9 @@ void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
// Restore current context from top and clear it in debug mode. // Restore current context from top and clear it in debug mode.
ExternalReference context_address(Top::k_context_address); ExternalReference context_address(Top::k_context_address);
mov(esi, Operand::StaticVariable(context_address)); mov(esi, Operand::StaticVariable(context_address));
if (kDebug) { #ifdef DEBUG
mov(Operand::StaticVariable(context_address), Immediate(0)); mov(Operand::StaticVariable(context_address), Immediate(0));
} #endif
// Push the return address to get ready to return. // Push the return address to get ready to return.
push(ecx); push(ecx);

371
deps/v8/src/ia32/macro-assembler-ia32.h

@ -0,0 +1,371 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
#include "assembler.h"
namespace v8 { namespace internal {
// Forward declaration.
class JumpTarget;
// Helper types to make flags easier to read at call sites.
enum InvokeFlag {
CALL_FUNCTION,
JUMP_FUNCTION
};
enum CodeLocation {
IN_JAVASCRIPT,
IN_JS_ENTRY,
IN_C_ENTRY
};
enum HandlerType {
TRY_CATCH_HANDLER,
TRY_FINALLY_HANDLER,
JS_ENTRY_HANDLER
};
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
MacroAssembler(void* buffer, int size);
// ---------------------------------------------------------------------------
// GC Support
// Set the remembered set bit for [object+offset].
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
// All registers are clobbered by the operation.
void RecordWrite(Register object,
int offset,
Register value,
Register scratch);
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
void SaveRegistersToMemory(RegList regs);
void RestoreRegistersFromMemory(RegList regs);
void PushRegistersFromMemory(RegList regs);
void PopRegistersToMemory(RegList regs);
void CopyRegistersFromStackToMemory(Register base,
Register scratch,
RegList regs);
#endif
// ---------------------------------------------------------------------------
// Activation frames
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
// Enter specific kind of exit frame; either EXIT or
// EXIT_DEBUG. Expects the number of arguments in register eax and
// sets up the number of arguments in register edi and the pointer
// to the first argument in register esi.
void EnterExitFrame(StackFrame::Type type);
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
// argument in register esi.
void LeaveExitFrame(StackFrame::Type type);
// ---------------------------------------------------------------------------
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(const Operand& code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
// Store the code object for the given builtin in the target register.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
// Expression support
void Set(Register dst, const Immediate& x);
void Set(const Operand& dst, const Immediate& x);
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, and jz).
void FCmp();
// ---------------------------------------------------------------------------
// Exception handling
// Push a new try handler and link into try handler chain.
// The return address must be pushed before calling this helper.
// On exit, eax contains TOS (next_sp).
void PushTryHandler(CodeLocation try_location, HandlerType type);
// ---------------------------------------------------------------------------
// Inline caching support
// Generates code that verifies that the maps of objects in the
// prototype chain of the object haven't changed since the code was
// generated and branches to the miss label if any map has. If
// necessary the function also generates code for a security check
// in case of global object holders. The scratch and holder
// registers are always clobbered, but the object register is only
// clobbered if it is the same as the holder register. The function
// returns a register containing the holder - either object_reg or
// holder_reg.
Register CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
Register scratch, Label* miss);
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, but the scratch register is clobbered.
void CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss);
// ---------------------------------------------------------------------------
// Support functions.
// Check if result is zero and op is negative.
void NegativeZeroTest(Register result, Register op, Label* then_label);
// Check if result is zero and op is negative in code using jump targets.
void NegativeZeroTest(CodeGenerator* cgen,
Register result,
Register op,
JumpTarget* then_target);
// Check if result is zero and any of op1 and op2 are negative.
// Register scratch is destroyed, and it must be different from op2.
void NegativeZeroTest(Register result, Register op1, Register op2,
Register scratch, Label* then_label);
// Try to get function prototype of a function and puts the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
// function register will be untouched; the other registers may be
// clobbered.
void TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
Label* miss);
// Generates code for reporting that an illegal operation has
// occurred.
void IllegalOperation(int num_arguments);
// ---------------------------------------------------------------------------
// Runtime calls
// Call a code stub.
void CallStub(CodeStub* stub);
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
// Call a runtime routine.
// Eventually this should be used for all C calls.
void CallRuntime(Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id, int num_arguments);
// Tail call of a runtime routine (jump).
// Like JumpToBuiltin, but also takes care of passing the number
// of arguments.
void TailCallRuntime(const ExternalReference& ext, int num_arguments);
// Jump to the builtin routine.
void JumpToBuiltin(const ExternalReference& ext);
// ---------------------------------------------------------------------------
// Utilities
void Ret();
struct Unresolved {
int pc;
uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
const char* name;
};
List<Unresolved>* unresolved() { return &unresolved_; }
Handle<Object> CodeObject() { return code_object_; }
// ---------------------------------------------------------------------------
// StatsCounter support
void SetCounter(StatsCounter* counter, int value);
void IncrementCounter(StatsCounter* counter, int value);
void DecrementCounter(StatsCounter* counter, int value);
// ---------------------------------------------------------------------------
// Debugging
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg);
// Print a message to stdout and abort execution.
void Abort(const char* msg);
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
private:
List<Unresolved> unresolved_;
bool generating_stub_;
bool allow_stub_calls_;
Handle<Object> code_object_; // This handle will be patched with the
// code object on installation.
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
const Operand& code_operand,
Label* done,
InvokeFlag flag);
// Get the code for the given builtin. Sets the 'resolved' flag to
// indicate whether the function could be resolved.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
};
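// A minimal sketch (not part of this header) of how a code generator might
// drive the helpers declared above. The register choices and the caller's
// label are illustrative assumptions, not code from this change.
static void GenerateJSFunctionCheck(MacroAssembler* masm, Label* miss) {
  // Smi check first: CmpObjectType expects a heap object in eax.
  masm->test(eax, Immediate(kSmiTagMask));
  masm->j(zero, miss);
  // Compare the instance type of the object in eax; its map ends up in ebx.
  masm->CmpObjectType(eax, JS_FUNCTION_TYPE, ebx);
  masm->j(not_equal, miss);
}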
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to
// emit relocation information. If any of these constraints are violated an
// assertion fails.
class CodePatcher {
public:
CodePatcher(byte* address, int size);
virtual ~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
private:
byte* address_; // The address of the code being patched.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
};
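// A minimal sketch (not part of this header) of the intended use: patch
// exactly two bytes at 'site' with int3 breakpoints. The byte count passed
// to the constructor must match what the patch emits, and no relocation
// information may be emitted. The function name is illustrative only.
static void PatchSiteWithBreakpoints(byte* site) {
  CodePatcher patcher(site, 2);  // exactly 2 bytes will be emitted
  patcher.masm()->int3();        // 1 byte
  patcher.masm()->int3();        // 1 byte
}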
// -----------------------------------------------------------------------------
// Static helper functions.
// Generate an Operand for loading a field from an object.
static inline Operand FieldOperand(Register object, int offset) {
return Operand(object, offset - kHeapObjectTag);
}
// Generate an Operand for loading an indexed field from an object.
static inline Operand FieldOperand(Register object,
Register index,
ScaleFactor scale,
int offset) {
return Operand(object, index, scale, offset - kHeapObjectTag);
}
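// Illustrative only (not part of this header): typical uses of the helpers
// above, assuming the object layout constants from objects.h are visible.
// The registers chosen here are assumptions.
static void LoadMapAndElement(MacroAssembler* masm) {
  // Load the map of the heap object in edx.
  masm->mov(ebx, FieldOperand(edx, HeapObject::kMapOffset));
  // Load element eax (a smi index, hence times_2 on ia32) from the
  // FixedArray in ecx.
  masm->mov(eax, FieldOperand(ecx, eax, times_2, FixedArray::kHeaderSize));
}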
#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) { \
byte* ia32_coverage_function = \
reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
masm->pushfd(); \
masm->pushad(); \
masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY); \
masm->pop(eax); \
masm->popad(); \
masm->popfd(); \
} \
masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
} } // namespace v8::internal
#endif // V8_IA32_MACRO_ASSEMBLER_IA32_H_
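The ia32 files below switch their local shorthand from #define __ masm_-> to #define __ ACCESS_MASM(masm_), so the GENERATED_CODE_COVERAGE hook above can wrap every emitted instruction. A minimal sketch of the convention; the generator function and the register choices are illustrative assumptions, not code from this change:

#define __ ACCESS_MASM(masm)

static void GenerateReturnFirstArgument(MacroAssembler* masm) {
  // With coverage disabled each '__' is just masm->; with coverage enabled
  // it also logs the emitting source line before the instruction.
  __ mov(eax, Operand(esp, kPointerSize));  // load the first stack argument
  __ Ret();
}

#undef __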

16
deps/v8/src/regexp-macro-assembler-ia32.cc → deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@@ -32,8 +32,8 @@
 #include "regexp-stack.h"
 #include "macro-assembler.h"
 #include "regexp-macro-assembler.h"
-#include "macro-assembler-ia32.h"
-#include "regexp-macro-assembler-ia32.h"
+#include "ia32/macro-assembler-ia32.h"
+#include "ia32/regexp-macro-assembler-ia32.h"

 namespace v8 { namespace internal {

@@ -86,7 +86,7 @@ namespace v8 { namespace internal {
  * byte* stack_area_top)
  */

-#define __ masm_->
+#define __ ACCESS_MASM(masm_)

 RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
     Mode mode,

@@ -974,7 +974,7 @@ RegExpMacroAssemblerIA32::Result RegExpMacroAssemblerIA32::Match(
   int start_offset = previous_index;
   int end_offset = subject_ptr->length();
-  bool is_ascii = StringShape(*subject).IsAsciiRepresentation();
+  bool is_ascii = subject->IsAsciiRepresentation();
   if (StringShape(subject_ptr).IsCons()) {
     subject_ptr = ConsString::cast(subject_ptr)->first();

@@ -985,7 +985,7 @@ RegExpMacroAssemblerIA32::Result RegExpMacroAssemblerIA32::Match(
     subject_ptr = slice->buffer();
   }
   // Ensure that an underlying string has the same ascii-ness.
-  ASSERT(StringShape(subject_ptr).IsAsciiRepresentation() == is_ascii);
+  ASSERT(subject_ptr->IsAsciiRepresentation() == is_ascii);
   ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
   // String is now either Sequential or External
   int char_size_shift = is_ascii ? 0 : 1;

@@ -1112,7 +1112,7 @@ const byte* RegExpMacroAssemblerIA32::StringCharacterPosition(String* subject,
   ASSERT(subject->IsExternalString() || subject->IsSeqString());
   ASSERT(start_index >= 0);
   ASSERT(start_index <= subject->length());
-  if (StringShape(subject).IsAsciiRepresentation()) {
+  if (subject->IsAsciiRepresentation()) {
     const byte* address;
     if (StringShape(subject).IsExternal()) {
       const char* data = ExternalAsciiString::cast(subject)->resource()->data();

@@ -1152,7 +1152,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
   Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
   // Current string.
-  bool is_ascii = StringShape(*subject).IsAsciiRepresentation();
+  bool is_ascii = subject->IsAsciiRepresentation();
   ASSERT(re_code->instruction_start() <= *return_address);
   ASSERT(*return_address <=

@@ -1171,7 +1171,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
   }
   // String might have changed.
-  if (StringShape(*subject).IsAsciiRepresentation() != is_ascii) {
+  if (subject->IsAsciiRepresentation() != is_ascii) {
     // If we changed between an ASCII and an UC16 string, the specialized
     // code cannot be used, and we need to restart regexp matching from
     // scratch (including, potentially, compiling a new version of the code).

6
deps/v8/src/regexp-macro-assembler-ia32.h → deps/v8/src/ia32/regexp-macro-assembler-ia32.h

@@ -25,8 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef REGEXP_MACRO_ASSEMBLER_IA32_H_
-#define REGEXP_MACRO_ASSEMBLER_IA32_H_
+#ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
+#define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_

 namespace v8 { namespace internal {

@@ -282,4 +282,4 @@ class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
 }}  // namespace v8::internal

-#endif /* REGEXP_MACRO_ASSEMBLER_IA32_H_ */
+#endif  // V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_

6
deps/v8/src/register-allocator-ia32.cc → deps/v8/src/ia32/register-allocator-ia32.cc

@@ -97,6 +97,12 @@ void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
 }

+bool RegisterAllocator::IsReserved(int reg_code) {
+  // Test below relies on the order of register codes.
+  return reg_code >= esp.code() && reg_code <= esi.code();
+}
+
 void RegisterAllocator::Initialize() {
   Reset();
   // The following register is live on function entry, saved in the

0
deps/v8/src/simulator-ia32.cc → deps/v8/src/ia32/simulator-ia32.cc

6
deps/v8/src/simulator-ia32.h → deps/v8/src/ia32/simulator-ia32.h

@@ -25,8 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_SIMULATOR_IA32_H_
-#define V8_SIMULATOR_IA32_H_
+#ifndef V8_IA32_SIMULATOR_IA32_H_
+#define V8_IA32_SIMULATOR_IA32_H_

 // Since there is no simulator for the ia32 architecture the only thing we can

@@ -44,4 +44,4 @@
   (reinterpret_cast<uintptr_t>(this) >= limit ? \
    reinterpret_cast<uintptr_t>(this) - limit : 0)

-#endif  // V8_SIMULATOR_IA32_H_
+#endif  // V8_IA32_SIMULATOR_IA32_H_

16
deps/v8/src/stub-cache-ia32.cc → deps/v8/src/ia32/stub-cache-ia32.cc

@@ -33,7 +33,7 @@
 namespace v8 { namespace internal {

-#define __ masm->
+#define __ ACCESS_MASM(masm)

 static void ProbeTable(MacroAssembler* masm,

@@ -256,7 +256,7 @@ void StubCompiler::GenerateLoadField(MacroAssembler* masm,
   // Check that the maps haven't changed.
   Register reg =
-      __ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);

   // Get the value from the properties.
   GenerateFastPropertyLoad(masm, eax, reg, holder, index);

@@ -279,7 +279,7 @@ void StubCompiler::GenerateLoadCallback(MacroAssembler* masm,
   // Check that the maps haven't changed.
   Register reg =
-      __ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);

   // Push the arguments on the JS stack of the caller.
   __ pop(scratch2);  // remove return address

@@ -310,7 +310,7 @@ void StubCompiler::GenerateLoadConstant(MacroAssembler* masm,
   // Check that the maps haven't changed.
   Register reg =
-      __ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);

   // Return the constant value.
   __ mov(eax, Handle<Object>(value));

@@ -332,7 +332,7 @@ void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
   // Check that the maps haven't changed.
   Register reg =
-      __ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
+      masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);

   // Push the arguments on the JS stack of the caller.
   __ pop(scratch2);  // remove return address

@@ -440,7 +440,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
 #undef __
-#define __ masm()->
+#define __ ACCESS_MASM(masm())

 // TODO(1241006): Avoid having lazy compile stubs specialized by the

@@ -485,7 +485,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
   // Do the right check and compute the holder register.
   Register reg =
-      __ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
+      masm()->CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
   GenerateFastPropertyLoad(masm(), edi, reg, holder, index);

@@ -656,7 +656,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
   // Check that maps have not changed and compute the holder register.
   Register reg =
-      __ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
+      masm()->CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);

   // Enter an internal frame.
   __ EnterInternalFrame();
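The CheckMaps call sites above stop using the __ shorthand because the coverage flavour of ACCESS_MASM expands to a statement block followed by masm->, which cannot appear inside an initializer expression; only whole statements can keep using __. A hedged illustration of the distinction (fragment only, names taken from the context above):

#define __ ACCESS_MASM(masm)

// Fine: '__' begins a complete statement.
__ pop(scratch2);

// Not fine with GENERATED_CODE_COVERAGE: the expansion starts with a block,
// so it cannot sit on the right-hand side of an assignment. Call through
// masm-> directly instead:
Register reg =
    masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);

#undef __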

156
deps/v8/src/virtual-frame-ia32.cc → deps/v8/src/ia32/virtual-frame-ia32.cc

@@ -33,7 +33,7 @@
 namespace v8 { namespace internal {

-#define __ masm_->
+#define __ ACCESS_MASM(masm_)

 // -------------------------------------------------------------------------
 // VirtualFrame implementation.

@@ -158,6 +158,28 @@ void VirtualFrame::SyncElementByPushing(int index) {
 }

+// Clear the dirty bits for the range of elements in
+// [min(stack_pointer_ + 1,begin), end].
+void VirtualFrame::SyncRange(int begin, int end) {
+  ASSERT(begin >= 0);
+  ASSERT(end < elements_.length());
+  // Sync elements below the range if they have not been materialized
+  // on the stack.
+  int start = Min(begin, stack_pointer_ + 1);
+  // If positive we have to adjust the stack pointer.
+  int delta = end - stack_pointer_;
+  if (delta > 0) {
+    stack_pointer_ = end;
+    __ sub(Operand(esp), Immediate(delta * kPointerSize));
+  }
+  for (int i = start; i <= end; i++) {
+    if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
+  }
+}
+
 void VirtualFrame::MergeTo(VirtualFrame* expected) {
   Comment cmnt(masm_, "[ Merge frame");
   // We should always be merging the code generator's current frame to an
@@ -288,64 +310,33 @@ void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
   // We have already done X-to-memory moves.
   ASSERT(stack_pointer_ >= expected->stack_pointer_);

-  // Perform register-to-register moves.
-  int start = 0;
-  int end = elements_.length() - 1;
-  bool any_moves_blocked;  // Did we fail to make some moves this iteration?
-  bool should_break_cycles = false;
-  bool any_moves_made;  // Did we make any progress this iteration?
-  do {
-    any_moves_blocked = false;
-    any_moves_made = false;
-    int first_move_blocked = kIllegalIndex;
-    int last_move_blocked = kIllegalIndex;
-    for (int i = start; i <= end; i++) {
-      FrameElement source = elements_[i];
-      FrameElement target = expected->elements_[i];
-      if (source.is_register() && target.is_register()) {
-        if (target.reg().is(source.reg())) {
-          if (target.is_synced() && !source.is_synced()) {
-            __ mov(Operand(ebp, fp_relative(i)), source.reg());
-          }
-          elements_[i] = target;
-        } else {
-          // We need to move source to target.
-          if (is_used(target.reg())) {
-            // The move is blocked because the target contains valid data.
-            // If we are stuck with only cycles remaining, then we spill source.
-            // Otherwise, we just need more iterations.
-            if (should_break_cycles) {
-              SpillElementAt(i);
-              should_break_cycles = false;
-            } else {  // Record a blocked move.
-              if (!any_moves_blocked) {
-                first_move_blocked = i;
-              }
-              last_move_blocked = i;
-              any_moves_blocked = true;
-            }
-          } else {
-            // The move is not blocked. This frame element can be moved from
-            // its source register to its target register.
-            if (target.is_synced() && !source.is_synced()) {
-              SyncElementAt(i);
-            }
-            Use(target.reg(), i);
-            Unuse(source.reg());
-            elements_[i] = target;
-            __ mov(target.reg(), source.reg());
-            any_moves_made = true;
-          }
-        }
-      }
-    }
-    // Update control flags for next iteration.
-    should_break_cycles = (any_moves_blocked && !any_moves_made);
-    if (any_moves_blocked) {
-      start = first_move_blocked;
-      end = last_move_blocked;
-    }
-  } while (any_moves_blocked);
+  for (int i = 0; i < kNumRegisters; i++) {
+    // Move the right value into register i if it is currently in a register.
+    int index = expected->register_locations_[i];
+    int use_index = register_locations_[i];
+    // Fast check if register is unused in target or already correct
+    if (index != kIllegalIndex
+        && index != use_index
+        && elements_[index].is_register()) {
+      Register source = elements_[index].reg();
+      Register target = { i };
+      if (use_index == kIllegalIndex) {  // Target is currently unused.
+        // Copy contents of source from source to target.
+        // Set frame element register to target.
+        elements_[index].set_reg(target);
+        Use(target, index);
+        Unuse(source);
+        __ mov(target, source);
+      } else {
+        // Exchange contents of registers source and target.
+        elements_[use_index].set_reg(source);
+        elements_[index].set_reg(target);
+        register_locations_[target.code()] = index;
+        register_locations_[source.code()] = use_index;
+        __ xchg(source, target);
+      }
+    }
+  }
 }
@@ -354,19 +345,22 @@ void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
   // final step and is done from the bottom up so that the backing
   // elements of copies are in their correct locations when we
   // encounter the copies.
-  for (int i = 0; i < elements_.length(); i++) {
-    FrameElement source = elements_[i];
-    FrameElement target = expected->elements_[i];
-    if (target.is_register() && !source.is_register()) {
+  for (int i = 0; i < kNumRegisters; i++) {
+    int index = expected->register_locations_[i];
+    if (index != kIllegalIndex) {
+      FrameElement source = elements_[index];
+      FrameElement target = expected->elements_[index];
       switch (source.type()) {
         case FrameElement::INVALID:  // Fall through.
-        case FrameElement::REGISTER:
           UNREACHABLE();
           break;
+        case FrameElement::REGISTER:
+          ASSERT(source.reg().is(target.reg()));
+          continue;  // Go to next iteration. Skips Use(target.reg()) below.
+          break;
         case FrameElement::MEMORY:
-          ASSERT(i <= stack_pointer_);
-          __ mov(target.reg(), Operand(ebp, fp_relative(i)));
+          ASSERT(index <= stack_pointer_);
+          __ mov(target.reg(), Operand(ebp, fp_relative(index)));
           break;
         case FrameElement::CONSTANT:

@@ -378,11 +372,25 @@ void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
           break;
         case FrameElement::COPY: {
-          FrameElement backing = elements_[source.index()];
+          int backing_index = source.index();
+          FrameElement backing = elements_[backing_index];
           ASSERT(backing.is_memory() || backing.is_register());
           if (backing.is_memory()) {
-            ASSERT(source.index() <= stack_pointer_);
-            __ mov(target.reg(), Operand(ebp, fp_relative(source.index())));
+            ASSERT(backing_index <= stack_pointer_);
+            // Code optimization if backing store should also move
+            // to a register: move backing store to its register first.
+            if (expected->elements_[backing_index].is_register()) {
+              FrameElement new_backing = expected->elements_[backing_index];
+              Register new_backing_reg = new_backing.reg();
+              ASSERT(!is_used(new_backing_reg));
+              elements_[backing_index] = new_backing;
+              Use(new_backing_reg, backing_index);
+              __ mov(new_backing_reg,
+                     Operand(ebp, fp_relative(backing_index)));
+              __ mov(target.reg(), new_backing_reg);
+            } else {
+              __ mov(target.reg(), Operand(ebp, fp_relative(backing_index)));
+            }
           } else {
             __ mov(target.reg(), backing.reg());
           }

@@ -390,11 +398,11 @@ void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
       }
       // Ensure the proper sync state. If the source was memory no
       // code needs to be emitted.
-      if (target.is_synced() && !source.is_memory()) {
-        SyncElementAt(i);
+      if (target.is_synced() && !source.is_synced()) {
+        __ mov(Operand(ebp, fp_relative(index)), target.reg());
       }
-      Use(target.reg(), i);
-      elements_[i] = target;
+      Use(target.reg(), index);
+      elements_[index] = target;
     }
   }
 }
@@ -467,7 +475,7 @@ void VirtualFrame::AllocateStackSlots(int count) {
     // we sync them with the actual frame to allocate space for spilling
     // them later. First sync everything above the stack pointer so we can
     // use pushes to allocate and initialize the locals.
-    SyncRange(stack_pointer_ + 1, elements_.length());
+    SyncRange(stack_pointer_ + 1, elements_.length() - 1);
     Handle<Object> undefined = Factory::undefined_value();
     FrameElement initial_value =
         FrameElement::ConstantElement(undefined, FrameElement::SYNCED);

@@ -615,6 +623,12 @@ void VirtualFrame::StoreToFrameSlotAt(int index) {
     InvalidateFrameSlotAt(index);

+    // InvalidateFrameSlotAt can potentially change any frame element, due
+    // to spilling registers to allocate temporaries in order to preserve
+    // the copy-on-write semantics of aliased elements. Reload top from
+    // the frame.
+    top = elements_[top_index];
+
     if (top.is_copy()) {
       // There are two cases based on the relative positions of the
       // stored-to slot and the backing slot of the top element.
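The elements_.length() - 1 fix in AllocateStackSlots above follows from the new SyncRange contract: both begin and end are inclusive element indices, as the ASSERT(end < elements_.length()) shows. A small illustration of the call (the frame sizes here are assumptions):

// With 10 frame elements and stack_pointer_ == 3, this syncs elements
// 4 through 9 inclusive; passing elements_.length() would trip the ASSERT.
SyncRange(stack_pointer_ + 1, elements_.length() - 1);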

6
deps/v8/src/virtual-frame-ia32.h → deps/v8/src/ia32/virtual-frame-ia32.h

@@ -25,8 +25,8 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#ifndef V8_VIRTUAL_FRAME_IA32_H_
-#define V8_VIRTUAL_FRAME_IA32_H_
+#ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
+#define V8_IA32_VIRTUAL_FRAME_IA32_H_

 #include "register-allocator.h"

@@ -490,4 +490,4 @@ class VirtualFrame : public Malloced {
 } }  // namespace v8::internal

-#endif  // V8_VIRTUAL_FRAME_IA32_H_
+#endif  // V8_IA32_VIRTUAL_FRAME_IA32_H_

4
deps/v8/src/ic-inl.h

@@ -39,6 +39,7 @@ Address IC::address() {
   // Get the address of the call.
   Address result = pc() - Assembler::kTargetAddrToReturnAddrDist;

+#ifdef ENABLE_DEBUGGER_SUPPORT
   // First check if any break points are active if not just return the address
   // of the call.
   if (!Debug::has_break_points()) return result;

@@ -55,6 +56,9 @@ Address IC::address() {
     // No break point here just return the address of the call.
     return result;
   }
+#else
+  return result;
+#endif
 }

37
deps/v8/src/ic.cc

@@ -42,7 +42,7 @@ static char TransitionMarkFromState(IC::State state) {
   switch (state) {
     case UNINITIALIZED: return '0';
     case UNINITIALIZED_IN_LOOP: return 'L';
-    case PREMONOMORPHIC: return '0';
+    case PREMONOMORPHIC: return 'P';
     case MONOMORPHIC: return '1';
     case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
     case MEGAMORPHIC: return 'N';

@@ -100,6 +100,7 @@ IC::IC(FrameDepth depth) {
 }

+#ifdef ENABLE_DEBUGGER_SUPPORT
 Address IC::OriginalCodeAddress() {
   HandleScope scope;
   // Compute the JavaScript frame for the frame pointer of this IC

@@ -126,7 +127,7 @@ Address IC::OriginalCodeAddress() {
   int delta = original_code->instruction_start() - code->instruction_start();
   return addr + delta;
 }
+#endif

 IC::State IC::StateFrom(Code* target, Object* receiver) {
   IC::State state = target->ic_state();

@@ -236,13 +237,14 @@ void KeyedLoadIC::Clear(Address address, Code* target) {
   // Make sure to also clear the map used in inline fast cases. If we
   // do not clear these maps, cached code can keep objects alive
   // through the embedded maps.
-  PatchInlinedMapCheck(address, Heap::null_value());
+  ClearInlinedVersion(address);
   SetTargetAtAddress(address, initialize_stub());
 }

 void LoadIC::Clear(Address address, Code* target) {
   if (target->ic_state() == UNINITIALIZED) return;
+  ClearInlinedVersion(address);
   SetTargetAtAddress(address, initialize_stub());
 }

@@ -356,6 +358,7 @@ Object* CallIC::LoadFunction(State state,
     if (opt->IsJSFunction()) return opt;
   }

+#ifdef ENABLE_DEBUGGER_SUPPORT
   // Handle stepping into a function if step into is active.
   if (Debug::StepInActive()) {
     // Protect the result in a handle as the debugger can allocate and might

@@ -365,6 +368,7 @@ Object* CallIC::LoadFunction(State state,
     Debug::HandleStepIn(function, fp(), false);
     return *function;
   }
+#endif

   return result;
 }

@@ -520,6 +524,31 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
     LOG(SuspectReadEvent(*name, *object));
   }

+  bool can_be_inlined =
+      FLAG_use_ic &&
+      state == PREMONOMORPHIC &&
+      lookup.IsValid() &&
+      lookup.IsLoaded() &&
+      lookup.IsCacheable() &&
+      lookup.holder() == *object &&
+      lookup.type() == FIELD &&
+      !object->IsAccessCheckNeeded();
+
+  if (can_be_inlined) {
+    Map* map = lookup.holder()->map();
+    // Property's index in the properties array. If negative we have
+    // an inobject property.
+    int index = lookup.GetFieldIndex() - map->inobject_properties();
+    if (index < 0) {
+      // Index is an offset from the end of the object.
+      int offset = map->instance_size() + (index * kPointerSize);
+      if (PatchInlinedLoad(address(), map, offset)) {
+        set_target(megamorphic_stub());
+        return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
+      }
+    }
+  }
+
   // Update inline cache and stub cache.
   if (FLAG_use_ic && lookup.IsLoaded()) {
     UpdateCaches(&lookup, state, object, name);

@@ -731,7 +760,7 @@ Object* KeyedLoadIC::Load(State state,
         !object->IsJSValue() &&
         !JSObject::cast(*object)->HasIndexedInterceptor()) {
       Map* map = JSObject::cast(*object)->map();
-      PatchInlinedMapCheck(address(), map);
+      PatchInlinedLoad(address(), map);
     }
   }
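The inlined fast path added to LoadIC::Load above turns a negative property index into an offset from the end of the object. A rough worked example of that arithmetic, with made-up sizes that are not taken from the patch:

// Hypothetical sizes for illustration only.
int InObjectFieldOffset() {
  int pointer_size = 4;                           // kPointerSize on ia32
  int instance_size = 16;                         // map->instance_size()
  int inobject_properties = 2;                    // map->inobject_properties()
  int field_index = 0;                            // lookup.GetFieldIndex()
  int index = field_index - inobject_properties;  // -2: negative, so in-object
  return instance_size + index * pointer_size;    // 16 + (-2 * 4) = 8
}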

18
deps/v8/src/ic.h

@@ -107,9 +107,11 @@ class IC {
   Address fp() const { return fp_; }
   Address pc() const { return *pc_address_; }

+#ifdef ENABLE_DEBUGGER_SUPPORT
   // Computes the address in the original code when the code running is
   // containing break points (calls to DebugBreakXXX builtins).
   Address OriginalCodeAddress();
+#endif

   // Set the call-site target.
   void set_target(Code* code) { SetTargetAtAddress(address(), code); }

@@ -214,6 +216,11 @@ class LoadIC: public IC {
   static void GenerateStringLength(MacroAssembler* masm);
   static void GenerateFunctionPrototype(MacroAssembler* masm);

+  // The offset from the inlined patch site to the start of the
+  // inlined load instruction. It is 7 bytes (test eax, imm) plus
+  // 6 bytes (jne slow_label).
+  static const int kOffsetToLoadInstruction = 13;
+
  private:
   static void Generate(MacroAssembler* masm, const ExternalReference& f);

@@ -236,6 +243,12 @@ class LoadIC: public IC {
   }

   static void Clear(Address address, Code* target);

+  // Clear the use of the inlined version.
+  static void ClearInlinedVersion(Address address);
+
+  static bool PatchInlinedLoad(Address address, Object* map, int index);
+
   friend class IC;
 };

@@ -252,6 +265,9 @@ class KeyedLoadIC: public IC {
   static void GeneratePreMonomorphic(MacroAssembler* masm);
   static void GenerateGeneric(MacroAssembler* masm);

+  // Clear the use of the inlined version.
+  static void ClearInlinedVersion(Address address);
+
  private:
   static void Generate(MacroAssembler* masm, const ExternalReference& f);

@@ -279,7 +295,7 @@ class KeyedLoadIC: public IC {
   // Support for patching the map that is checked in an inlined
   // version of keyed load.
-  static void PatchInlinedMapCheck(Address address, Object* map);
+  static bool PatchInlinedLoad(Address address, Object* map);

   friend class IC;
 };

6
deps/v8/src/interpreter-irregexp.cc

@@ -130,13 +130,13 @@ static void TraceInterpreter(const byte* code_base,

 static int32_t Load32Aligned(const byte* pc) {
-  ASSERT((reinterpret_cast<int>(pc) & 3) == 0);
+  ASSERT((reinterpret_cast<intptr_t>(pc) & 3) == 0);
   return *reinterpret_cast<const int32_t *>(pc);
 }

 static int32_t Load16Aligned(const byte* pc) {
-  ASSERT((reinterpret_cast<int>(pc) & 1) == 0);
+  ASSERT((reinterpret_cast<intptr_t>(pc) & 1) == 0);
   return *reinterpret_cast<const uint16_t *>(pc);
 }

@@ -574,7 +574,7 @@ bool IrregexpInterpreter::Match(Handle<ByteArray> code_array,
   AssertNoAllocation a;
   const byte* code_base = code_array->GetDataStartAddress();
   uc16 previous_char = '\n';
-  if (StringShape(*subject).IsAsciiRepresentation()) {
+  if (subject->IsAsciiRepresentation()) {
     Vector<const char> subject_vector = subject->ToAsciiVector();
     if (start_position != 0) previous_char = subject_vector[start_position - 1];
     return RawMatch(code_base,

