
Upgrade v8 to version 1.2.3.

v0.7.4-release
Ryan committed 16 years ago
commit 3a41367c40
Changed files (lines changed per file):

  62  deps/v8/ChangeLog
 214  deps/v8/SConstruct
 168  deps/v8/include/v8-debug.h
  73  deps/v8/include/v8.h
  22  deps/v8/samples/shell.cc
  38  deps/v8/src/SConscript
  37  deps/v8/src/accessors.cc
   4  deps/v8/src/accessors.h
 185  deps/v8/src/api.cc
   8  deps/v8/src/arm/assembler-arm-inl.h
   2  deps/v8/src/arm/assembler-arm.cc
  40  deps/v8/src/arm/assembler-arm.h
  63  deps/v8/src/arm/builtins-arm.cc
 538  deps/v8/src/arm/codegen-arm.cc
  28  deps/v8/src/arm/codegen-arm.h
  13  deps/v8/src/arm/constants-arm.h
   0  deps/v8/src/arm/cpu-arm.cc
   5  deps/v8/src/arm/debug-arm.cc
   9  deps/v8/src/arm/disasm-arm.cc
   2  deps/v8/src/arm/frames-arm.cc
   6  deps/v8/src/arm/frames-arm.h
  57  deps/v8/src/arm/ic-arm.cc
   2  deps/v8/src/arm/jump-target-arm.cc
  27  deps/v8/src/arm/macro-assembler-arm.cc
  20  deps/v8/src/arm/macro-assembler-arm.h
   2  deps/v8/src/arm/regexp-macro-assembler-arm.cc
   6  deps/v8/src/arm/regexp-macro-assembler-arm.h
   8  deps/v8/src/arm/register-allocator-arm.cc
  95  deps/v8/src/arm/simulator-arm.cc
  14  deps/v8/src/arm/simulator-arm.h
  15  deps/v8/src/arm/stub-cache-arm.cc
  13  deps/v8/src/arm/virtual-frame-arm.cc
   6  deps/v8/src/arm/virtual-frame-arm.h
 167  deps/v8/src/array.js
  56  deps/v8/src/assembler.cc
  37  deps/v8/src/assembler.h
  21  deps/v8/src/ast.cc
   7  deps/v8/src/ast.h
  64  deps/v8/src/bootstrapper.cc
   3  deps/v8/src/builtins.cc
   5  deps/v8/src/builtins.h
   2  deps/v8/src/checks.h
   2  deps/v8/src/code-stubs.h
  90  deps/v8/src/codegen.cc
  20  deps/v8/src/codegen.h
  43  deps/v8/src/compiler.cc
   3  deps/v8/src/compiler.h
   6  deps/v8/src/contexts.h
  27  deps/v8/src/d8.cc
   3  deps/v8/src/d8.h
  37  deps/v8/src/d8.js
  25  deps/v8/src/date-delay.js
   5  deps/v8/src/dateparser-inl.h
  21  deps/v8/src/debug-agent.cc
  28  deps/v8/src/debug-agent.h
  75  deps/v8/src/debug-delay.js
 541  deps/v8/src/debug.cc
 188  deps/v8/src/debug.h
  26  deps/v8/src/execution.cc
  11  deps/v8/src/execution.h
  13  deps/v8/src/factory.cc
   5  deps/v8/src/factory.h
  12  deps/v8/src/frames-inl.h
   8  deps/v8/src/frames.cc
   9  deps/v8/src/func-name-inferrer.cc
  23  deps/v8/src/func-name-inferrer.h
   2  deps/v8/src/global-handles.cc
   5  deps/v8/src/global-handles.h
 110  deps/v8/src/globals.h
  72  deps/v8/src/handles.cc
   9  deps/v8/src/handles.h
   4  deps/v8/src/heap-inl.h
 161  deps/v8/src/heap.cc
   6  deps/v8/src/ia32/assembler-ia32-inl.h
  43  deps/v8/src/ia32/assembler-ia32.cc
   6  deps/v8/src/ia32/assembler-ia32.h
  28  deps/v8/src/ia32/builtins-ia32.cc
 262  deps/v8/src/ia32/codegen-ia32.cc
 633  deps/v8/src/ia32/codegen-ia32.h
   0  deps/v8/src/ia32/cpu-ia32.cc
   4  deps/v8/src/ia32/debug-ia32.cc
   0  deps/v8/src/ia32/disasm-ia32.cc
   0  deps/v8/src/ia32/frames-ia32.cc
   6  deps/v8/src/ia32/frames-ia32.h
 116  deps/v8/src/ia32/ic-ia32.cc
  12  deps/v8/src/ia32/jump-target-ia32.cc
  13  deps/v8/src/ia32/macro-assembler-ia32.cc
 371  deps/v8/src/ia32/macro-assembler-ia32.h
  16  deps/v8/src/ia32/regexp-macro-assembler-ia32.cc
   6  deps/v8/src/ia32/regexp-macro-assembler-ia32.h
   6  deps/v8/src/ia32/register-allocator-ia32.cc
   0  deps/v8/src/ia32/simulator-ia32.cc
   6  deps/v8/src/ia32/simulator-ia32.h
  16  deps/v8/src/ia32/stub-cache-ia32.cc
 160  deps/v8/src/ia32/virtual-frame-ia32.cc
   6  deps/v8/src/ia32/virtual-frame-ia32.h
   4  deps/v8/src/ic-inl.h
  37  deps/v8/src/ic.cc
  18  deps/v8/src/ic.h
   6  deps/v8/src/interpreter-irregexp.cc

62
deps/v8/ChangeLog

@ -1,3 +1,61 @@
2009-05-11: Version 1.2.3

Fixed bug in reporting of out-of-memory situations.
Introduced hidden prototypes on certain builtin prototype objects
such as String.prototype to emulate JSC's behavior of restoring
the original function when deleting functions from those prototype
objects.
Fixed crash bug in the register allocator.

2009-05-04: Version 1.2.2

Fixed bug in array sorting for sparse arrays (issue 326).
Added support for adding a soname when building a shared library
on Linux (issue 151).
Fixed bug caused by morphing internal ASCII strings to external
two-byte strings. Slices over ASCII strings have to forward ASCII
checks to the underlying buffer string.
Allowed API call-as-function handlers to be called as
constructors.
Fixed a crash bug where an external string was disposed but a
slice of the external string survived as a symbol.

2009-04-27: Version 1.2.1

Added EcmaScript 5 JSON object.
Fixed bug in preemption support on ARM.

2009-04-23: Version 1.2.0

Optimized floating-point operations on ARM.
Added a number of extensions to the debugger API.
Changed the enumeration order for unsigned integer keys to always
be numerical order.
Added a "read" extension to the shell sample.
Added support for Array.prototype.reduce and
Array.prototype.reduceRight.
Added an option to the SCons build to control Microsoft Visual C++
link-time code generation.
Fixed a number of bugs (in particular issue 315, issue 316,
issue 317 and issue 318).

2009-04-15: Version 1.1.10

Fixed crash bug that occurred when loading a const variable in the
@ -27,13 +85,13 @@
Changed test-debug/ThreadedDebugging to be non-flaky (issue 96).
Fixed step-in handling for Function.prototype.apply and call in
the debugger (issue 269).
Fixed v8::Object::DeleteHiddenValue to not bail out when there
are no hidden properties.
Added workaround for crash bug, where external symbol table
entries with deleted resources would lead to NPEs when looking
up in the symbol table.

214
deps/v8/SConstruct

@ -35,6 +35,7 @@ root_dir = dirname(File('SConstruct').rfile().abspath)
sys.path.append(join(root_dir, 'tools'))
import js2c, utils
# ANDROID_TOP is the top of the Android checkout, fetched from the environment
# variable 'TOP'. You will also need to set the CXX, CC, AR and RANLIB
# environment variables to the cross-compiling tools.
@ -83,7 +84,8 @@ ANDROID_LINKFLAGS = ['-nostdlib',
LIBRARY_FLAGS = {
'all': {
'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING']
'CPPDEFINES': ['ENABLE_LOGGING_AND_PROFILING'],
'CPPPATH': [join(root_dir, 'src')]
},
'gcc': {
'all': {
@ -94,6 +96,7 @@ LIBRARY_FLAGS = {
'CCFLAGS': ['-g', '-O0'],
'CPPDEFINES': ['ENABLE_DISASSEMBLER', 'DEBUG'],
'os:android': {
'CPPDEFINES': ['ENABLE_DEBUGGER_SUPPORT'],
'CCFLAGS': ['-mthumb']
}
},
@ -102,13 +105,13 @@ LIBRARY_FLAGS = {
'-ffunction-sections'],
'os:android': {
'CCFLAGS': ['-mthumb', '-Os'],
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG', 'ENABLE_DEBUGGER_SUPPORT']
}
},
'os:linux': {
'CCFLAGS': ['-ansi'],
'library:shared': {
'LIBS': ['pthread', 'rt']
'LIBS': ['pthread']
}
},
'os:macos': {
@ -129,9 +132,30 @@ LIBRARY_FLAGS = {
'-Wstrict-aliasing=2'],
'CPPPATH': ANDROID_INCLUDES,
},
'wordsize:32': {
'arch:x64': {
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64']
}
},
'wordsize:64': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
'arch:ia32': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'arch:arm': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
}
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
},
'arch:arm': {
'CPPDEFINES': ['V8_TARGET_ARCH_ARM']
},
'arch:x64': {
'CPPDEFINES': ['V8_TARGET_ARCH_X64']
},
'prof:oprofile': {
'CPPDEFINES': ['ENABLE_OPROFILE_AGENT']
@ -148,6 +172,9 @@ LIBRARY_FLAGS = {
'ARFLAGS': ['/NOLOGO'],
'CCPDBFLAGS': ['/Zi']
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
},
'mode:debug': {
'CCFLAGS': ['/Od', '/Gm'],
'CPPDEFINES': ['_DEBUG', 'ENABLE_DISASSEMBLER', 'DEBUG'],
@ -160,16 +187,20 @@ LIBRARY_FLAGS = {
}
},
'mode:release': {
'CCFLAGS': ['/O2', '/GL'],
'LINKFLAGS': ['/OPT:REF', '/OPT:ICF', '/LTCG'],
'ARFLAGS': ['/LTCG'],
'CCFLAGS': ['/O2'],
'LINKFLAGS': ['/OPT:REF', '/OPT:ICF'],
'msvcrt:static': {
'CCFLAGS': ['/MT']
},
'msvcrt:shared': {
'CCFLAGS': ['/MD']
},
'msvcltcg:on': {
'CCFLAGS': ['/GL'],
'LINKFLAGS': ['/LTCG'],
'ARFLAGS': ['/LTCG'],
}
},
}
}
}
@ -181,17 +212,16 @@ V8_EXTRA_FLAGS = {
'WARNINGFLAGS': ['-Wall', '-Werror', '-W',
'-Wno-unused-parameter']
},
'arch:arm': {
'CPPDEFINES': ['ARM']
},
'arch:android': {
'CPPDEFINES': ['ARM']
},
'os:win32': {
'WARNINGFLAGS': ['-pedantic', '-Wno-long-long']
},
'os:linux': {
'WARNINGFLAGS': ['-pedantic']
'WARNINGFLAGS': ['-pedantic'],
'library:shared': {
'soname:on': {
'LINKFLAGS': ['-Wl,-soname,${SONAME}']
}
}
},
'os:macos': {
'WARNINGFLAGS': ['-pedantic']
@ -209,7 +239,7 @@ V8_EXTRA_FLAGS = {
'LIBS': ['winmm', 'ws2_32']
},
'arch:arm': {
'CPPDEFINES': ['ARM'],
'CPPDEFINES': ['V8_TARGET_ARCH_ARM'],
# /wd4996 is to silence the warning about sscanf
# used by the arm simulator.
'WARNINGFLAGS': ['/wd4996']
@ -224,7 +254,7 @@ V8_EXTRA_FLAGS = {
MKSNAPSHOT_EXTRA_FLAGS = {
'gcc': {
'os:linux': {
'LIBS': ['pthread', 'rt'],
'LIBS': ['pthread'],
},
'os:macos': {
'LIBS': ['pthread'],
@ -238,6 +268,7 @@ MKSNAPSHOT_EXTRA_FLAGS = {
},
'msvc': {
'all': {
'CPPDEFINES': ['_HAS_EXCEPTIONS=0'],
'LIBS': ['winmm', 'ws2_32']
}
}
@ -268,7 +299,7 @@ CCTEST_EXTRA_FLAGS = {
'LIBPATH': [abspath('.')]
},
'os:linux': {
'LIBS': ['pthread', 'rt'],
'LIBS': ['pthread'],
},
'os:macos': {
'LIBS': ['pthread'],
@ -279,10 +310,34 @@ CCTEST_EXTRA_FLAGS = {
'os:win32': {
'LIBS': ['winmm', 'ws2_32']
},
'wordsize:64': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
'os:android': {
'CPPDEFINES': ['ANDROID', '__ARM_ARCH_5__', '__ARM_ARCH_5T__',
'__ARM_ARCH_5E__', '__ARM_ARCH_5TE__'],
'CCFLAGS': ANDROID_FLAGS,
'CPPPATH': ANDROID_INCLUDES,
'LIBPATH': [ANDROID_TOP + '/out/target/product/generic/obj/lib'],
'LINKFLAGS': ANDROID_LINKFLAGS,
'LIBS': ['c', 'stdc++', 'm'],
'mode:release': {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
},
'wordsize:32': {
'arch:x64': {
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64']
}
},
'wordsize:64': {
'arch:ia32': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'arch:arm': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
}
}
},
'msvc': {
'all': {
@ -291,6 +346,9 @@ CCTEST_EXTRA_FLAGS = {
},
'library:shared': {
'CPPDEFINES': ['USING_V8_SHARED']
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
}
}
}
@ -307,7 +365,7 @@ SAMPLE_FLAGS = {
'CCFLAGS': ['-fno-rtti', '-fno-exceptions']
},
'os:linux': {
'LIBS': ['pthread', 'rt'],
'LIBS': ['pthread'],
},
'os:macos': {
'LIBS': ['pthread'],
@ -330,9 +388,21 @@ SAMPLE_FLAGS = {
'CPPDEFINES': ['SK_RELEASE', 'NDEBUG']
}
},
'wordsize:32': {
'arch:x64': {
'CCFLAGS': ['-m64'],
'LINKFLAGS': ['-m64']
}
},
'wordsize:64': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
'arch:ia32': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
},
'arch:arm': {
'CCFLAGS': ['-m32'],
'LINKFLAGS': ['-m32']
}
},
'mode:release': {
'CCFLAGS': ['-O2']
@ -359,14 +429,21 @@ SAMPLE_FLAGS = {
},
'mode:release': {
'CCFLAGS': ['/O2'],
'LINKFLAGS': ['/OPT:REF', '/OPT:ICF', '/LTCG'],
'LINKFLAGS': ['/OPT:REF', '/OPT:ICF'],
'msvcrt:static': {
'CCFLAGS': ['/MT']
},
'msvcrt:shared': {
'CCFLAGS': ['/MD']
},
'msvcltcg:on': {
'CCFLAGS': ['/GL'],
'LINKFLAGS': ['/LTCG'],
}
},
'arch:ia32': {
'CPPDEFINES': ['V8_TARGET_ARCH_IA32']
},
'mode:debug': {
'CCFLAGS': ['/Od'],
'LINKFLAGS': ['/DEBUG'],
@ -387,7 +464,7 @@ D8_FLAGS = {
'LIBS': ['readline']
},
'os:linux': {
'LIBS': ['pthread', 'rt'],
'LIBS': ['pthread'],
},
'os:macos': {
'LIBS': ['pthread'],
@ -443,17 +520,17 @@ SIMPLE_OPTIONS = {
'toolchain': {
'values': ['gcc', 'msvc'],
'default': TOOLCHAIN_GUESS,
'help': 'the toolchain to use'
'help': 'the toolchain to use (' + TOOLCHAIN_GUESS + ')'
},
'os': {
'values': ['freebsd', 'linux', 'macos', 'win32', 'android'],
'default': OS_GUESS,
'help': 'the os to build for'
'help': 'the os to build for (' + OS_GUESS + ')'
},
'arch': {
'values':['arm', 'ia32'],
'values':['arm', 'ia32', 'x64'],
'default': ARCH_GUESS,
'help': 'the architecture to build for'
'help': 'the architecture to build for (' + ARCH_GUESS + ')'
},
'snapshot': {
'values': ['on', 'off', 'nobuild'],
@ -470,10 +547,20 @@ SIMPLE_OPTIONS = {
'default': 'static',
'help': 'the type of library to produce'
},
'soname': {
'values': ['on', 'off'],
'default': 'off',
'help': 'turn on setting soname for Linux shared library'
},
'msvcrt': {
'values': ['static', 'shared'],
'default': 'static',
'help': 'the type of MSVCRT library to use'
'help': 'the type of Microsoft Visual C++ runtime library to use'
},
'msvcltcg': {
'values': ['on', 'off'],
'default': 'on',
'help': 'use Microsoft Visual C++ link-time code generation'
},
'wordsize': {
'values': ['64', '32'],
@ -515,6 +602,49 @@ def GetOptions():
return result
def GetVersionComponents():
MAJOR_VERSION_PATTERN = re.compile(r"#define\s+MAJOR_VERSION\s+(.*)")
MINOR_VERSION_PATTERN = re.compile(r"#define\s+MINOR_VERSION\s+(.*)")
BUILD_NUMBER_PATTERN = re.compile(r"#define\s+BUILD_NUMBER\s+(.*)")
PATCH_LEVEL_PATTERN = re.compile(r"#define\s+PATCH_LEVEL\s+(.*)")
patterns = [MAJOR_VERSION_PATTERN,
MINOR_VERSION_PATTERN,
BUILD_NUMBER_PATTERN,
PATCH_LEVEL_PATTERN]
source = open(join(root_dir, 'src', 'version.cc')).read()
version_components = []
for pattern in patterns:
match = pattern.search(source)
if match:
version_components.append(match.group(1).strip())
else:
version_components.append('0')
return version_components
def GetVersion():
version_components = GetVersionComponents()
if version_components[len(version_components) - 1] == '0':
version_components.pop()
return '.'.join(version_components)
def GetSpecificSONAME():
SONAME_PATTERN = re.compile(r"#define\s+SONAME\s+\"(.*)\"")
source = open(join(root_dir, 'src', 'version.cc')).read()
match = SONAME_PATTERN.search(source)
if match:
return match.group(1).strip()
else:
return ''
def SplitList(str):
return [ s for s in str.split(",") if len(s) > 0 ]
@ -537,6 +667,12 @@ def VerifyOptions(env):
Abort("Profiling on windows only supported for static library.")
if env['prof'] == 'oprofile' and env['os'] != 'linux':
Abort("OProfile is only supported on Linux.")
if env['os'] == 'win32' and env['soname'] == 'on':
Abort("Shared Object soname not applicable for Windows.")
if env['soname'] == 'on' and env['library'] == 'static':
Abort("Shared Object soname not applicable for static library.")
if env['arch'] == 'x64' and env['os'] != 'linux':
Abort("X64 compilation only allowed on Linux OS.")
for (name, option) in SIMPLE_OPTIONS.iteritems():
if (not option.get('default')) and (name not in ARGUMENTS):
message = ("A value for option %s must be specified (%s)." %
@ -565,13 +701,13 @@ class BuildContext(object):
def AddRelevantFlags(self, initial, flags):
result = initial.copy()
self.AppendFlags(result, flags.get('all'))
toolchain = self.options['toolchain']
if toolchain in flags:
self.AppendFlags(result, flags[toolchain].get('all'))
for option in sorted(self.options.keys()):
value = self.options[option]
self.AppendFlags(result, flags[toolchain].get(option + ':' + value))
self.AppendFlags(result, flags.get('all'))
return result
def AddRelevantSubFlags(self, options, flags):
@ -667,11 +803,23 @@ def BuildSpecific(env, mode, env_overrides):
'd8': d8_flags
}
# Generate library base name.
target_id = mode
suffix = SUFFIXES[target_id]
library_name = 'v8' + suffix
version = GetVersion()
if context.options['soname'] == 'on':
# When building a shared object with SONAME, append the version to the
# library name.
library_name += '-' + version
env['LIBRARY'] = library_name
# Generate library SONAME if required by the build.
if context.options['soname'] == 'on':
soname = GetSpecificSONAME()
if soname == '':
soname = 'lib' + library_name + '.so'
env['SONAME'] = soname
# Build the object files by invoking SCons recursively.
(object_files, shell_files, mksnapshot) = env.SConscript(
join('src', 'SConscript'),

168
deps/v8/include/v8-debug.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_DEBUG_H_
#define V8_DEBUG_H_
#ifndef V8_V8_DEBUG_H_
#define V8_V8_DEBUG_H_
#include "v8.h"
@ -79,48 +79,116 @@ enum DebugEvent {
};
/**
* Debug event callback function.
*
* \param event the type of the debug event that triggered the callback
* (enum DebugEvent)
* \param exec_state execution state (JavaScript object)
* \param event_data event specific data (JavaScript object)
* \param data value passed by the user to SetDebugEventListener
*/
typedef void (*DebugEventCallback)(DebugEvent event,
Handle<Object> exec_state,
Handle<Object> event_data,
Handle<Value> data);
/**
* Debug message callback function.
*
* \param message the debug message
* \param length length of the message
* \param data the data value passed when registering the message handler
* A DebugMessageHandler does not take possession of the message string,
* and must not rely on the data persisting after the handler returns.
*/
typedef void (*DebugMessageHandler)(const uint16_t* message, int length,
void* data);
/**
* Debug host dispatch callback function.
*
* \param dispatch the dispatch value
* \param data the data value passed when registering the dispatch handler
*/
typedef void (*DebugHostDispatchHandler)(void* dispatch,
void* data);
class EXPORT Debug {
public:
/**
* A client object passed to the v8 debugger whose ownership will be taken by
* it. v8 is always responsible for deleting the object.
*/
class ClientData {
public:
virtual ~ClientData() {}
};
/**
* A message object passed to the debug message handler.
*/
class Message {
public:
/**
* Check type of message.
*/
virtual bool IsEvent() const = 0;
virtual bool IsResponse() const = 0;
virtual DebugEvent GetEvent() const = 0;
/**
* Indicate whether this is a response to a continue command which will
* start the VM running after this is processed.
*/
virtual bool WillStartRunning() const = 0;
/**
* Access to execution state and event data. Don't store these across
* callbacks as their content becomes invalid. These objects are from the
* debugger event that started the debug message loop.
*/
virtual Handle<Object> GetExecutionState() const = 0;
virtual Handle<Object> GetEventData() const = 0;
/**
* Get the debugger protocol JSON.
*/
virtual Handle<String> GetJSON() const = 0;
/**
* Get the context active when the debug event happened. Note this is not
* the current active context as the JavaScript part of the debugger is
* running in its own context which is entered at this point.
*/
virtual Handle<Context> GetEventContext() const = 0;
/**
* Client data passed with the corresponding request if any. This is the
* client_data value passed into Debug::SendCommand along with the
* request that led to the message or NULL if the message is an event. The
* debugger takes ownership of the data and will delete it even if there is
* no message handler.
*/
virtual ClientData* GetClientData() const = 0;
virtual ~Message() {}
};
/**
* Debug event callback function.
*
* \param event the type of the debug event that triggered the callback
* (enum DebugEvent)
* \param exec_state execution state (JavaScript object)
* \param event_data event specific data (JavaScript object)
* \param data value passed by the user to SetDebugEventListener
*/
typedef void (*EventCallback)(DebugEvent event,
Handle<Object> exec_state,
Handle<Object> event_data,
Handle<Value> data);
/**
* Debug message callback function.
*
* \param message the debug message handler message object
* \param length length of the message
* \param client_data the data value passed when registering the message handler
* A MessageHandler does not take possession of the message string,
* and must not rely on the data persisting after the handler returns.
*
* This message handler is deprecated. Use MessageHandler2 instead.
*/
typedef void (*MessageHandler)(const uint16_t* message, int length,
ClientData* client_data);
/**
* Debug message callback function.
*
* \param message the debug message handler message object
* A MessageHandler does not take possession of the message data,
* and must not rely on the data persisting after the handler returns.
*/
typedef void (*MessageHandler2)(const Message& message);
/**
* Debug host dispatch callback function.
*/
typedef void (*HostDispatchHandler)();
// Set a C debug event listener.
static bool SetDebugEventListener(DebugEventCallback that,
static bool SetDebugEventListener(EventCallback that,
Handle<Value> data = Handle<Value>());
// Set a JavaScript debug event listener.
@ -130,15 +198,17 @@ class EXPORT Debug {
// Break execution of JavaScript.
static void DebugBreak();
// Message based interface. The message protocol is JSON.
static void SetMessageHandler(DebugMessageHandler handler, void* data = NULL,
bool message_handler_thread = true);
static void SendCommand(const uint16_t* command, int length);
// Message based interface. The message protocol is JSON. NOTE: the message
// handler thread is no longer supported; the parameter must be false.
static void SetMessageHandler(MessageHandler handler,
bool message_handler_thread = false);
static void SetMessageHandler2(MessageHandler2 handler);
static void SendCommand(const uint16_t* command, int length,
ClientData* client_data = NULL);
// Dispatch interface.
static void SetHostDispatchHandler(DebugHostDispatchHandler handler,
void* data = NULL);
static void SendHostDispatch(void* dispatch);
static void SetHostDispatchHandler(HostDispatchHandler handler,
int period = 100);
/**
* Run a JavaScript function in the debugger.
@ -176,4 +246,4 @@ class EXPORT Debug {
#undef EXPORT
#endif // V8_DEBUG_H_
#endif // V8_V8_DEBUG_H_
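
The interface above replaces the old DebugMessageHandler callback with a
Message object that carries the protocol JSON and optional per-request client
data. A minimal sketch of an embedder using it, assuming V8 is already
initialized; OnDebugMessage, MyClientData and the request JSON are
illustrative names, not part of the V8 API:

#include <cstdio>
#include "v8-debug.h"

// Embedder-defined payload; the debugger takes ownership and deletes it.
class MyClientData : public v8::Debug::ClientData {
 public:
  explicit MyClientData(int seq) : seq_(seq) {}
  int seq() const { return seq_; }
 private:
  int seq_;
};

// MessageHandler2 callback: receives both events and command responses.
static void OnDebugMessage(const v8::Debug::Message& message) {
  v8::String::Value json(message.GetJSON());
  MyClientData* data = static_cast<MyClientData*>(message.GetClientData());
  printf("debug %s (seq=%d), %d chars of JSON\n",
         message.IsEvent() ? "event" : "response",
         data != NULL ? data->seq() : -1,
         json.length());
}

void AttachDebugger() {
  v8::Debug::SetMessageHandler2(OnDebugMessage);
  // Send a protocol request; client_data comes back on the response.
  const char* cmd = "{\"seq\":1,\"type\":\"request\",\"command\":\"continue\"}";
  uint16_t buffer[128];
  int length = 0;
  for (; cmd[length] != '\0'; length++) buffer[length] = cmd[length];
  v8::Debug::SendCommand(buffer, length, new MyClientData(1));
}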

73
deps/v8/include/v8.h

@ -41,10 +41,15 @@
#include <stdio.h>
#ifdef _WIN32
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef short int16_t; // NOLINT
typedef unsigned short uint16_t; // NOLINT
typedef int int32_t;
typedef unsigned int uint32_t;
typedef long long int64_t; // NOLINT
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
// intptr_t is defined in crtdefs.h through stdio.h.
// Setup for Windows DLL export/import. When building the V8 DLL the
// BUILDING_V8_SHARED needs to be defined. When building a program which uses
@ -529,6 +534,13 @@ class V8EXPORT Script {
* Returns the script id value.
*/
Local<Value> Id();
/**
* Associate an additional data object with the script. This is mainly used
* with the debugger as this data object is only available through the
* debugger API.
*/
void SetData(Handle<Value> data);
};
@ -540,8 +552,18 @@ class V8EXPORT Message {
Local<String> Get() const;
Local<String> GetSourceLine() const;
/**
* Returns the resource name for the script from where the function causing
* the error originates.
*/
Handle<Value> GetScriptResourceName() const;
/**
* Returns the resource data for the script from where the function causing
* the error originates.
*/
Handle<Value> GetScriptData() const;
/**
* Returns the number, 1-based, of the line where the error occurred.
*/
@ -805,14 +827,14 @@ class V8EXPORT String : public Primitive {
};
/**
* Get the ExternalStringResource for an external string. Only
* valid if IsExternal() returns true.
* Get the ExternalStringResource for an external string. Returns
* NULL if IsExternal() doesn't return true.
*/
ExternalStringResource* GetExternalStringResource() const;
/**
* Get the ExternalAsciiStringResource for an external ascii string.
* Only valid if IsExternalAscii() returns true.
* Returns NULL if IsExternalAscii() doesn't return true.
*/
ExternalAsciiStringResource* GetExternalAsciiStringResource() const;
@ -1028,6 +1050,18 @@ class V8EXPORT Object : public Value {
bool Set(Handle<Value> key,
Handle<Value> value,
PropertyAttribute attribs = None);
// Sets a local property on this object, bypassing interceptors and
// overriding accessors or read-only properties.
//
// Note that if the object has an interceptor the property will be set
// locally, but since the interceptor takes precedence the local property
// will only be returned if the interceptor doesn't return a value.
//
// Note also that this only works for named properties.
bool ForceSet(Handle<Value> key,
Handle<Value> value,
PropertyAttribute attribs = None);
Local<Value> Get(Handle<Value> key);
// TODO(1245389): Replace the type-specific versions of these
@ -1093,6 +1127,9 @@ class V8EXPORT Object : public Value {
/**
* Returns the identity hash for this object. The current implementation uses
* a hidden property on the object to store the identity hash.
*
* The return value will never be 0. Also, it is not guaranteed to be
* unique.
*/
int GetIdentityHash();
@ -2042,6 +2079,24 @@ class V8EXPORT V8 {
*/
static void ResumeProfiler();
/**
* If logging is performed into a memory buffer (via --logfile=*), allows
* previously written messages to be retrieved. This can be used for reading
* profiler log data in the application. This function is thread-safe.
*
* Caller provides a destination buffer that must exist during GetLogLines
* call. Only whole log lines are copied into the buffer.
*
* \param from_pos specifies a point in a buffer to read from, 0 is the
* beginning of a buffer. It is assumed that caller updates its current
* position using returned size value from the previous call.
* \param dest_buf destination buffer for log data.
* \param max_size size of the destination buffer.
* \returns actual size of log data copied into buffer.
*/
static int GetLogLines(int from_pos, char* dest_buf, int max_size);
/**
* Releases any resources used by v8 and stops any utility threads
* that may be running. Note that disposing v8 is permanent, it
@ -2222,6 +2277,14 @@ class V8EXPORT Context {
/** Returns true if V8 has a current context. */
static bool InContext();
/**
* Associate an additional data object with the context. This is mainly used
* with the debugger to provide additional information on the context through
* the debugger API.
*/
void SetData(Handle<Value> data);
Local<Value> GetData();
/**
* Stack-allocated class which sets the execution context for all
* operations executed within a local scope.
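
A short sketch of two of the additions documented above, Object::ForceSet
and Object::GetIdentityHash, assuming the usual embedder setup (an entered
context and an object handle); the property name and values are illustrative:

#include "v8.h"

void DemonstrateForceSet(v8::Handle<v8::Object> obj) {
  v8::HandleScope scope;
  v8::Handle<v8::String> key = v8::String::New("answer");
  // Pin the property down as read-only first.
  obj->Set(key, v8::Integer::New(41), v8::ReadOnly);
  // A plain Set respects the ReadOnly attribute and leaves 41 in place...
  obj->Set(key, v8::Integer::New(42));
  // ...while ForceSet bypasses it and overwrites the value.
  obj->ForceSet(key, v8::Integer::New(42));
  // Non-zero and stable for this object, but not guaranteed unique.
  int hash = obj->GetIdentityHash();
  (void)hash;  // e.g. a key for a host-side map of wrapper objects
}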

22
deps/v8/samples/shell.cc

@ -38,6 +38,7 @@ bool ExecuteString(v8::Handle<v8::String> source,
bool print_result,
bool report_exceptions);
v8::Handle<v8::Value> Print(const v8::Arguments& args);
v8::Handle<v8::Value> Read(const v8::Arguments& args);
v8::Handle<v8::Value> Load(const v8::Arguments& args);
v8::Handle<v8::Value> Quit(const v8::Arguments& args);
v8::Handle<v8::Value> Version(const v8::Arguments& args);
@ -52,6 +53,8 @@ int RunMain(int argc, char* argv[]) {
v8::Handle<v8::ObjectTemplate> global = v8::ObjectTemplate::New();
// Bind the global 'print' function to the C++ Print callback.
global->Set(v8::String::New("print"), v8::FunctionTemplate::New(Print));
// Bind the global 'read' function to the C++ Read callback.
global->Set(v8::String::New("read"), v8::FunctionTemplate::New(Read));
// Bind the global 'load' function to the C++ Load callback.
global->Set(v8::String::New("load"), v8::FunctionTemplate::New(Load));
// Bind the 'quit' function
@ -135,6 +138,25 @@ v8::Handle<v8::Value> Print(const v8::Arguments& args) {
}
// The callback that is invoked by v8 whenever the JavaScript 'read'
// function is called. This function loads the content of the file named in
// the argument into a JavaScript string.
v8::Handle<v8::Value> Read(const v8::Arguments& args) {
if (args.Length() != 1) {
return v8::ThrowException(v8::String::New("Bad parameters"));
}
v8::String::Utf8Value file(args[0]);
if (*file == NULL) {
return v8::ThrowException(v8::String::New("Error loading file"));
}
v8::Handle<v8::String> source = ReadFile(*file);
if (source.IsEmpty()) {
return v8::ThrowException(v8::String::New("Error loading file"));
}
return source;
}
// The callback that is invoked by v8 whenever the JavaScript 'load'
// function is called. Loads, compiles and executes its argument
// JavaScript file.

38
deps/v8/src/SConscript

@ -50,23 +50,36 @@ SOURCES = {
'scopeinfo.cc', 'scopes.cc', 'serialize.cc', 'snapshot-common.cc',
'spaces.cc', 'string-stream.cc', 'stub-cache.cc', 'token.cc', 'top.cc',
'unicode.cc', 'usage-analyzer.cc', 'utils.cc', 'v8-counters.cc',
'v8.cc', 'v8threads.cc', 'variables.cc', 'virtual-frame.cc', 'zone.cc'
'v8.cc', 'v8threads.cc', 'variables.cc', 'version.cc',
'virtual-frame.cc', 'zone.cc'
],
'arch:arm': [
'assembler-arm.cc', 'builtins-arm.cc', 'codegen-arm.cc', 'cpu-arm.cc',
'disasm-arm.cc', 'debug-arm.cc', 'frames-arm.cc', 'ic-arm.cc',
'jump-target-arm.cc', 'macro-assembler-arm.cc',
'regexp-macro-assembler-arm.cc', 'register-allocator-arm.cc',
'stub-cache-arm.cc', 'virtual-frame-arm.cc'
'arm/assembler-arm.cc', 'arm/builtins-arm.cc',
'arm/codegen-arm.cc', 'arm/cpu-arm.cc', 'arm/disasm-arm.cc',
'arm/debug-arm.cc', 'arm/frames-arm.cc', 'arm/ic-arm.cc',
'arm/jump-target-arm.cc', 'arm/macro-assembler-arm.cc',
'arm/regexp-macro-assembler-arm.cc',
'arm/register-allocator-arm.cc', 'arm/stub-cache-arm.cc',
'arm/virtual-frame-arm.cc'
],
'arch:ia32': [
'assembler-ia32.cc', 'builtins-ia32.cc', 'codegen-ia32.cc',
'cpu-ia32.cc', 'disasm-ia32.cc', 'debug-ia32.cc', 'frames-ia32.cc',
'ic-ia32.cc', 'jump-target-ia32.cc', 'macro-assembler-ia32.cc',
'regexp-macro-assembler-ia32.cc', 'register-allocator-ia32.cc',
'stub-cache-ia32.cc', 'virtual-frame-ia32.cc'
'ia32/assembler-ia32.cc', 'ia32/builtins-ia32.cc',
'ia32/codegen-ia32.cc', 'ia32/cpu-ia32.cc', 'ia32/disasm-ia32.cc',
'ia32/debug-ia32.cc', 'ia32/frames-ia32.cc', 'ia32/ic-ia32.cc',
'ia32/jump-target-ia32.cc', 'ia32/macro-assembler-ia32.cc',
'ia32/regexp-macro-assembler-ia32.cc',
'ia32/register-allocator-ia32.cc', 'ia32/stub-cache-ia32.cc',
'ia32/virtual-frame-ia32.cc'
],
'simulator:arm': ['simulator-arm.cc'],
'arch:x64': [
'x64/assembler-x64.cc', 'x64/builtins-x64.cc',
'x64/codegen-x64.cc', 'x64/cpu-x64.cc', 'x64/disasm-x64.cc',
'x64/debug-x64.cc', 'x64/frames-x64.cc', 'x64/ic-x64.cc',
'x64/jump-target-x64.cc', 'x64/macro-assembler-x64.cc',
# 'x64/regexp-macro-assembler-x64.cc',
'x64/stub-cache-x64.cc'
],
'simulator:arm': ['arm/simulator-arm.cc'],
'os:freebsd': ['platform-freebsd.cc', 'platform-posix.cc'],
'os:linux': ['platform-linux.cc', 'platform-posix.cc'],
'os:android': ['platform-linux.cc', 'platform-posix.cc'],
@ -121,6 +134,7 @@ debug-delay.js
mirror-delay.js
date-delay.js
regexp-delay.js
json-delay.js
'''.split()

37
deps/v8/src/accessors.cc

@ -251,6 +251,24 @@ const AccessorDescriptor Accessors::ScriptColumnOffset = {
};
//
// Accessors::ScriptData
//
Object* Accessors::ScriptGetData(Object* object, void*) {
Object* script = JSValue::cast(object)->value();
return Script::cast(script)->data();
}
const AccessorDescriptor Accessors::ScriptData = {
ScriptGetData,
IllegalSetter,
0
};
//
// Accessors::ScriptType
//
@ -289,6 +307,25 @@ const AccessorDescriptor Accessors::ScriptLineEnds = {
};
//
// Accessors::ScriptGetContextData
//
Object* Accessors::ScriptGetContextData(Object* object, void*) {
HandleScope scope;
Handle<Script> script(Script::cast(JSValue::cast(object)->value()));
return script->context_data();
}
const AccessorDescriptor Accessors::ScriptContextData = {
ScriptGetContextData,
IllegalSetter,
0
};
//
// Accessors::FunctionPrototype
//

4
deps/v8/src/accessors.h

@ -45,8 +45,10 @@ namespace v8 { namespace internal {
V(ScriptId) \
V(ScriptLineOffset) \
V(ScriptColumnOffset) \
V(ScriptData) \
V(ScriptType) \
V(ScriptLineEnds) \
V(ScriptContextData) \
V(ObjectPrototype)
// Accessors contains all predefined proxy accessors.
@ -84,8 +86,10 @@ class Accessors : public AllStatic {
static Object* ScriptGetSource(Object* object, void*);
static Object* ScriptGetLineOffset(Object* object, void*);
static Object* ScriptGetColumnOffset(Object* object, void*);
static Object* ScriptGetData(Object* object, void*);
static Object* ScriptGetType(Object* object, void*);
static Object* ScriptGetLineEnds(Object* object, void*);
static Object* ScriptGetContextData(Object* object, void*);
static Object* ObjectGetPrototype(Object* receiver, void*);
static Object* ObjectSetPrototype(JSObject* receiver, Object* value, void*);

185
deps/v8/src/api.cc

@ -37,6 +37,7 @@
#include "serialize.h"
#include "snapshot.h"
#include "v8threads.h"
#include "version.h"
#define LOG_API(expr) LOG(ApiEntryCall(expr))
@ -444,6 +445,40 @@ void Context::Exit() {
}
void Context::SetData(v8::Handle<Value> data) {
if (IsDeadCheck("v8::Context::SetData()")) return;
ENTER_V8;
{
HandleScope scope;
i::Handle<i::Context> env = Utils::OpenHandle(this);
i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
ASSERT(env->IsGlobalContext());
if (env->IsGlobalContext()) {
env->set_data(*raw_data);
}
}
}
v8::Local<v8::Value> Context::GetData() {
if (IsDeadCheck("v8::Context::GetData()")) return v8::Local<Value>();
ENTER_V8;
i::Object* raw_result = NULL;
{
HandleScope scope;
i::Handle<i::Context> env = Utils::OpenHandle(this);
ASSERT(env->IsGlobalContext());
if (env->IsGlobalContext()) {
raw_result = env->data();
} else {
return Local<Value>();
}
}
i::Handle<i::Object> result(raw_result);
return Utils::ToLocal(result);
}
void** v8::HandleScope::RawClose(void** value) {
if (!ApiCheck(!is_closed_,
"v8::HandleScope::Close()",
@ -1108,6 +1143,19 @@ Local<Value> Script::Id() {
}
void Script::SetData(v8::Handle<Value> data) {
ON_BAILOUT("v8::Script::SetData()", return);
LOG_API("Script::SetData");
{
HandleScope scope;
i::Handle<i::JSFunction> fun = Utils::OpenHandle(this);
i::Handle<i::Object> raw_data = Utils::OpenHandle(*data);
i::Handle<i::Script> script(i::Script::cast(fun->shared()->script()));
script->set_data(*raw_data);
}
}
// --- E x c e p t i o n s ---
@ -1199,6 +1247,22 @@ v8::Handle<Value> Message::GetScriptResourceName() const {
}
v8::Handle<Value> Message::GetScriptData() const {
if (IsDeadCheck("v8::Message::GetScriptResourceData()")) {
return Local<Value>();
}
ENTER_V8;
HandleScope scope;
i::Handle<i::JSObject> obj =
i::Handle<i::JSObject>::cast(Utils::OpenHandle(this));
// Return this.script.data.
i::Handle<i::JSValue> script =
i::Handle<i::JSValue>::cast(GetProperty(obj, "script"));
i::Handle<i::Object> data(i::Script::cast(script->value())->data());
return scope.Close(Utils::ToLocal(data));
}
static i::Handle<i::Object> CallV8HeapFunction(const char* name,
i::Handle<i::Object> recv,
int argc,
@ -1806,6 +1870,26 @@ bool v8::Object::Set(v8::Handle<Value> key, v8::Handle<Value> value,
}
bool v8::Object::ForceSet(v8::Handle<Value> key,
v8::Handle<Value> value,
v8::PropertyAttribute attribs) {
ON_BAILOUT("v8::Object::ForceSet()", return false);
ENTER_V8;
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
i::Handle<i::Object> key_obj = Utils::OpenHandle(*key);
i::Handle<i::Object> value_obj = Utils::OpenHandle(*value);
EXCEPTION_PREAMBLE();
i::Handle<i::Object> obj = i::ForceSetProperty(
self,
key_obj,
value_obj,
static_cast<PropertyAttributes>(attribs));
has_pending_exception = obj.is_null();
EXCEPTION_BAILOUT_CHECK(false);
return true;
}
Local<Value> v8::Object::Get(v8::Handle<Value> key) {
ON_BAILOUT("v8::Object::Get()", return Local<v8::Value>());
ENTER_V8;
@ -2023,7 +2107,12 @@ int v8::Object::GetIdentityHash() {
if (hash->IsSmi()) {
hash_value = i::Smi::cast(*hash)->value();
} else {
hash_value = random() & i::Smi::kMaxValue; // Limit range to fit a smi.
int attempts = 0;
do {
hash_value = random() & i::Smi::kMaxValue; // Limit range to fit a smi.
attempts++;
} while (hash_value == 0 && attempts < 30);
hash_value = hash_value != 0 ? hash_value : 1; // never return 0
i::SetProperty(hidden_props,
hash_symbol,
i::Handle<i::Object>(i::Smi::FromInt(hash_value)),
@ -2266,9 +2355,12 @@ v8::String::ExternalStringResource*
v8::String::GetExternalStringResource() const {
EnsureInitialized("v8::String::GetExternalStringResource()");
i::Handle<i::String> str = Utils::OpenHandle(this);
ASSERT(str->IsExternalTwoByteString());
void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
return reinterpret_cast<ExternalStringResource*>(resource);
if (i::StringShape(*str).IsExternalTwoByte()) {
void* resource = i::Handle<i::ExternalTwoByteString>::cast(str)->resource();
return reinterpret_cast<ExternalStringResource*>(resource);
} else {
return NULL;
}
}
@ -2276,9 +2368,12 @@ v8::String::ExternalAsciiStringResource*
v8::String::GetExternalAsciiStringResource() const {
EnsureInitialized("v8::String::GetExternalAsciiStringResource()");
i::Handle<i::String> str = Utils::OpenHandle(this);
ASSERT(str->IsExternalAsciiString());
void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
return reinterpret_cast<ExternalAsciiStringResource*>(resource);
if (i::StringShape(*str).IsExternalAscii()) {
void* resource = i::Handle<i::ExternalAsciiString>::cast(str)->resource();
return reinterpret_cast<ExternalAsciiStringResource*>(resource);
} else {
return NULL;
}
}
@ -2373,7 +2468,9 @@ bool v8::V8::Dispose() {
const char* v8::V8::GetVersion() {
return "1.1.10.4";
static v8::internal::EmbeddedVector<char, 128> buffer;
v8::internal::Version::GetString(buffer);
return buffer.start();
}
@ -2589,10 +2686,13 @@ Local<Value> v8::External::Wrap(void* data) {
ENTER_V8;
if ((reinterpret_cast<intptr_t>(data) & kAlignedPointerMask) == 0) {
uintptr_t data_ptr = reinterpret_cast<uintptr_t>(data);
int data_value = static_cast<int>(data_ptr >> kAlignedPointerShift);
intptr_t data_value =
static_cast<intptr_t>(data_ptr >> kAlignedPointerShift);
STATIC_ASSERT(sizeof(data_ptr) == sizeof(data_value));
i::Handle<i::Object> obj(i::Smi::FromInt(data_value));
return Utils::ToLocal(obj);
if (i::Smi::IsIntptrValid(data_value)) {
i::Handle<i::Object> obj(i::Smi::FromIntptr(data_value));
return Utils::ToLocal(obj);
}
}
return ExternalNewImpl(data);
}
@ -2603,7 +2703,8 @@ void* v8::External::Unwrap(v8::Handle<v8::Value> value) {
i::Handle<i::Object> obj = Utils::OpenHandle(*value);
if (obj->IsSmi()) {
// The external value was an aligned pointer.
uintptr_t result = i::Smi::cast(*obj)->value() << kAlignedPointerShift;
uintptr_t result = static_cast<uintptr_t>(
i::Smi::cast(*obj)->value()) << kAlignedPointerShift;
return reinterpret_cast<void*>(result);
}
return ExternalValueImpl(obj);
@ -3021,6 +3122,11 @@ void V8::ResumeProfiler() {
#endif
}
int V8::GetLogLines(int from_pos, char* dest_buf, int max_size) {
#ifdef ENABLE_LOGGING_AND_PROFILING
return i::Logger::GetLogLines(from_pos, dest_buf, max_size);
#endif
}
String::Utf8Value::Utf8Value(v8::Handle<v8::Value> obj) {
EnsureInitialized("v8::String::Utf8Value::Utf8Value()");
@ -3180,8 +3286,8 @@ Local<Value> Exception::Error(v8::Handle<v8::String> raw_message) {
// --- D e b u g S u p p o r t ---
bool Debug::SetDebugEventListener(DebugEventCallback that, Handle<Value> data) {
#ifdef ENABLE_DEBUGGER_SUPPORT
bool Debug::SetDebugEventListener(EventCallback that, Handle<Value> data) {
EnsureInitialized("v8::Debug::SetDebugEventListener()");
ON_BAILOUT("v8::Debug::SetDebugEventListener()", return false);
ENTER_V8;
@ -3211,31 +3317,54 @@ void Debug::DebugBreak() {
}
void Debug::SetMessageHandler(v8::DebugMessageHandler handler, void* data,
static v8::Debug::MessageHandler message_handler = NULL;
static void MessageHandlerWrapper(const v8::Debug::Message& message) {
if (message_handler) {
v8::String::Value json(message.GetJSON());
message_handler(*json, json.length(), message.GetClientData());
}
}
void Debug::SetMessageHandler(v8::Debug::MessageHandler handler,
bool message_handler_thread) {
EnsureInitialized("v8::Debug::SetMessageHandler");
ENTER_V8;
i::Debugger::SetMessageHandler(handler, data, message_handler_thread);
}
// Message handler thread not supported any more. Parameter temporarily left in
// the API for client compatibility reasons.
CHECK(!message_handler_thread);
void Debug::SendCommand(const uint16_t* command, int length) {
if (!i::V8::HasBeenSetup()) return;
i::Debugger::ProcessCommand(i::Vector<const uint16_t>(command, length));
// TODO(sgjesse) support the old message handler API through a simple wrapper.
message_handler = handler;
if (message_handler != NULL) {
i::Debugger::SetMessageHandler(MessageHandlerWrapper);
} else {
i::Debugger::SetMessageHandler(NULL);
}
}
void Debug::SetHostDispatchHandler(v8::DebugHostDispatchHandler handler,
void* data) {
EnsureInitialized("v8::Debug::SetHostDispatchHandler");
void Debug::SetMessageHandler2(v8::Debug::MessageHandler2 handler) {
EnsureInitialized("v8::Debug::SetMessageHandler");
ENTER_V8;
i::Debugger::SetHostDispatchHandler(handler, data);
i::Debugger::SetMessageHandler(handler);
}
void Debug::SendHostDispatch(void* dispatch) {
void Debug::SendCommand(const uint16_t* command, int length,
ClientData* client_data) {
if (!i::V8::HasBeenSetup()) return;
i::Debugger::ProcessHostDispatch(dispatch);
i::Debugger::ProcessCommand(i::Vector<const uint16_t>(command, length),
client_data);
}
void Debug::SetHostDispatchHandler(HostDispatchHandler handler,
int period) {
EnsureInitialized("v8::Debug::SetHostDispatchHandler");
ENTER_V8;
i::Debugger::SetHostDispatchHandler(handler, period);
}
@ -3263,7 +3392,7 @@ Handle<Value> Debug::Call(v8::Handle<v8::Function> fun,
bool Debug::EnableAgent(const char* name, int port) {
return i::Debugger::StartAgent(name, port);
}
#endif // ENABLE_DEBUGGER_SUPPORT
namespace internal {
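
The External::Wrap/Unwrap changes above widen the smi arithmetic so that
aligned pointers survive the tag/untag round trip on 64-bit builds. A purely
illustrative embedder sketch of that round trip (HostObject is a made-up
type, not part of V8):

#include "v8.h"

struct HostObject { int id; };

v8::Handle<v8::Value> StashPointer(HostObject* host) {
  // Sufficiently aligned pointers take the fast smi path inside Wrap;
  // others fall back to a real external object.
  return v8::External::Wrap(host);
}

HostObject* RecoverPointer(v8::Handle<v8::Value> wrapped) {
  return static_cast<HostObject*>(v8::External::Unwrap(wrapped));
}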

8
deps/v8/src/assembler-arm-inl.h → deps/v8/src/arm/assembler-arm-inl.h

@ -34,10 +34,10 @@
// significantly by Google Inc.
// Copyright 2006-2008 the V8 project authors. All rights reserved.
#ifndef V8_ASSEMBLER_ARM_INL_H_
#define V8_ASSEMBLER_ARM_INL_H_
#ifndef V8_ARM_ASSEMBLER_ARM_INL_H_
#define V8_ARM_ASSEMBLER_ARM_INL_H_
#include "assembler-arm.h"
#include "arm/assembler-arm.h"
#include "cpu.h"
@ -246,4 +246,4 @@ void Assembler::set_target_address_at(Address pc, Address target) {
} } // namespace v8::internal
#endif // V8_ASSEMBLER_ARM_INL_H_
#endif // V8_ARM_ASSEMBLER_ARM_INL_H_

2
deps/v8/src/assembler-arm.cc → deps/v8/src/arm/assembler-arm.cc

@ -36,7 +36,7 @@
#include "v8.h"
#include "assembler-arm-inl.h"
#include "arm/assembler-arm-inl.h"
#include "serialize.h"
namespace v8 { namespace internal {

40
deps/v8/src/assembler-arm.h → deps/v8/src/arm/assembler-arm.h

@ -37,8 +37,8 @@
// A light-weight ARM Assembler
// Generates user mode instructions for the ARM architecture up to version 5
#ifndef V8_ASSEMBLER_ARM_H_
#define V8_ASSEMBLER_ARM_H_
#ifndef V8_ARM_ASSEMBLER_ARM_H_
#define V8_ARM_ASSEMBLER_ARM_H_
#include "assembler.h"
@ -164,23 +164,23 @@ enum Coprocessor {
// Condition field in instructions
enum Condition {
eq = 0 << 28,
ne = 1 << 28,
cs = 2 << 28,
hs = 2 << 28,
cc = 3 << 28,
lo = 3 << 28,
mi = 4 << 28,
pl = 5 << 28,
vs = 6 << 28,
vc = 7 << 28,
hi = 8 << 28,
ls = 9 << 28,
ge = 10 << 28,
lt = 11 << 28,
gt = 12 << 28,
le = 13 << 28,
al = 14 << 28
eq = 0 << 28, // Z set equal.
ne = 1 << 28, // Z clear not equal.
cs = 2 << 28, // C set unsigned higher or same.
hs = 2 << 28, // C set unsigned higher or same.
cc = 3 << 28, // C clear unsigned lower.
lo = 3 << 28, // C clear unsigned lower.
mi = 4 << 28, // N set negative.
pl = 5 << 28, // N clear positive or zero.
vs = 6 << 28, // V set overflow.
vc = 7 << 28, // V clear no overflow.
hi = 8 << 28, // C set, Z clear unsigned higher.
ls = 9 << 28, // C clear or Z set unsigned lower or same.
ge = 10 << 28, // N == V greater or equal.
lt = 11 << 28, // N != V less than.
gt = 12 << 28, // Z clear, N == V greater than.
le = 13 << 28, // Z set or N != V less than or equal.
al = 14 << 28 // always.
};
@ -786,4 +786,4 @@ class Assembler : public Malloced {
} } // namespace v8::internal
#endif // V8_ASSEMBLER_ARM_H_
#endif // V8_ARM_ASSEMBLER_ARM_H_
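
The condition values above occupy the top four bits of every ARM instruction
word. A tiny standalone sketch of how a conditional branch encoding is put
together from them (illustrative only, not the real assembler):

#include <cstdint>
#include <cstdio>

// Mirrors the enum above: the condition lives in bits 31..28.
enum Condition : uint32_t { eq = 0u << 28, ne = 1u << 28, al = 14u << 28 };

// B<cond>: condition | 0b1010 in bits 27..24 | signed 24-bit word offset.
uint32_t EncodeBranch(Condition cond, int32_t word_offset) {
  return cond | (0xAu << 24) | (static_cast<uint32_t>(word_offset) & 0x00FFFFFF);
}

int main() {
  // beq with offset 0 branches to PC + 8 and encodes as 0x0A000000.
  printf("beq: 0x%08X\n", EncodeBranch(eq, 0));
  return 0;
}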

63
deps/v8/src/builtins-arm.cc → deps/v8/src/arm/builtins-arm.cc

@ -34,7 +34,7 @@
namespace v8 { namespace internal {
#define __ masm->
#define __ ACCESS_MASM(masm)
void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
@ -58,6 +58,16 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- sp[...]: constructor arguments
// -----------------------------------
Label non_function_call;
// Check that the function is not a smi.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &non_function_call);
// Check that the function is a JSFunction.
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(JS_FUNCTION_TYPE));
__ b(ne, &non_function_call);
// Enter a construct frame.
__ EnterConstructFrame();
@ -169,7 +179,17 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ LeaveConstructFrame();
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize));
__ mov(pc, Operand(lr));
__ Jump(lr);
// r0: number of arguments
// r1: called object
__ bind(&non_function_call);
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION);
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
}
@ -218,8 +238,9 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
__ mov(r5, Operand(r4));
__ mov(r6, Operand(r4));
__ mov(r7, Operand(r4));
if (kR9Available == 1)
if (kR9Available == 1) {
__ mov(r9, Operand(r4));
}
// Invoke the code and pass argc as r0.
__ mov(r0, Operand(r3));
@ -234,7 +255,7 @@ static void Generate_JSEntryTrampolineHelper(MacroAssembler* masm,
// Exit the JS frame and remove the parameters (except function), and return.
// Respect ABI stack constraint.
__ LeaveInternalFrame();
__ mov(pc, lr);
__ Jump(lr);
// r0: result
}
@ -416,15 +437,35 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
__ push(r0);
__ InvokeBuiltin(Builtins::APPLY_PREPARE, CALL_JS);
// Eagerly check for stack-overflow before starting to push the arguments.
// r0: number of arguments
Label okay;
Label no_preemption, retry_preemption;
__ bind(&retry_preemption);
ExternalReference stack_guard_limit_address =
ExternalReference::address_of_stack_guard_limit();
__ mov(r2, Operand(stack_guard_limit_address));
__ ldr(r2, MemOperand(r2));
__ cmp(sp, r2);
__ b(hi, &no_preemption);
// We have encountered a preemption or stack overflow already before we push
// the array contents. Save r0 which is the Smi-tagged length of the array.
__ push(r0);
// Runtime routines expect at least one argument, so give it a Smi.
__ mov(r0, Operand(Smi::FromInt(0)));
__ push(r0);
__ CallRuntime(Runtime::kStackGuard, 1);
// Since we returned, it wasn't a stack overflow. Restore r0 and try again.
__ pop(r0);
__ b(&retry_preemption);
__ bind(&no_preemption);
// Eagerly check for stack-overflow before starting to push the arguments.
// r0: number of arguments.
// r2: stack limit.
Label okay;
__ sub(r2, sp, r2);
__ sub(r2, r2, Operand(3 * kPointerSize)); // limit, index, receiver
__ cmp(r2, Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
__ b(hi, &okay);
@ -523,7 +564,7 @@ void Builtins::Generate_FunctionApply(MacroAssembler* masm) {
// Tear down the internal frame and remove function, receiver and args.
__ LeaveInternalFrame();
__ add(sp, sp, Operand(3 * kPointerSize));
__ mov(pc, lr);
__ Jump(lr);
}
@ -642,14 +683,14 @@ void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// Exit frame and return.
LeaveArgumentsAdaptorFrame(masm);
__ mov(pc, lr);
__ Jump(lr);
// -------------------------------------------
// Don't adapt arguments.
// -------------------------------------------
__ bind(&dont_adapt_arguments);
__ mov(pc, r3);
__ Jump(r3);
}

538
deps/v8/src/codegen-arm.cc → deps/v8/src/arm/codegen-arm.cc

@ -38,7 +38,8 @@
namespace v8 { namespace internal {
#define __ masm_->
#define __ ACCESS_MASM(masm_)
// -------------------------------------------------------------------------
// CodeGenState implementation.
@ -146,13 +147,13 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
frame_->EmitPush(r0);
frame_->CallRuntime(Runtime::kNewContext, 1); // r0 holds the result
if (kDebug) {
JumpTarget verified_true(this);
__ cmp(r0, Operand(cp));
verified_true.Branch(eq);
__ stop("NewContext: r0 is expected to be the same as cp");
verified_true.Bind();
}
#ifdef DEBUG
JumpTarget verified_true(this);
__ cmp(r0, Operand(cp));
verified_true.Branch(eq);
__ stop("NewContext: r0 is expected to be the same as cp");
verified_true.Bind();
#endif
// Update context local.
__ str(cp, frame_->Context());
}
@ -276,7 +277,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
frame_->Exit();
__ add(sp, sp, Operand((scope_->num_parameters() + 1) * kPointerSize));
__ mov(pc, lr);
__ Jump(lr);
}
// Code generation state must be reset.
@ -653,37 +654,27 @@ void CodeGenerator::ToBoolean(JumpTarget* true_target,
}
class GetPropertyStub : public CodeStub {
public:
GetPropertyStub() { }
private:
Major MajorKey() { return GetProperty; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
class SetPropertyStub : public CodeStub {
public:
SetPropertyStub() { }
private:
Major MajorKey() { return SetProperty; }
int MinorKey() { return 0; }
void Generate(MacroAssembler* masm);
};
class GenericBinaryOpStub : public CodeStub {
public:
explicit GenericBinaryOpStub(Token::Value op) : op_(op) { }
GenericBinaryOpStub(Token::Value op,
OverwriteMode mode)
: op_(op), mode_(mode) { }
private:
Token::Value op_;
OverwriteMode mode_;
// Minor key encoding in 16 bits.
class ModeBits: public BitField<OverwriteMode, 0, 2> {};
class OpBits: public BitField<Token::Value, 2, 14> {};
Major MajorKey() { return GenericBinaryOp; }
int MinorKey() { return static_cast<int>(op_); }
int MinorKey() {
// Encode the parameters in a unique 16 bit value.
return OpBits::encode(op_)
| ModeBits::encode(mode_);
}
void Generate(MacroAssembler* masm);
const char* GetName() {
@ -708,7 +699,8 @@ class GenericBinaryOpStub : public CodeStub {
};
void CodeGenerator::GenericBinaryOperation(Token::Value op) {
void CodeGenerator::GenericBinaryOperation(Token::Value op,
OverwriteMode overwrite_mode) {
VirtualFrame::SpilledScope spilled_scope(this);
// sp[0] : y
// sp[1] : x
@ -727,7 +719,7 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op) {
case Token::SAR: {
frame_->EmitPop(r0); // r0 : y
frame_->EmitPop(r1); // r1 : x
GenericBinaryOpStub stub(op);
GenericBinaryOpStub stub(op, overwrite_mode);
frame_->CallStub(&stub, 0);
break;
}
@ -767,11 +759,13 @@ class DeferredInlineSmiOperation: public DeferredCode {
DeferredInlineSmiOperation(CodeGenerator* generator,
Token::Value op,
int value,
bool reversed)
bool reversed,
OverwriteMode overwrite_mode)
: DeferredCode(generator),
op_(op),
value_(value),
reversed_(reversed) {
reversed_(reversed),
overwrite_mode_(overwrite_mode) {
set_comment("[ DeferredInlinedSmiOperation");
}
@ -781,6 +775,7 @@ class DeferredInlineSmiOperation: public DeferredCode {
Token::Value op_;
int value_;
bool reversed_;
OverwriteMode overwrite_mode_;
};
@ -844,7 +839,7 @@ void DeferredInlineSmiOperation::Generate() {
break;
}
GenericBinaryOpStub igostub(op_);
GenericBinaryOpStub igostub(op_, overwrite_mode_);
Result arg0 = generator()->allocator()->Allocate(r1);
ASSERT(arg0.is_valid());
Result arg1 = generator()->allocator()->Allocate(r0);
@ -856,7 +851,8 @@ void DeferredInlineSmiOperation::Generate() {
void CodeGenerator::SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed) {
bool reversed,
OverwriteMode mode) {
VirtualFrame::SpilledScope spilled_scope(this);
// NOTE: This is an attempt to inline (a bit) more of the code for
// some possible smi operations (like + and -) when (at least) one
@ -875,7 +871,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
switch (op) {
case Token::ADD: {
DeferredCode* deferred =
new DeferredInlineSmiOperation(this, op, int_value, reversed);
new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
__ add(r0, r0, Operand(value), SetCC);
deferred->enter()->Branch(vs);
@ -887,7 +883,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::SUB: {
DeferredCode* deferred =
new DeferredInlineSmiOperation(this, op, int_value, reversed);
new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
if (!reversed) {
__ sub(r0, r0, Operand(value), SetCC);
@ -905,7 +901,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
case Token::BIT_XOR:
case Token::BIT_AND: {
DeferredCode* deferred =
new DeferredInlineSmiOperation(this, op, int_value, reversed);
new DeferredInlineSmiOperation(this, op, int_value, reversed, mode);
__ tst(r0, Operand(kSmiTagMask));
deferred->enter()->Branch(ne);
switch (op) {
@ -925,12 +921,12 @@ void CodeGenerator::SmiOperation(Token::Value op,
__ mov(ip, Operand(value));
frame_->EmitPush(ip);
frame_->EmitPush(r0);
GenericBinaryOperation(op);
GenericBinaryOperation(op, mode);
} else {
int shift_value = int_value & 0x1f; // least significant 5 bits
DeferredCode* deferred =
new DeferredInlineSmiOperation(this, op, shift_value, false);
new DeferredInlineSmiOperation(this, op, shift_value, false, mode);
__ tst(r0, Operand(kSmiTagMask));
deferred->enter()->Branch(ne);
__ mov(r2, Operand(r0, ASR, kSmiTagSize)); // remove tags
@ -982,7 +978,7 @@ void CodeGenerator::SmiOperation(Token::Value op,
frame_->EmitPush(ip);
frame_->EmitPush(r0);
}
GenericBinaryOperation(op);
GenericBinaryOperation(op, mode);
break;
}
@ -1427,13 +1423,13 @@ void CodeGenerator::VisitWithEnterStatement(WithEnterStatement* node) {
} else {
frame_->CallRuntime(Runtime::kPushContext, 1);
}
if (kDebug) {
JumpTarget verified_true(this);
__ cmp(r0, Operand(cp));
verified_true.Branch(eq);
__ stop("PushContext: r0 is expected to be the same as cp");
verified_true.Bind();
}
#ifdef DEBUG
JumpTarget verified_true(this);
__ cmp(r0, Operand(cp));
verified_true.Branch(eq);
__ stop("PushContext: r0 is expected to be the same as cp");
verified_true.Bind();
#endif
// Update context local.
__ str(cp, frame_->Context());
ASSERT(frame_->height() == original_height);
@ -1487,8 +1483,8 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(
// Test for a Smi value in a HeapNumber.
__ tst(r0, Operand(kSmiTagMask));
is_smi.Branch(eq);
__ ldr(r1, MemOperand(r0, HeapObject::kMapOffset - kHeapObjectTag));
__ ldrb(r1, MemOperand(r1, Map::kInstanceTypeOffset - kHeapObjectTag));
__ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset));
__ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset));
__ cmp(r1, Operand(HEAP_NUMBER_TYPE));
default_target->Branch(ne);
frame_->EmitPush(r0);
@ -2339,7 +2335,9 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
VirtualFrame::SpilledScope spilled_scope(this);
Comment cmnt(masm_, "[ DebuggerStatement");
CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
frame_->CallRuntime(Runtime::kDebugBreak, 0);
#endif
// Ignore the return value.
ASSERT(frame_->height() == original_height);
}
@ -2523,7 +2521,9 @@ void CodeGenerator::LoadFromGlobalSlotCheckExtensions(Slot* slot,
if (s->is_eval_scope()) {
Label next, fast;
if (!context.is(tmp)) __ mov(tmp, Operand(context));
if (!context.is(tmp)) {
__ mov(tmp, Operand(context));
}
__ bind(&next);
// Terminate at global context.
__ ldr(tmp2, FieldMemOperand(tmp, HeapObject::kMapOffset));
@ -2934,15 +2934,24 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
LoadAndSpill(node->value());
} else {
// +=, *= and similar binary assignments.
// Get the old value of the lhs.
target.GetValueAndSpill(NOT_INSIDE_TYPEOF);
Literal* literal = node->value()->AsLiteral();
bool overwrite =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
if (literal != NULL && literal->handle()->IsSmi()) {
SmiOperation(node->binary_op(), literal->handle(), false);
SmiOperation(node->binary_op(),
literal->handle(),
false,
overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
frame_->EmitPush(r0);
} else {
LoadAndSpill(node->value());
GenericBinaryOperation(node->binary_op());
GenericBinaryOperation(node->binary_op(),
overwrite ? OVERWRITE_RIGHT : NO_OVERWRITE);
frame_->EmitPush(r0);
}
}
@ -3822,19 +3831,39 @@ void CodeGenerator::VisitBinaryOperation(BinaryOperation* node) {
// is a literal small integer.
Literal* lliteral = node->left()->AsLiteral();
Literal* rliteral = node->right()->AsLiteral();
// NOTE: The code below assumes that the slow cases (calls to runtime)
// never return a constant/immutable object.
bool overwrite_left =
(node->left()->AsBinaryOperation() != NULL &&
node->left()->AsBinaryOperation()->ResultOverwriteAllowed());
bool overwrite_right =
(node->right()->AsBinaryOperation() != NULL &&
node->right()->AsBinaryOperation()->ResultOverwriteAllowed());
if (rliteral != NULL && rliteral->handle()->IsSmi()) {
LoadAndSpill(node->left());
SmiOperation(node->op(), rliteral->handle(), false);
SmiOperation(node->op(),
rliteral->handle(),
false,
overwrite_right ? OVERWRITE_RIGHT : NO_OVERWRITE);
} else if (lliteral != NULL && lliteral->handle()->IsSmi()) {
LoadAndSpill(node->right());
SmiOperation(node->op(), lliteral->handle(), true);
SmiOperation(node->op(),
lliteral->handle(),
true,
overwrite_left ? OVERWRITE_LEFT : NO_OVERWRITE);
} else {
OverwriteMode overwrite_mode = NO_OVERWRITE;
if (overwrite_left) {
overwrite_mode = OVERWRITE_LEFT;
} else if (overwrite_right) {
overwrite_mode = OVERWRITE_RIGHT;
}
LoadAndSpill(node->left());
LoadAndSpill(node->right());
GenericBinaryOperation(node->op());
GenericBinaryOperation(node->op(), overwrite_mode);
}
frame_->EmitPush(r0);
}
@ -4067,7 +4096,8 @@ bool CodeGenerator::HasValidEntryRegisters() { return true; }
#undef __
#define __ masm->
#define __ ACCESS_MASM(masm)
Handle<String> Reference::GetName() {
ASSERT(type_ == NAMED);
@ -4305,167 +4335,80 @@ void Reference::SetValue(InitState init_state) {
}
void GetPropertyStub::Generate(MacroAssembler* masm) {
// sp[0]: key
// sp[1]: receiver
Label slow, fast;
// Get the key and receiver object from the stack.
__ ldm(ia, sp, r0.bit() | r1.bit());
// Check that the key is a smi.
__ tst(r0, Operand(kSmiTagMask));
__ b(ne, &slow);
__ mov(r0, Operand(r0, ASR, kSmiTagSize));
// Check that the object isn't a smi.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &slow);
// Check that the object is some kind of JS object EXCEPT JS Value type.
// In the case that the object is a value-wrapper object,
// we enter the runtime system to make sure that indexing into string
// objects works as intended.
ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
__ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset));
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r2, Operand(JS_OBJECT_TYPE));
__ b(lt, &slow);
// Get the elements array of the object.
__ ldr(r1, FieldMemOperand(r1, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
__ cmp(r3, Operand(Factory::hash_table_map()));
__ b(eq, &slow);
// Check that the key (index) is within bounds.
__ ldr(r3, FieldMemOperand(r1, Array::kLengthOffset));
__ cmp(r0, Operand(r3));
__ b(lo, &fast);
// Slow case: Push extra copies of the arguments (2).
__ bind(&slow);
__ ldm(ia, sp, r0.bit() | r1.bit());
__ stm(db_w, sp, r0.bit() | r1.bit());
// Do tail-call to runtime routine.
__ TailCallRuntime(ExternalReference(Runtime::kGetProperty), 2);
// Fast case: Do the load.
__ bind(&fast);
__ add(r3, r1, Operand(Array::kHeaderSize - kHeapObjectTag));
__ ldr(r0, MemOperand(r3, r0, LSL, kPointerSizeLog2));
__ cmp(r0, Operand(Factory::the_hole_value()));
// In case the loaded value is the_hole we have to consult GetProperty
// to ensure the prototype chain is searched.
__ b(eq, &slow);
__ StubReturn(1);
}
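
The stub deleted above still shows the canonical shape of a fast keyed load: a smi key, fast (non-dictionary) elements, a bounds check, and a hole check that falls back to the runtime so the prototype chain gets searched. A minimal C++ model of that logic, with invented stand-in types (Elements and kTheHole are not V8 names):

#include <cstddef>

struct Elements {
  std::size_t length;
  const void** slots;
};

// Stand-in for Factory::the_hole_value(); hitting a hole means GetProperty
// must be consulted so the prototype chain is searched.
const void* const kTheHole = nullptr;

// Returns the element, or nullptr to signal "take the slow path".
const void* FastKeyedLoad(const Elements& elems, std::size_t index) {
  if (index >= elems.length) return nullptr;  // out of bounds: slow case
  const void* value = elems.slots[index];
  if (value == kTheHole) return nullptr;      // hole: slow case
  return value;
}
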
void SetPropertyStub::Generate(MacroAssembler* masm) {
// r0 : value
// sp[0] : key
// sp[1] : receiver
Label slow, fast, array, extra, exit;
// Get the key and the object from the stack.
__ ldm(ia, sp, r1.bit() | r3.bit()); // r1 = key, r3 = receiver
// Check that the key is a smi.
__ tst(r1, Operand(kSmiTagMask));
__ b(ne, &slow);
// Check that the object isn't a smi.
__ tst(r3, Operand(kSmiTagMask));
__ b(eq, &slow);
// Get the type of the object from its map.
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
__ ldrb(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset));
// Check if the object is a JS array or not.
__ cmp(r2, Operand(JS_ARRAY_TYPE));
__ b(eq, &array);
// Check that the object is some kind of JS object.
__ cmp(r2, Operand(FIRST_JS_OBJECT_TYPE));
__ b(lt, &slow);
// Object case: Check key against length in the elements array.
__ ldr(r3, FieldMemOperand(r3, JSObject::kElementsOffset));
// Check that the object is in fast mode (not dictionary).
__ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
__ cmp(r2, Operand(Factory::hash_table_map()));
__ b(eq, &slow);
// Untag the key (for checking against untagged length in the fixed array).
__ mov(r1, Operand(r1, ASR, kSmiTagSize));
// Compute address to store into and check array bounds.
__ add(r2, r3, Operand(Array::kHeaderSize - kHeapObjectTag));
__ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2));
__ ldr(ip, FieldMemOperand(r3, Array::kLengthOffset));
__ cmp(r1, Operand(ip));
__ b(lo, &fast);
// Slow case: Push extra copies of the arguments (3).
static void HandleBinaryOpSlowCases(MacroAssembler* masm,
Label* not_smi,
const Builtins::JavaScript& builtin,
Token::Value operation,
int swi_number,
OverwriteMode mode) {
Label slow;
if (mode == NO_OVERWRITE) {
__ bind(not_smi);
}
__ bind(&slow);
__ ldm(ia, sp, r1.bit() | r3.bit()); // r0 == value, r1 == key, r3 == object
__ stm(db_w, sp, r0.bit() | r1.bit() | r3.bit());
// Do tail-call to runtime routine.
__ TailCallRuntime(ExternalReference(Runtime::kSetProperty), 3);
// Extra capacity case: Check if there is extra capacity to
// perform the store and update the length. Used for adding one
// element to the array by writing to array[array.length].
// r0 == value, r1 == key, r2 == elements, r3 == object
__ bind(&extra);
__ b(ne, &slow); // do not leave holes in the array
__ mov(r1, Operand(r1, ASR, kSmiTagSize)); // untag
__ ldr(ip, FieldMemOperand(r2, Array::kLengthOffset));
__ cmp(r1, Operand(ip));
__ b(hs, &slow);
__ mov(r1, Operand(r1, LSL, kSmiTagSize)); // restore tag
__ add(r1, r1, Operand(1 << kSmiTagSize)); // and increment
__ str(r1, FieldMemOperand(r3, JSArray::kLengthOffset));
__ mov(r3, Operand(r2));
// NOTE: Computing the address to store into must take into account
// the fact that the key has been incremented.
int displacement = Array::kHeaderSize - kHeapObjectTag -
((1 << kSmiTagSize) * 2);
__ add(r2, r2, Operand(displacement));
__ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
__ b(&fast);
// Array case: Get the length and the elements array from the JS
// array. Check that the array is in fast mode; if it is, the
// length is always a smi.
// r0 == value, r3 == object
__ bind(&array);
__ ldr(r2, FieldMemOperand(r3, JSObject::kElementsOffset));
__ ldr(r1, FieldMemOperand(r2, HeapObject::kMapOffset));
__ cmp(r1, Operand(Factory::hash_table_map()));
__ b(eq, &slow);
__ push(r1);
__ push(r0);
__ mov(r0, Operand(1)); // Set number of arguments.
__ InvokeBuiltin(builtin, JUMP_JS); // Tail call.
// Check the key against the length in the array, compute the
// address to store into and fall through to fast case.
__ ldr(r1, MemOperand(sp));
// r0 == value, r1 == key, r2 == elements, r3 == object.
__ ldr(ip, FieldMemOperand(r3, JSArray::kLengthOffset));
__ cmp(r1, Operand(ip));
__ b(hs, &extra);
__ mov(r3, Operand(r2));
__ add(r2, r2, Operand(Array::kHeaderSize - kHeapObjectTag));
__ add(r2, r2, Operand(r1, LSL, kPointerSizeLog2 - kSmiTagSize));
// Fast case: Do the store.
// r0 == value, r2 == address to store into, r3 == elements
__ bind(&fast);
__ str(r0, MemOperand(r2));
// Skip write barrier if the written value is a smi.
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &exit);
// Update write barrier for the elements array address.
__ sub(r1, r2, Operand(r3));
__ RecordWrite(r3, r1, r2);
__ bind(&exit);
__ StubReturn(1);
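
The smi test guarding the write barrier above leans on V8's pointer tagging: with kSmiTag == 0, a value with a clear low bit is an immediate integer, and only stored heap pointers can create references the generational collector has to track. A hedged C++ sketch of the store sequence (record_write is a hypothetical callback, not a V8 API):

#include <cstdint>

const std::uintptr_t kSmiTagMask = 1;  // low bit clear => smi, set => pointer

// Store a tagged value; record the slot only when it now holds a pointer.
void StoreWithBarrier(std::uintptr_t* slot, std::uintptr_t value,
                      void (*record_write)(std::uintptr_t*)) {
  *slot = value;
  if ((value & kSmiTagMask) != 0) {  // heap object: GC must remember the slot
    record_write(slot);
  }
}
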
// Could it be a double-double op? If we already have a place to put
// the answer then we can do the op and skip the builtin and runtime call.
if (mode != NO_OVERWRITE) {
__ bind(not_smi);
__ tst(r0, Operand(kSmiTagMask));
__ b(eq, &slow); // We can't handle a Smi-double combination yet.
__ tst(r1, Operand(kSmiTagMask));
__ b(eq, &slow); // We can't handle a Smi-double combination yet.
// Get map of r0 into r2.
__ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset));
// Get type of r0 into r3.
__ ldrb(r3, FieldMemOperand(r2, Map::kInstanceTypeOffset));
__ cmp(r3, Operand(HEAP_NUMBER_TYPE));
__ b(ne, &slow);
// Get type of r1 into r3.
__ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
// Check they are both the same map (heap number map).
__ cmp(r2, r3);
__ b(ne, &slow);
// Both are doubles.
// Calling convention says that second double is in r2 and r3.
__ ldr(r2, FieldMemOperand(r0, HeapNumber::kValueOffset));
__ ldr(r3, FieldMemOperand(r0, HeapNumber::kValueOffset + kPointerSize));
__ push(lr);
if (mode == OVERWRITE_LEFT) {
__ push(r1);
} else {
__ push(r0);
}
// Calling convention says that first double is in r0 and r1.
__ ldr(r0, FieldMemOperand(r1, HeapNumber::kValueOffset));
__ ldr(r1, FieldMemOperand(r1, HeapNumber::kValueOffset + kPointerSize));
// Call C routine that may not cause GC or other trouble.
__ mov(r5, Operand(ExternalReference::double_fp_operation(operation)));
#if !defined(__arm__)
// Notify the simulator that we are calling an add routine in C.
__ swi(swi_number);
#else
// Actually call the add routine written in C.
__ Call(r5);
#endif
// Store answer in the overwritable heap number.
__ pop(r4);
#if !defined(__ARM_EABI__) && defined(__arm__)
// Double returned in fp coprocessor registers 0 and 1, encoded as register
// cr8. Offsets must be divisible by 4 for the coprocessor, so we need to
// subtract the tag from r4.
__ sub(r5, r4, Operand(kHeapObjectTag));
__ stc(p1, cr8, MemOperand(r5, HeapNumber::kValueOffset));
#else
// Double returned in core registers r0 and r1.
__ str(r0, FieldMemOperand(r4, HeapNumber::kValueOffset));
__ str(r1, FieldMemOperand(r4, HeapNumber::kValueOffset + kPointerSize));
#endif
__ mov(r0, Operand(r4));
// And we are done.
__ pop(pc);
}
}
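
The push of r1 or r0 above picks which heap number the result is written into: when the expression tree marks an operand as a disposable temporary, the stub reuses its storage instead of allocating. A toy C++ model of that decision (types invented; the real stub simply takes the slow path under NO_OVERWRITE rather than allocating):

#include <cstdio>

enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

struct HeapNumber { double value; };

// Pick the cell the result may be written into.
HeapNumber* ResultCell(HeapNumber* left, HeapNumber* right,
                       OverwriteMode mode) {
  switch (mode) {
    case OVERWRITE_LEFT:  return left;               // left is a temporary
    case OVERWRITE_RIGHT: return right;              // right is a temporary
    default:              return new HeapNumber{0};  // sketch only
  }
}

int main() {
  HeapNumber a{1.5}, b{2.25};
  HeapNumber* r = ResultCell(&a, &b, OVERWRITE_RIGHT);
  r->value = a.value + b.value;  // the double-double fast path
  std::printf("%g\n", b.value);  // prints 3.75: b was reused for the result
}
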
@ -4474,89 +4417,84 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// r0 : y
// result : r0
// All ops need to know whether we are dealing with two Smis. Set up r2 to
// tell us that.
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
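
Hoisting the orr out of the switch works because of the tag encoding: with kSmiTag == 0 a smi has its low bit clear, so the OR of the two words has the bit clear only if both operands are smis. The same test in plain C++:

#include <cstdint>

const std::uint32_t kSmiTagMask = 1;

// True iff both tagged words are smis; one test covers both operands.
bool BothSmis(std::uint32_t x, std::uint32_t y) {
  return ((x | y) & kSmiTagMask) == 0;
}
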
switch (op_) {
case Token::ADD: {
Label slow, exit;
// fast path
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
__ add(r0, r1, Operand(r0), SetCC); // add y optimistically
// go slow-path in case of overflow
__ b(vs, &slow);
// go slow-path in case of non-smi operands
ASSERT(kSmiTag == 0); // adjust code below
Label not_smi;
// Fast path.
ASSERT(kSmiTag == 0); // Adjust code below.
__ tst(r2, Operand(kSmiTagMask));
__ b(eq, &exit);
// slow path
__ bind(&slow);
__ sub(r0, r0, Operand(r1)); // revert optimistic add
__ push(r1);
__ push(r0);
__ mov(r0, Operand(1)); // set number of arguments
__ InvokeBuiltin(Builtins::ADD, JUMP_JS);
// done
__ bind(&exit);
__ b(ne, &not_smi);
__ add(r0, r1, Operand(r0), SetCC); // Add y optimistically.
// Return if no overflow.
__ Ret(vc);
__ sub(r0, r0, Operand(r1)); // Revert optimistic add.
HandleBinaryOpSlowCases(masm,
&not_smi,
Builtins::ADD,
Token::ADD,
assembler::arm::simulator_fp_add,
mode_);
break;
}
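
The ADD fast path adds the tagged words before knowing the operation is safe: smi tags are zero, so the tagged add is the real add, and the conditional return on vc keeps the common case free of an explicit compare-and-branch. A C++ sketch of the same optimistic pattern, assuming the GCC/Clang __builtin_add_overflow intrinsic:

#include <cstdint>

// Try the add optimistically; on overflow report failure so the caller can
// revert and take the slow path, mirroring the sub/fall-through above.
bool OptimisticSmiAdd(std::int32_t x, std::int32_t y, std::int32_t* result) {
  std::int32_t sum;
  if (__builtin_add_overflow(x, y, &sum)) {
    return false;  // V flag equivalent: slow path
  }
  *result = sum;   // tags are zero, so the tagged sum is already tagged
  return true;
}
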
case Token::SUB: {
Label slow, exit;
// fast path
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
__ sub(r3, r1, Operand(r0), SetCC); // subtract y optimistically
// go slow-path in case of overflow
__ b(vs, &slow);
// go slow-path in case of non-smi operands
ASSERT(kSmiTag == 0); // adjust code below
Label not_smi;
// Fast path.
ASSERT(kSmiTag == 0); // Adjust code below.
__ tst(r2, Operand(kSmiTagMask));
__ mov(r0, Operand(r3), LeaveCC, eq); // conditionally set r0 to result
__ b(eq, &exit);
// slow path
__ bind(&slow);
__ push(r1);
__ push(r0);
__ mov(r0, Operand(1)); // set number of arguments
__ InvokeBuiltin(Builtins::SUB, JUMP_JS);
// done
__ bind(&exit);
__ b(ne, &not_smi);
__ sub(r0, r1, Operand(r0), SetCC); // Subtract y optimistically.
// Return if no overflow.
__ Ret(vc);
__ sub(r0, r1, Operand(r0)); // Revert optimistic subtract.
HandleBinaryOpSlowCases(masm,
&not_smi,
Builtins::SUB,
Token::SUB,
assembler::arm::simulator_fp_sub,
mode_);
break;
}
case Token::MUL: {
Label slow, exit;
// tag check
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
Label not_smi, slow;
ASSERT(kSmiTag == 0); // adjust code below
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow);
// remove tag from one operand (but keep sign), so that result is smi
__ b(ne, &not_smi);
// Remove tag from one operand (but keep sign), so that result is Smi.
__ mov(ip, Operand(r0, ASR, kSmiTagSize));
// do multiplication
__ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1
// go slow on overflows (overflow bit is not set)
// Do multiplication
__ smull(r3, r2, r1, ip); // r3 = lower 32 bits of ip*r1.
// Go slow on overflows (overflow bit is not set).
__ mov(ip, Operand(r3, ASR, 31));
__ cmp(ip, Operand(r2)); // no overflow if higher 33 bits are identical
__ b(ne, &slow);
// go slow on zero result to handle -0
// Go slow on zero result to handle -0.
__ tst(r3, Operand(r3));
__ mov(r0, Operand(r3), LeaveCC, ne);
__ b(ne, &exit);
// slow case
__ Ret(ne);
// Slow case.
__ bind(&slow);
__ push(r1);
__ push(r0);
__ mov(r0, Operand(1)); // set number of arguments
__ InvokeBuiltin(Builtins::MUL, JUMP_JS);
// done
__ bind(&exit);
HandleBinaryOpSlowCases(masm,
&not_smi,
Builtins::MUL,
Token::MUL,
assembler::arm::simulator_fp_mul,
mode_);
break;
}
case Token::BIT_OR:
case Token::BIT_AND:
case Token::BIT_XOR: {
Label slow, exit;
// tag check
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
Label slow;
ASSERT(kSmiTag == 0); // adjust code below
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow);
@ -4566,7 +4504,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::BIT_XOR: __ eor(r0, r0, Operand(r1)); break;
default: UNREACHABLE();
}
__ b(&exit);
__ Ret();
__ bind(&slow);
__ push(r1); // restore stack
__ push(r0);
@ -4584,16 +4522,13 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
default:
UNREACHABLE();
}
__ bind(&exit);
break;
}
case Token::SHL:
case Token::SHR:
case Token::SAR: {
Label slow, exit;
// tag check
__ orr(r2, r1, Operand(r0)); // r2 = x | y;
Label slow;
ASSERT(kSmiTag == 0); // adjust code below
__ tst(r2, Operand(kSmiTagMask));
__ b(ne, &slow);
@ -4633,7 +4568,7 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// tag result and store it in r0
ASSERT(kSmiTag == 0); // adjust code below
__ mov(r0, Operand(r3, LSL, kSmiTagSize));
__ b(&exit);
__ Ret();
// slow case
__ bind(&slow);
__ push(r1); // restore stack
@ -4645,13 +4580,13 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SHL: __ InvokeBuiltin(Builtins::SHL, JUMP_JS); break;
default: UNREACHABLE();
}
__ bind(&exit);
break;
}
default: UNREACHABLE();
}
__ Ret();
// This code should be unreachable.
__ stop("Unreachable");
}
@ -4661,7 +4596,9 @@ void StackCheckStub::Generate(MacroAssembler* masm) {
__ ldr(ip, MemOperand(ip));
__ cmp(sp, Operand(ip));
__ b(hs, &within_limit);
// Do tail-call to runtime routine.
// Do tail-call to runtime routine. Runtime routines expect at least one
// argument, so give it a Smi.
__ mov(r0, Operand(Smi::FromInt(0)));
__ push(r0);
__ TailCallRuntime(ExternalReference(Runtime::kStackGuard), 1);
__ bind(&within_limit);
@ -4721,7 +4658,11 @@ void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
__ mov(cp, Operand(0), LeaveCC, eq);
// Restore cp otherwise.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
if (kDebug && FLAG_debug_code) __ mov(lr, Operand(pc));
#ifdef DEBUG
if (FLAG_debug_code) {
__ mov(lr, Operand(pc));
}
#endif
__ pop(pc);
}
@ -4784,7 +4725,11 @@ void CEntryStub::GenerateThrowOutOfMemory(MacroAssembler* masm) {
__ mov(cp, Operand(0), LeaveCC, eq);
// Restore cp otherwise.
__ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
if (kDebug && FLAG_debug_code) __ mov(lr, Operand(pc));
#ifdef DEBUG
if (FLAG_debug_code) {
__ mov(lr, Operand(pc));
}
#endif
__ pop(pc);
}
@ -5043,9 +4988,11 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
}
__ ldr(ip, MemOperand(ip)); // deref address
// Branch and link to JSEntryTrampoline
// Branch and link to JSEntryTrampoline. We don't use the double underscore
// macro for the add instruction because we don't want the coverage tool
// inserting instructions here after we read the pc.
__ mov(lr, Operand(pc));
__ add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
masm->add(pc, ip, Operand(Code::kHeaderSize - kHeapObjectTag));
// Unlink this frame from the handler chain. When reading the
// address of the next handler, there is no need to use the address
@ -5057,6 +5004,7 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// No need to restore registers
__ add(sp, sp, Operand(StackHandlerConstants::kSize));
__ bind(&exit); // r0 holds result
// Restore the top frame descriptors from the stack.
__ pop(r3);
@ -5068,7 +5016,9 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) {
// Restore callee-saved registers and return.
#ifdef DEBUG
if (FLAG_debug_code) __ mov(lr, Operand(pc));
if (FLAG_debug_code) {
__ mov(lr, Operand(pc));
}
#endif
__ ldm(ia_w, sp, kCalleeSaved | pc.bit());
}
@ -5084,13 +5034,13 @@ void ArgumentsAccessStub::GenerateReadLength(MacroAssembler* masm) {
// Nothing to do: The formal number of parameters has already been
// passed in register r0 by calling function. Just return it.
__ mov(pc, lr);
__ Jump(lr);
// Arguments adaptor case: Read the arguments length from the
// adaptor frame and return it.
__ bind(&adaptor);
__ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ mov(pc, lr);
__ Jump(lr);
}
@ -5122,7 +5072,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ sub(r3, r0, r1);
__ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(r0, MemOperand(r3, kDisplacement));
__ mov(pc, lr);
__ Jump(lr);
// Arguments adaptor case: Check index against actual arguments
// limit found in the arguments adaptor frame. Use unsigned
@ -5136,7 +5086,7 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
__ sub(r3, r0, r1);
__ add(r3, r2, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize));
__ ldr(r0, MemOperand(r3, kDisplacement));
__ mov(pc, lr);
__ Jump(lr);
// Slow-case: Handle non-smi or out-of-bounds access to arguments
// by calling the runtime system.

28
deps/v8/src/codegen-arm.h → deps/v8/src/arm/codegen-arm.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_CODEGEN_ARM_H_
#define V8_CODEGEN_ARM_H_
#ifndef V8_ARM_CODEGEN_ARM_H_
#define V8_ARM_CODEGEN_ARM_H_
namespace v8 { namespace internal {
@ -35,9 +35,6 @@ class DeferredCode;
class RegisterAllocator;
class RegisterFile;
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
@ -292,10 +289,13 @@ class CodeGenerator: public AstVisitor {
void ToBoolean(JumpTarget* true_target, JumpTarget* false_target);
void GenericBinaryOperation(Token::Value op);
void GenericBinaryOperation(Token::Value op, OverwriteMode overwrite_mode);
void Comparison(Condition cc, bool strict = false);
void SmiOperation(Token::Value op, Handle<Object> value, bool reversed);
void SmiOperation(Token::Value op,
Handle<Object> value,
bool reversed,
OverwriteMode mode);
void CallWithArguments(ZoneList<Expression*>* arguments, int position);
@ -303,7 +303,17 @@ class CodeGenerator: public AstVisitor {
void Branch(bool if_true, JumpTarget* target);
void CheckStack();
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
};
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
static bool PatchInlineRuntimeEntry(Handle<String> name,
const InlineRuntimeLUT& new_entry,
InlineRuntimeLUT* old_entry);
Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
@ -433,6 +443,8 @@ class CodeGenerator: public AstVisitor {
// in a spilled state.
bool in_spilled_code_;
static InlineRuntimeLUT kInlineRuntimeLUT[];
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
@ -443,4 +455,4 @@ class CodeGenerator: public AstVisitor {
} } // namespace v8::internal
#endif // V8_CODEGEN_ARM_H_
#endif // V8_ARM_CODEGEN_ARM_H_

13
deps/v8/src/constants-arm.h → deps/v8/src/arm/constants-arm.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_CONSTANTS_ARM_H_
#define V8_CONSTANTS_ARM_H_
#ifndef V8_ARM_CONSTANTS_ARM_H_
#define V8_ARM_CONSTANTS_ARM_H_
namespace assembler { namespace arm {
@ -106,7 +106,12 @@ enum SoftwareInterruptCodes {
call_rt_r5 = 0x10,
call_rt_r2 = 0x11,
// break point
break_point = 0x20
break_point = 0x20,
// FP operations. These simulate calling into C for a moment to do fp ops.
// They should trash all caller-save registers.
simulator_fp_add = 0x21,
simulator_fp_sub = 0x22,
simulator_fp_mul = 0x23
};
@ -232,4 +237,4 @@ class Instr {
} } // namespace assembler::arm
#endif // V8_CONSTANTS_ARM_H_
#endif // V8_ARM_CONSTANTS_ARM_H_

0
deps/v8/src/cpu-arm.cc → deps/v8/src/arm/cpu-arm.cc

5
deps/v8/src/debug-arm.cc → deps/v8/src/arm/debug-arm.cc

@ -32,7 +32,7 @@
namespace v8 { namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Currently debug break is not supported in frame exit code on ARM.
bool BreakLocationIterator::IsDebugBreakAtReturn() {
return false;
@ -58,7 +58,7 @@ bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
}
#define __ masm->
#define __ ACCESS_MASM(masm)
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
@ -191,5 +191,6 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
#undef __
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal

9
deps/v8/src/disasm-arm.cc → deps/v8/src/arm/disasm-arm.cc

@ -261,6 +261,15 @@ void Decoder::PrintSoftwareInterrupt(SoftwareInterruptCodes swi) {
case break_point:
Print("break_point");
return;
case simulator_fp_add:
Print("simulator_fp_add");
return;
case simulator_fp_mul:
Print("simulator_fp_mul");
return;
case simulator_fp_sub:
Print("simulator_fp_sub");
return;
default:
out_buffer_pos_ += v8i::OS::SNPrintF(out_buffer_ + out_buffer_pos_,
"%d",

2
deps/v8/src/frames-arm.cc → deps/v8/src/arm/frames-arm.cc

@ -28,7 +28,7 @@
#include "v8.h"
#include "frames-inl.h"
#include "assembler-arm-inl.h"
#include "arm/assembler-arm-inl.h"
namespace v8 { namespace internal {

6
deps/v8/src/frames-arm.h → deps/v8/src/arm/frames-arm.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_FRAMES_ARM_H_
#define V8_FRAMES_ARM_H_
#ifndef V8_ARM_FRAMES_ARM_H_
#define V8_ARM_FRAMES_ARM_H_
namespace v8 { namespace internal {
@ -376,4 +376,4 @@ inline Object* JavaScriptFrame::function_slot_object() const {
} } // namespace v8::internal
#endif // V8_FRAMES_ARM_H_
#endif // V8_ARM_FRAMES_ARM_H_

57
deps/v8/src/ic-arm.cc → deps/v8/src/arm/ic-arm.cc

@ -39,7 +39,7 @@ namespace v8 { namespace internal {
// Static IC stub generators.
//
#define __ masm->
#define __ ACCESS_MASM(masm)
// Helper function used from LoadIC/CallIC GenerateNormal.
@ -96,7 +96,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
// Compute the masked index: (hash + i + i * i) & mask.
__ ldr(t1, FieldMemOperand(r2, String::kLengthOffset));
__ mov(t1, Operand(t1, LSR, String::kHashShift));
if (i > 0) __ add(t1, t1, Operand(Dictionary::GetProbeOffset(i)));
if (i > 0) {
__ add(t1, t1, Operand(Dictionary::GetProbeOffset(i)));
}
__ and_(t1, t1, Operand(r3));
// Scale the index by multiplying by the element size.
@ -125,27 +127,20 @@ static void GenerateDictionaryLoad(MacroAssembler* masm,
}
// Helper function used to check that a value is either not a function
// or is loaded if it is a function.
static void GenerateCheckNonFunctionOrLoaded(MacroAssembler* masm,
Label* miss,
Register value,
Register scratch) {
// Helper function used to check that a value is either not an object
// or is loaded if it is an object.
static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm,
Label* miss,
Register value,
Register scratch) {
Label done;
// Check if the value is a Smi.
__ tst(value, Operand(kSmiTagMask));
__ b(eq, &done);
// Check if the value is a function.
__ ldr(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
__ cmp(scratch, Operand(JS_FUNCTION_TYPE));
__ b(ne, &done);
// Check if the function has been loaded.
__ ldr(scratch,
FieldMemOperand(value, JSFunction::kSharedFunctionInfoOffset));
__ ldr(scratch,
FieldMemOperand(scratch, SharedFunctionInfo::kLazyLoadDataOffset));
__ cmp(scratch, Operand(Factory::undefined_value()));
// Check if the object has been loaded.
__ ldr(scratch, FieldMemOperand(value, JSObject::kMapOffset));
__ ldrb(scratch, FieldMemOperand(scratch, Map::kBitField2Offset));
__ tst(scratch, Operand(1 << Map::kNeedsLoading));
__ b(ne, miss);
__ bind(&done);
}
@ -282,9 +277,9 @@ static void GenerateNormalHelper(MacroAssembler* masm,
__ b(ne, miss);
// Check that the function has been loaded.
__ ldr(r0, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r0, FieldMemOperand(r0, SharedFunctionInfo::kLazyLoadDataOffset));
__ cmp(r0, Operand(Factory::undefined_value()));
__ ldr(r0, FieldMemOperand(r1, JSObject::kMapOffset));
__ ldrb(r0, FieldMemOperand(r0, Map::kBitField2Offset));
__ tst(r0, Operand(1 << Map::kNeedsLoading));
__ b(ne, miss);
// Patch the receiver with the global proxy if necessary.
@ -468,7 +463,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
__ bind(&probe);
GenerateDictionaryLoad(masm, &miss, r1, r0);
GenerateCheckNonFunctionOrLoaded(masm, &miss, r0, r1);
GenerateCheckNonObjectOrLoaded(masm, &miss, r0, r1);
__ Ret();
// Global object access: Check access rights.
@ -502,10 +497,18 @@ void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
}
// TODO(181): Implement map patching once loop nesting is tracked on
// the ARM platform so we can generate inlined fast-case code for
// array indexing in loops.
void KeyedLoadIC::PatchInlinedMapCheck(Address address, Object* value) { }
// TODO(181): Implement map patching once loop nesting is tracked on the
// ARM platform so we can generate inlined fast-case code loads in
// loops.
void LoadIC::ClearInlinedVersion(Address address) {}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
return false;
}
void KeyedLoadIC::ClearInlinedVersion(Address address) {}
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
return false;
}
Object* KeyedLoadIC_Miss(Arguments args);

2
deps/v8/src/jump-target-arm.cc → deps/v8/src/arm/jump-target-arm.cc

@ -35,7 +35,7 @@ namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
#define __ masm_->
#define __ ACCESS_MASM(masm_)
void JumpTarget::DoJump() {
ASSERT(cgen_ != NULL);

27
deps/v8/src/macro-assembler-arm.cc → deps/v8/src/arm/macro-assembler-arm.cc

@ -168,11 +168,11 @@ void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
}
void MacroAssembler::Ret() {
void MacroAssembler::Ret(Condition cond) {
#if USE_BX
bx(lr);
bx(lr, cond);
#else
mov(pc, Operand(lr));
mov(pc, Operand(lr), LeaveCC, cond);
#endif
}
@ -320,16 +320,19 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
add(r6, fp, Operand(r4, LSL, kPointerSizeLog2));
add(r6, r6, Operand(ExitFrameConstants::kPPDisplacement - kPointerSize));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
// location. This is needed to allow nested break points.
if (type == StackFrame::EXIT_DEBUG) {
// Use sp as base to push.
CopyRegistersFromMemoryToStack(sp, kJSCallerSaved);
}
#endif
}
void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
if (type == StackFrame::EXIT_DEBUG) {
@ -339,6 +342,7 @@ void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
add(r3, fp, Operand(kOffset));
CopyRegistersFromStackToMemory(r3, r2, kJSCallerSaved);
}
#endif
// Clear top frame.
mov(r3, Operand(0));
@ -348,9 +352,9 @@ void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
// Restore current context from top and clear it in debug mode.
mov(ip, Operand(ExternalReference(Top::k_context_address)));
ldr(cp, MemOperand(ip));
if (kDebug) {
str(r3, MemOperand(ip));
}
#ifdef DEBUG
str(r3, MemOperand(ip));
#endif
// Pop the arguments, restore registers, and return.
mov(sp, Operand(fp)); // respect ABI stack constraint
@ -491,6 +495,7 @@ void MacroAssembler::InvokeFunction(Register fun,
}
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::SaveRegistersToMemory(RegList regs) {
ASSERT((regs & ~kJSCallerSaved) == 0);
// Copy the content of registers to memory location.
@ -548,7 +553,7 @@ void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
}
}
}
#endif
void MacroAssembler::PushTryHandler(CodeLocation try_location,
HandlerType type) {
@ -674,10 +679,10 @@ void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
// Load current lexical context from the stack frame.
ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
// In debug mode, make sure the lexical context is set.
if (kDebug) {
cmp(scratch, Operand(0));
Check(ne, "we should not have an empty lexical context");
}
#ifdef DEBUG
cmp(scratch, Operand(0));
Check(ne, "we should not have an empty lexical context");
#endif
// Load the global context of the current context.
int offset = Context::kHeaderSize + Context::GLOBAL_INDEX * kPointerSize;

20
deps/v8/src/macro-assembler-arm.h → deps/v8/src/arm/macro-assembler-arm.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_MACRO_ASSEMBLER_ARM_H_
#define V8_MACRO_ASSEMBLER_ARM_H_
#ifndef V8_ARM_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_MACRO_ASSEMBLER_ARM_H_
#include "assembler.h"
@ -86,7 +86,7 @@ class MacroAssembler: public Assembler {
void Call(Register target, Condition cond = al);
void Call(byte* target, RelocInfo::Mode rmode, Condition cond = al);
void Call(Handle<Code> code, RelocInfo::Mode rmode, Condition cond = al);
void Ret();
void Ret(Condition cond = al);
// Jumps to the label at the index given by the Smi in "index".
void SmiJumpTable(Register index, Vector<Label*> targets);
@ -138,6 +138,7 @@ class MacroAssembler: public Assembler {
InvokeFlag flag);
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
@ -147,7 +148,7 @@ class MacroAssembler: public Assembler {
void CopyRegistersFromStackToMemory(Register base,
Register scratch,
RegList regs);
#endif
// ---------------------------------------------------------------------------
// Exception handling
@ -297,7 +298,16 @@ static inline MemOperand FieldMemOperand(Register object, int offset) {
}
#ifdef GENERATED_CODE_COVERAGE
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) masm->stop(__FILE_LINE__); masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
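
The two-level stringify expansion is what turns __LINE__ (an integer) into part of a string literal, so under GENERATED_CODE_COVERAGE every ACCESS_MASM emit plants a stop() carrying its own file:line; Debugger::Stop in the simulator then logs that string and patches the stop into a nop. The trick in isolation (macro names changed here, since __FILE_LINE__ is a reserved identifier):

#include <cstdio>

#define COV_STRINGIFY(x) #x
#define COV_TOSTRING(x) COV_STRINGIFY(x)
#define COV_FILE_LINE __FILE__ ":" COV_TOSTRING(__LINE__)

int main() {
  std::puts(COV_FILE_LINE);  // prints e.g. "demo.cc:8"
}
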
} } // namespace v8::internal
#endif // V8_MACRO_ASSEMBLER_ARM_H_
#endif // V8_ARM_MACRO_ASSEMBLER_ARM_H_

2
deps/v8/src/regexp-macro-assembler-arm.cc → deps/v8/src/arm/regexp-macro-assembler-arm.cc

@ -28,7 +28,7 @@
#include "v8.h"
#include "ast.h"
#include "regexp-macro-assembler.h"
#include "regexp-macro-assembler-arm.h"
#include "arm/regexp-macro-assembler-arm.h"
namespace v8 { namespace internal {

6
deps/v8/src/regexp-macro-assembler-arm.h → deps/v8/src/arm/regexp-macro-assembler-arm.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef REGEXP_MACRO_ASSEMBLER_ARM_H_
#define REGEXP_MACRO_ASSEMBLER_ARM_H_
#ifndef V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
#define V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_
namespace v8 { namespace internal {
@ -38,4 +38,4 @@ class RegExpMacroAssemblerARM: public RegExpMacroAssembler {
}} // namespace v8::internal
#endif /* REGEXP_MACRO_ASSEMBLER_ARM_H_ */
#endif // V8_ARM_REGEXP_MACRO_ASSEMBLER_ARM_H_

8
deps/v8/src/register-allocator-arm.cc → deps/v8/src/arm/register-allocator-arm.cc

@ -66,6 +66,14 @@ void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
}
bool RegisterAllocator::IsReserved(int reg_code) {
return (reg_code == sp.code())
|| (reg_code == fp.code())
|| (reg_code == cp.code())
|| (reg_code == pc.code());
}
void RegisterAllocator::Initialize() {
Reset();
// The following registers are live on function entry, saved in the

95
deps/v8/src/simulator-arm.cc → deps/v8/src/arm/simulator-arm.cc

@ -30,8 +30,8 @@
#include "v8.h"
#include "disasm.h"
#include "constants-arm.h"
#include "simulator-arm.h"
#include "arm/constants-arm.h"
#include "arm/simulator-arm.h"
#if !defined(__arm__)
@ -90,12 +90,44 @@ Debugger::~Debugger() {
}
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;
static void InitializeCoverage() {
char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
if (file_name != NULL) {
coverage_log = fopen(file_name, "aw+");
}
}
void Debugger::Stop(Instr* instr) {
char* str = reinterpret_cast<char*>(instr->InstructionBits() & 0x0fffffff);
if (strlen(str) > 0) {
if (coverage_log != NULL) {
fprintf(coverage_log, "%s\n", str);
fflush(coverage_log);
}
instr->SetInstructionBits(0xe1a00000); // Overwrite with nop.
}
sim_->set_pc(sim_->get_pc() + Instr::kInstrSize);
}
#else // ndef GENERATED_CODE_COVERAGE
static void InitializeCoverage() {
}
void Debugger::Stop(Instr* instr) {
const char* str = (const char*)(instr->InstructionBits() & 0x0fffffff);
PrintF("Simulator hit %s\n", str);
sim_->set_pc(sim_->get_pc() + Instr::kInstrSize);
Debug();
}
#endif
static const char* reg_names[] = { "r0", "r1", "r2", "r3",
@ -375,6 +407,7 @@ Simulator::Simulator() {
// access violation if the simulator ever tries to execute it.
registers_[pc] = bad_lr;
registers_[lr] = bad_lr;
InitializeCoverage();
}
@ -427,6 +460,37 @@ int32_t Simulator::get_pc() const {
}
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
void Simulator::GetFpArgs(double* x, double* y) {
// We use a char buffer to get around the strict-aliasing rules which
// otherwise allow the compiler to optimize away the copy.
char buffer[2 * sizeof(registers_[0])];
// Registers 0 and 1 -> x.
memcpy(buffer, registers_, sizeof(buffer));
memcpy(x, buffer, sizeof(buffer));
// Registers 2 and 3 -> y.
memcpy(buffer, registers_ + 2, sizeof(buffer));
memcpy(y, buffer, sizeof(buffer));
}
void Simulator::SetFpResult(const double& result) {
char buffer[2 * sizeof(registers_[0])];
memcpy(buffer, &result, sizeof(buffer));
// result -> registers 0 and 1.
memcpy(registers_, buffer, sizeof(buffer));
}
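
The buffer copies above are the portable way to reinterpret a register pair as a double: a direct reinterpret_cast would violate C++'s strict-aliasing rules, letting the compiler legally elide the copy. The same move in stand-alone form:

#include <cstdint>
#include <cstdio>
#include <cstring>

int main() {
  std::int32_t regs[2];                  // stand-ins for r0 and r1
  double in = 3.25;
  std::memcpy(regs, &in, sizeof(regs));  // double -> register pair
  double out;
  std::memcpy(&out, regs, sizeof(out));  // register pair -> double
  std::printf("%g\n", out);              // prints 3.25
}
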
void Simulator::TrashCallerSaveRegisters() {
// We don't trash the registers with the return value.
registers_[2] = 0x50Bad4U;
registers_[3] = 0x50Bad4U;
registers_[12] = 0x50Bad4U;
}
// The ARM cannot do unaligned reads and writes. On some ARM platforms an
// interrupt is caused. On others it does a funky rotation thing. For now we
// simply disallow unaligned reads, but at some point we may want to move to
@ -862,7 +926,8 @@ typedef int64_t (*SimulatorRuntimeCall)(intptr_t arg0, intptr_t arg1);
// Software interrupt instructions are used by the simulator to call into the
// C-based V8 runtime.
void Simulator::SoftwareInterrupt(Instr* instr) {
switch (instr->SwiField()) {
int swi = instr->SwiField();
switch (swi) {
case call_rt_r5: {
SimulatorRuntimeCall target =
reinterpret_cast<SimulatorRuntimeCall>(get_register(r5));
@ -894,6 +959,30 @@ void Simulator::SoftwareInterrupt(Instr* instr) {
dbg.Debug();
break;
}
{
double x, y, z;
case simulator_fp_add:
GetFpArgs(&x, &y);
z = x + y;
SetFpResult(z);
TrashCallerSaveRegisters();
set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
break;
case simulator_fp_sub:
GetFpArgs(&x, &y);
z = x - y;
SetFpResult(z);
TrashCallerSaveRegisters();
set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
break;
case simulator_fp_mul:
GetFpArgs(&x, &y);
z = x * y;
SetFpResult(z);
TrashCallerSaveRegisters();
set_pc(reinterpret_cast<int32_t>(instr) + Instr::kInstrSize);
break;
}
default: {
UNREACHABLE();
break;

14
deps/v8/src/simulator-arm.h → deps/v8/src/arm/simulator-arm.h

@ -33,14 +33,14 @@
// which will start execution in the Simulator or forward to the real entry
// on an ARM HW platform.
#ifndef V8_SIMULATOR_ARM_H_
#define V8_SIMULATOR_ARM_H_
#ifndef V8_ARM_SIMULATOR_ARM_H_
#define V8_ARM_SIMULATOR_ARM_H_
#if defined(__arm__)
// When running without a simulator we call the entry directly.
#define CALL_GENERATED_CODE(entry, p0, p1, p2, p3, p4) \
entry(p0, p1, p2, p3, p4)
reinterpret_cast<Object*>(entry(p0, p1, p2, p3, p4))
// Calculates the stack limit beyond which we will throw stack overflow errors.
// This macro must be called from a C++ method. It relies on being able to take
@ -174,6 +174,12 @@ class Simulator {
// Executes one instruction.
void InstructionDecode(Instr* instr);
// For use in calls that take two double values, constructed from r0, r1, r2
// and r3.
void GetFpArgs(double* x, double* y);
void SetFpResult(const double& result);
void TrashCallerSaveRegisters();
// architecture state
int32_t registers_[16];
bool n_flag_;
@ -195,4 +201,4 @@ class Simulator {
#endif // defined(__arm__)
#endif // V8_SIMULATOR_ARM_H_
#endif // V8_ARM_SIMULATOR_ARM_H_

15
deps/v8/src/stub-cache-arm.cc → deps/v8/src/arm/stub-cache-arm.cc

@ -33,7 +33,7 @@
namespace v8 { namespace internal {
#define __ masm->
#define __ ACCESS_MASM(masm)
static void ProbeTable(MacroAssembler* masm,
@ -183,7 +183,7 @@ void StubCompiler::GenerateLoadField(MacroAssembler* masm,
// Check that the maps haven't changed.
Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
GenerateFastPropertyLoad(masm, r0, reg, holder, index);
__ Ret();
}
@ -203,7 +203,7 @@ void StubCompiler::GenerateLoadConstant(MacroAssembler* masm,
// Check that the maps haven't changed.
Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Return the constant value.
__ mov(r0, Operand(Handle<Object>(value)));
@ -226,7 +226,7 @@ void StubCompiler::GenerateLoadCallback(MacroAssembler* masm,
// Check that the maps haven't changed.
Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Push the arguments on the JS stack of the caller.
__ push(receiver); // receiver
@ -256,7 +256,7 @@ void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
// Check that the maps haven't changed.
Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Push the arguments on the JS stack of the caller.
__ push(receiver); // receiver
@ -456,8 +456,7 @@ void StubCompiler::GenerateLoadMiss(MacroAssembler* masm, Code::Kind kind) {
#undef __
#define __ masm()->
#define __ ACCESS_MASM(masm())
Object* StubCompiler::CompileLazyCompile(Code::Flags flags) {
@ -511,7 +510,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
// Do the right check and compute the holder register.
Register reg =
__ CheckMaps(JSObject::cast(object), r0, holder, r3, r2, &miss);
masm()->CheckMaps(JSObject::cast(object), r0, holder, r3, r2, &miss);
GenerateFastPropertyLoad(masm(), r1, reg, holder, index);
// Check that the function really is a function.

13
deps/v8/src/virtual-frame-arm.cc → deps/v8/src/arm/virtual-frame-arm.cc

@ -36,7 +36,8 @@ namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// VirtualFrame implementation.
#define __ masm_->
#define __ ACCESS_MASM(masm_)
// On entry to a function, the virtual frame already contains the
// receiver and the parameters. All initial frame elements are in
@ -70,6 +71,16 @@ void VirtualFrame::SyncElementByPushing(int index) {
}
void VirtualFrame::SyncRange(int begin, int end) {
// All elements are in memory on ARM (i.e., synced).
#ifdef DEBUG
for (int i = begin; i <= end; i++) {
ASSERT(elements_[i].is_synced());
}
#endif
}
void VirtualFrame::MergeTo(VirtualFrame* expected) {
Comment cmnt(masm_, "[ Merge frame");
// We should always be merging the code generator's current frame to an

6
deps/v8/src/virtual-frame-arm.h → deps/v8/src/arm/virtual-frame-arm.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_VIRTUAL_FRAME_ARM_H_
#define V8_VIRTUAL_FRAME_ARM_H_
#ifndef V8_ARM_VIRTUAL_FRAME_ARM_H_
#define V8_ARM_VIRTUAL_FRAME_ARM_H_
#include "register-allocator.h"
@ -477,4 +477,4 @@ class VirtualFrame : public Malloced {
} } // namespace v8::internal
#endif // V8_VIRTUAL_FRAME_ARM_H_
#endif // V8_ARM_VIRTUAL_FRAME_ARM_H_

167
deps/v8/src/array.js

@ -709,32 +709,91 @@ function ArraySort(comparefn) {
QuickSort(a, high_start, to);
}
var old_length = ToUint32(this.length);
if (old_length < 2) return this;
// Copies elements in the range 0..length from obj's prototype chain
// to obj itself, if obj has holes. Returns one more than the maximal index
// of a prototype property.
function CopyFromPrototype(obj, length) {
var max = 0;
for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
var indices = %GetArrayKeys(proto, length);
if (indices.length > 0) {
if (indices[0] == -1) {
// It's an interval.
var proto_length = indices[1];
for (var i = 0; i < proto_length; i++) {
if (!obj.hasOwnProperty(i) && proto.hasOwnProperty(i)) {
obj[i] = proto[i];
if (i >= max) { max = i + 1; }
}
}
} else {
for (var i = 0; i < indices.length; i++) {
var index = indices[i];
if (!IS_UNDEFINED(index) &&
!obj.hasOwnProperty(index) && proto.hasOwnProperty(index)) {
obj[index] = proto[index];
if (index >= max) { max = index + 1; }
}
}
}
}
}
return max;
}
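
Both helpers decode the same %GetArrayKeys convention, judging from the checks above: a leading -1 means the keys form the dense interval [0, indices[1]), and otherwise the array lists individual (possibly undefined) indices. A C++ rendering of that decode, for illustration only (the encoding is inferred from the JS, not an official API):

#include <vector>

// indices == {-1, n} encodes keys 0..n-1; anything else is an index list.
template <typename Visit>
void ForEachKey(const std::vector<long>& indices, Visit visit) {
  if (indices.size() >= 2 && indices[0] == -1) {
    for (long i = 0; i < indices[1]; i++) visit(i);  // dense interval
  } else {
    for (long index : indices) visit(index);         // sparse index list
  }
}
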
%RemoveArrayHoles(this);
// Set a value of "undefined" on all indices in the range from..to
// where a prototype of obj has an element. I.e., shadow all prototype
// elements in that range.
function ShadowPrototypeElements(obj, from, to) {
for (var proto = obj.__proto__; proto; proto = proto.__proto__) {
var indices = %GetArrayKeys(proto, to);
if (indices.length > 0) {
if (indices[0] == -1) {
// It's an interval.
var proto_length = indices[1];
for (var i = from; i < proto_length; i++) {
if (proto.hasOwnProperty(i)) {
obj[i] = void 0;
}
}
} else {
for (var i = 0; i < indices.length; i++) {
var index = indices[i];
if (!IS_UNDEFINED(index) && from <= index &&
proto.hasOwnProperty(index)) {
obj[index] = void 0;
}
}
}
}
}
}
var length = ToUint32(this.length);
// Move undefined elements to the end of the array.
for (var i = 0; i < length; ) {
if (IS_UNDEFINED(this[i])) {
length--;
this[i] = this[length];
this[length] = void 0;
} else {
i++;
}
if (length < 2) return this;
var is_array = IS_ARRAY(this);
var max_prototype_element;
if (!is_array) {
// For compatibility with JSC, we also sort elements inherited from
// the prototype chain on non-Array objects.
// We do this by copying them to this object and sorting only
// local elements. This is not very efficient, but sorting with
// inherited elements happens very, very rarely, if at all.
// The specification allows "implementation dependent" behavior
// if an object on the prototype chain has an element that
// might interact with sorting.
max_prototype_element = CopyFromPrototype(this, length);
}
QuickSort(this, 0, length);
var num_non_undefined = %RemoveArrayHoles(this, length);
// We only changed the length of the this object (in
// RemoveArrayHoles) if it was an array. We are not allowed to set
// the length of the this object if it is not an array because this
// might introduce a new length property.
if (IS_ARRAY(this)) {
this.length = old_length;
QuickSort(this, 0, num_non_undefined);
if (!is_array && (num_non_undefined + 1 < max_prototype_element)) {
// For compatibility with JSC, we shadow any elements in the prototype
// chain that have become exposed by the sort moving a hole into their
// position.
ShadowPrototypeElements(this, num_non_undefined, max_prototype_element);
}
return this;
@ -879,6 +938,62 @@ function ArrayLastIndexOf(element, index) {
}
function ArrayReduce(callback, current) {
if (!IS_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
// Pull out the length so that modifications to the length in the
// loop will not affect the looping.
var length = this.length;
var i = 0;
find_initial: if (%_ArgumentsLength() < 2) {
for (; i < length; i++) {
current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
i++;
break find_initial;
}
}
throw MakeTypeError('reduce_no_initial', []);
}
for (; i < length; i++) {
var element = this[i];
if (!IS_UNDEFINED(element) || i in this) {
current = callback.call(null, current, element, i, this);
}
}
return current;
}
function ArrayReduceRight(callback, current) {
if (!IS_FUNCTION(callback)) {
throw MakeTypeError('called_non_callable', [callback]);
}
var i = this.length - 1;
find_initial: if (%_ArgumentsLength() < 2) {
for (; i >= 0; i--) {
current = this[i];
if (!IS_UNDEFINED(current) || i in this) {
i--;
break find_initial;
}
}
throw MakeTypeError('reduce_no_initial', []);
}
for (; i >= 0; i--) {
var element = this[i];
if (!IS_UNDEFINED(element) || i in this) {
current = callback.call(null, current, element, i, this);
}
}
return current;
}
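
Both functions implement the same contract: without a seed argument the first present element becomes the accumulator, and reducing an empty sequence with no seed throws. The same shape in C++, ignoring the holes-versus-undefined subtlety the JS handles with `i in this`:

#include <cstddef>
#include <optional>
#include <stdexcept>
#include <vector>

template <typename T, typename F>
T Reduce(const std::vector<T>& xs, F f, std::optional<T> seed = std::nullopt) {
  std::size_t i = 0;
  T acc;
  if (seed) {
    acc = *seed;
  } else if (!xs.empty()) {
    acc = xs[i++];  // first element seeds the fold
  } else {
    throw std::runtime_error("reduce of empty sequence with no initial value");
  }
  for (; i < xs.size(); i++) acc = f(acc, xs[i]);
  return acc;
}

int main() {
  std::vector<int> v{1, 2, 3, 4};
  return Reduce(v, [](int a, int b) { return a + b; }) == 10 ? 0 : 1;
}
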
// -------------------------------------------------------------------
@ -890,7 +1005,6 @@ function UpdateFunctionLengths(lengths) {
// -------------------------------------------------------------------
function SetupArray() {
// Setup non-enumerable constructor property on the Array.prototype
// object.
@ -898,7 +1012,7 @@ function SetupArray() {
// Setup non-enumerable functions of the Array.prototype object and
// set their names.
InstallFunctions($Array.prototype, DONT_ENUM, $Array(
InstallFunctionsOnHiddenPrototype($Array.prototype, DONT_ENUM, $Array(
"toString", ArrayToString,
"toLocaleString", ArrayToLocaleString,
"join", ArrayJoin,
@ -917,8 +1031,9 @@ function SetupArray() {
"every", ArrayEvery,
"map", ArrayMap,
"indexOf", ArrayIndexOf,
"lastIndexOf", ArrayLastIndexOf
));
"lastIndexOf", ArrayLastIndexOf,
"reduce", ArrayReduce,
"reduceRight", ArrayReduceRight));
// Manipulate the length of some of the functions to meet
// expectations set by ECMA-262 or Mozilla.
@ -930,7 +1045,9 @@ function SetupArray() {
ArrayMap: 1,
ArrayIndexOf: 1,
ArrayLastIndexOf: 1,
ArrayPush: 1
ArrayPush: 1,
ArrayReduce: 1,
ArrayReduceRight: 1
});
}

56
deps/v8/src/assembler.cc

@ -521,10 +521,10 @@ ExternalReference::ExternalReference(Runtime::Function* f)
ExternalReference::ExternalReference(const IC_Utility& ic_utility)
: address_(ic_utility.address()) {}
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference::ExternalReference(const Debug_Address& debug_address)
: address_(debug_address.address()) {}
#endif
ExternalReference::ExternalReference(StatsCounter* counter)
: address_(reinterpret_cast<Address>(counter->GetInternalPointer())) {}
@ -557,29 +557,71 @@ ExternalReference ExternalReference::address_of_regexp_stack_limit() {
}
ExternalReference ExternalReference::debug_break() {
return ExternalReference(FUNCTION_ADDR(Debug::Break));
}
ExternalReference ExternalReference::new_space_start() {
return ExternalReference(Heap::NewSpaceStart());
}
ExternalReference ExternalReference::new_space_allocation_top_address() {
return ExternalReference(Heap::NewSpaceAllocationTopAddress());
}
ExternalReference ExternalReference::heap_always_allocate_scope_depth() {
return ExternalReference(Heap::always_allocate_scope_depth_address());
}
ExternalReference ExternalReference::new_space_allocation_limit_address() {
return ExternalReference(Heap::NewSpaceAllocationLimitAddress());
}
static double add_two_doubles(double x, double y) {
return x + y;
}
static double sub_two_doubles(double x, double y) {
return x - y;
}
static double mul_two_doubles(double x, double y) {
return x * y;
}
ExternalReference ExternalReference::double_fp_operation(
Token::Value operation) {
typedef double BinaryFPOperation(double x, double y);
BinaryFPOperation* function = NULL;
switch (operation) {
case Token::ADD:
function = &add_two_doubles;
break;
case Token::SUB:
function = &sub_two_doubles;
break;
case Token::MUL:
function = &mul_two_doubles;
break;
default:
UNREACHABLE();
}
return ExternalReference(FUNCTION_ADDR(function));
}
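
The generated code loads the returned address into r5 and calls it on hardware (or traps to the matching swi code under the simulator). A hypothetical direct caller, just to show the shape of the dispatch:

#include <cassert>

typedef double (*BinaryFPOperation)(double x, double y);

static double add_two(double x, double y) { return x + y; }

int main() {
  BinaryFPOperation op = &add_two;  // what double_fp_operation resolves to
  assert(op(1.5, 2.0) == 3.5);      // the stub does this via r5 / swi
  return 0;
}
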
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference ExternalReference::debug_break() {
return ExternalReference(FUNCTION_ADDR(Debug::Break));
}
ExternalReference ExternalReference::debug_step_in_fp_address() {
return ExternalReference(Debug::step_in_fp_addr());
}
#endif
} } // namespace v8::internal

37
deps/v8/src/assembler.h

@ -38,6 +38,7 @@
#include "runtime.h"
#include "top.h"
#include "zone-inl.h"
#include "token.h"
namespace v8 { namespace internal {
@ -340,29 +341,15 @@ class RelocIterator: public Malloced {
};
// A stack-allocated code region logs a name for the code generated
// while the region is in effect. This information is used by the
// profiler to categorize ticks within generated code.
class CodeRegion BASE_EMBEDDED {
public:
inline CodeRegion(Assembler* assm, const char *name) : assm_(assm) {
LOG(BeginCodeRegionEvent(this, assm, name));
}
inline ~CodeRegion() {
LOG(EndCodeRegionEvent(this, assm_));
}
private:
Assembler* assm_;
};
//------------------------------------------------------------------------------
// External function
//----------------------------------------------------------------------------
class IC_Utility;
class Debug_Address;
class SCTableReference;
#ifdef ENABLE_DEBUGGER_SUPPORT
class Debug_Address;
#endif
// An ExternalReference represents a C++ address called from the generated
// code. All references to C++ functions and variables must be encapsulated in an
@ -380,7 +367,9 @@ class ExternalReference BASE_EMBEDDED {
explicit ExternalReference(const IC_Utility& ic_utility);
#ifdef ENABLE_DEBUGGER_SUPPORT
explicit ExternalReference(const Debug_Address& debug_address);
#endif
explicit ExternalReference(StatsCounter* counter);
@ -403,9 +392,6 @@ class ExternalReference BASE_EMBEDDED {
// Static variable RegExpStack::limit_address()
static ExternalReference address_of_regexp_stack_limit();
// Function Debug::Break()
static ExternalReference debug_break();
// Static variable Heap::NewSpaceStart()
static ExternalReference new_space_start();
static ExternalReference heap_always_allocate_scope_depth();
@ -414,11 +400,18 @@ class ExternalReference BASE_EMBEDDED {
static ExternalReference new_space_allocation_top_address();
static ExternalReference new_space_allocation_limit_address();
// Used to check if single stepping is enabled in generated code.
static ExternalReference debug_step_in_fp_address();
static ExternalReference double_fp_operation(Token::Value operation);
Address address() const {return address_;}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Function Debug::Break()
static ExternalReference debug_break();
// Used to check if single stepping is enabled in generated code.
static ExternalReference debug_step_in_fp_address();
#endif
private:
explicit ExternalReference(void* address)
: address_(reinterpret_cast<Address>(address)) {}

21
deps/v8/src/ast.cc

@ -152,6 +152,27 @@ ObjectLiteral::Property::Property(bool is_getter, FunctionLiteral* value) {
}
bool ObjectLiteral::IsValidJSON() {
int length = properties()->length();
for (int i = 0; i < length; i++) {
Property* prop = properties()->at(i);
if (!prop->value()->IsValidJSON())
return false;
}
return true;
}
bool ArrayLiteral::IsValidJSON() {
int length = values()->length();
for (int i = 0; i < length; i++) {
if (!values()->at(i)->IsValidJSON())
return false;
}
return true;
}
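
Together with the Literal and MaterializedLiteral defaults in ast.h, these overrides form a recursive predicate over the literal tree: leaves are valid, composites are valid when every child is. Reduced (and simplified) to a toy AST in C++, types invented:

#include <vector>

struct Node {
  std::vector<Node*> children;  // empty for leaf literals

  bool IsValidJSON() const {
    for (const Node* child : children) {
      if (!child->IsValidJSON()) return false;  // one bad member poisons all
    }
    return true;  // leaves are trivially valid
  }
};
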
void TargetCollector::AddTarget(BreakTarget* target) {
// Add the label to the collector, but discard duplicates.
int length = targets_->length();

7
deps/v8/src/ast.h

@ -155,6 +155,7 @@ class Expression: public Node {
public:
virtual Expression* AsExpression() { return this; }
virtual bool IsValidJSON() { return false; }
virtual bool IsValidLeftHandSide() { return false; }
// Mark the expression as being compiled as an expression
@ -625,6 +626,8 @@ class Literal: public Expression {
return handle_.is_identical_to(other->handle_);
}
virtual bool IsValidJSON() { return true; }
// Identity testers.
bool IsNull() const { return handle_.is_identical_to(Factory::null_value()); }
bool IsTrue() const { return handle_.is_identical_to(Factory::true_value()); }
@ -653,6 +656,8 @@ class MaterializedLiteral: public Expression {
// constants and simple object and array literals.
bool is_simple() const { return is_simple_; }
virtual bool IsValidJSON() { return true; }
int depth() const { return depth_; }
private:
@ -704,6 +709,7 @@ class ObjectLiteral: public MaterializedLiteral {
virtual ObjectLiteral* AsObjectLiteral() { return this; }
virtual void Accept(AstVisitor* v);
virtual bool IsValidJSON();
Handle<FixedArray> constant_properties() const {
return constant_properties_;
@ -751,6 +757,7 @@ class ArrayLiteral: public MaterializedLiteral {
virtual void Accept(AstVisitor* v);
virtual ArrayLiteral* AsArrayLiteral() { return this; }
virtual bool IsValidJSON();
Handle<FixedArray> literals() const { return literals_; }
ZoneList<Expression*>* values() const { return values_; }

64
deps/v8/src/bootstrapper.cc

@ -530,7 +530,7 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
global_context()->function_instance_map()->set_prototype(*empty_function);
// Allocate the function map first and then patch the prototype later
Handle<Map> empty_fm = Factory::CopyMap(fm);
Handle<Map> empty_fm = Factory::CopyMapDropDescriptors(fm);
empty_fm->set_instance_descriptors(*function_map_descriptors);
empty_fm->set_prototype(global_context()->object_function()->prototype());
empty_function->set_map(*empty_fm);
@ -741,6 +741,19 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
global_context()->set_regexp_function(*regexp_fun);
}
{ // -- J S O N
Handle<String> name = Factory::NewStringFromAscii(CStrVector("JSON"));
Handle<JSFunction> cons = Factory::NewFunction(
name,
Factory::the_hole_value());
cons->SetInstancePrototype(global_context()->initial_object_prototype());
cons->SetInstanceClassName(*name);
Handle<JSObject> json_object = Factory::NewJSObject(cons, TENURED);
ASSERT(json_object->IsJSObject());
SetProperty(global, name, json_object, DONT_ENUM);
global_context()->set_json_object(*json_object);
}
{ // --- arguments_boilerplate_
// Make sure we can recognize argument objects at runtime.
// This is done by introducing an anonymous function with
@ -820,6 +833,9 @@ void Genesis::CreateRoots(v8::Handle<v8::ObjectTemplate> global_template,
// Initialize the out of memory slot.
global_context()->set_out_of_memory(Heap::false_value());
// Initialize the data slot.
global_context()->set_data(Heap::undefined_value());
}
@ -832,12 +848,16 @@ bool Genesis::CompileBuiltin(int index) {
bool Genesis::CompileNative(Vector<const char> name, Handle<String> source) {
HandleScope scope;
#ifdef ENABLE_DEBUGGER_SUPPORT
Debugger::set_compiling_natives(true);
#endif
bool result =
CompileScriptCached(name, source, &natives_cache, NULL, true);
ASSERT(Top::has_pending_exception() != result);
if (!result) Top::clear_pending_exception();
#ifdef ENABLE_DEBUGGER_SUPPORT
Debugger::set_compiling_natives(false);
#endif
return result;
}
@ -853,9 +873,7 @@ bool Genesis::CompileScriptCached(Vector<const char> name,
// If we can't find the function in the cache, we compile a new
// function and insert it into the cache.
if (!cache->Lookup(name, &boilerplate)) {
#ifdef DEBUG
ASSERT(StringShape(*source).IsAsciiRepresentation());
#endif
ASSERT(source->IsAsciiRepresentation());
Handle<String> script_name = Factory::NewStringFromUtf8(name);
boilerplate =
Compiler::Compile(source, script_name, 0, 0, extension, NULL);
@ -1015,6 +1033,13 @@ bool Genesis::InstallNatives() {
Factory::LookupAsciiSymbol("column_offset"),
proxy_column_offset,
common_attributes);
Handle<Proxy> proxy_data = Factory::NewProxy(&Accessors::ScriptData);
script_descriptors =
Factory::CopyAppendProxyDescriptor(
script_descriptors,
Factory::LookupAsciiSymbol("data"),
proxy_data,
common_attributes);
Handle<Proxy> proxy_type = Factory::NewProxy(&Accessors::ScriptType);
script_descriptors =
Factory::CopyAppendProxyDescriptor(
@ -1030,6 +1055,14 @@ bool Genesis::InstallNatives() {
Factory::LookupAsciiSymbol("line_ends"),
proxy_line_ends,
common_attributes);
Handle<Proxy> proxy_context_data =
Factory::NewProxy(&Accessors::ScriptContextData);
script_descriptors =
Factory::CopyAppendProxyDescriptor(
script_descriptors,
Factory::LookupAsciiSymbol("context_data"),
proxy_context_data,
common_attributes);
Handle<Map> script_map = Handle<Map>(script_fun->initial_map());
script_map->set_instance_descriptors(*script_descriptors);
@ -1057,6 +1090,10 @@ bool Genesis::InstallNatives() {
Natives::GetIndex("regexp"),
Top::global_context(),
Handle<Context>(Top::context()->runtime_context()));
SetupLazy(Handle<JSObject>(global_context()->json_object()),
Natives::GetIndex("json"),
Top::global_context(),
Handle<Context>(Top::context()->runtime_context()));
} else if (strlen(FLAG_natives_file) != 0) {
// Otherwise install natives from natives file if file exists and
@ -1132,6 +1169,7 @@ bool Genesis::InstallSpecialObjects() {
Handle<JSObject>(js_global->builtins()), DONT_ENUM);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Expose the debug global object in global if a name for it is specified.
if (FLAG_expose_debug_as != NULL && strlen(FLAG_expose_debug_as) != 0) {
// If loading fails we just bail out without installing the
@ -1149,6 +1187,7 @@ bool Genesis::InstallSpecialObjects() {
SetProperty(js_global, debug_string,
Handle<Object>(Debug::debug_context()->global_proxy()), DONT_ENUM);
}
#endif
return true;
}
@ -1403,7 +1442,7 @@ void Genesis::MakeFunctionInstancePrototypeWritable() {
Handle<DescriptorArray> function_map_descriptors =
ComputeFunctionInstanceDescriptor(false, true);
Handle<Map> fm = Factory::CopyMap(Top::function_map());
Handle<Map> fm = Factory::CopyMapDropDescriptors(Top::function_map());
fm->set_instance_descriptors(*function_map_descriptors);
Top::context()->global_context()->set_function_map(*fm);
}
@ -1442,11 +1481,20 @@ void Genesis::BuildSpecialFunctionTable() {
Handle<JSFunction> function =
Handle<JSFunction>(
JSFunction::cast(global->GetProperty(Heap::Array_symbol())));
Handle<JSObject> prototype =
Handle<JSObject> visible_prototype =
Handle<JSObject>(JSObject::cast(function->prototype()));
AddSpecialFunction(prototype, "pop",
// Remember to put push and pop on the hidden prototype if it's there.
Handle<JSObject> push_and_pop_prototype;
Handle<Object> superproto(visible_prototype->GetPrototype());
if (superproto->IsJSObject() &&
JSObject::cast(*superproto)->map()->is_hidden_prototype()) {
push_and_pop_prototype = Handle<JSObject>::cast(superproto);
} else {
push_and_pop_prototype = visible_prototype;
}
AddSpecialFunction(push_and_pop_prototype, "pop",
Handle<Code>(Builtins::builtin(Builtins::ArrayPop)));
AddSpecialFunction(prototype, "push",
AddSpecialFunction(push_and_pop_prototype, "push",
Handle<Code>(Builtins::builtin(Builtins::ArrayPush)));
}

3
deps/v8/src/builtins.cc

@ -559,6 +559,7 @@ static void Generate_KeyedStoreIC_Initialize(MacroAssembler* masm) {
}
#ifdef ENABLE_DEBUGGER_SUPPORT
static void Generate_LoadIC_DebugBreak(MacroAssembler* masm) {
Debug::GenerateLoadICDebugBreak(masm);
}
@ -597,7 +598,7 @@ static void Generate_Return_DebugBreakEntry(MacroAssembler* masm) {
static void Generate_StubNoRegisters_DebugBreak(MacroAssembler* masm) {
Debug::GenerateStubNoRegistersDebugBreak(masm);
}
#endif
Object* Builtins::builtins_[builtin_count] = { NULL, };
const char* Builtins::names_[builtin_count] = { NULL, };

5
deps/v8/src/builtins.h

@ -83,6 +83,7 @@ namespace v8 { namespace internal {
V(FunctionApply, BUILTIN, UNINITIALIZED)
#ifdef ENABLE_DEBUGGER_SUPPORT
// Define list of builtins used by the debugger implemented in assembly.
#define BUILTIN_LIST_DEBUG_A(V) \
V(Return_DebugBreak, BUILTIN, DEBUG_BREAK) \
@ -93,7 +94,9 @@ namespace v8 { namespace internal {
V(KeyedLoadIC_DebugBreak, KEYED_LOAD_IC, DEBUG_BREAK) \
V(StoreIC_DebugBreak, STORE_IC, DEBUG_BREAK) \
V(KeyedStoreIC_DebugBreak, KEYED_STORE_IC, DEBUG_BREAK)
#else
#define BUILTIN_LIST_DEBUG_A(V)
#endif
// Define list of builtins implemented in JavaScript.
#define BUILTINS_LIST_JS(V) \

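The BUILTIN_LIST_DEBUG_A change above leans on v8's X-macro convention: each list is a macro that applies V to every entry, and the #else branch defines the list as empty so every expansion site still compiles when debugger support is off. A minimal standalone sketch of the pattern (entry names are illustrative, not the real builtin set):

#include <cstdio>

#define ENABLE_DEBUGGER_SUPPORT  // comment out to get the empty list

#ifdef ENABLE_DEBUGGER_SUPPORT
#define BUILTIN_LIST_DEBUG(V) \
  V(Return_DebugBreak)        \
  V(LoadIC_DebugBreak)
#else
// Expands to nothing, so expansion sites below need no #ifdef of their own.
#define BUILTIN_LIST_DEBUG(V)
#endif

// One expansion site: turn each list entry into an enum value.
#define DEF_ENUM(name) k##name,
enum DebugBuiltin { BUILTIN_LIST_DEBUG(DEF_ENUM) kDebugBuiltinCount };
#undef DEF_ENUM

int main() {
  printf("%d debug builtins\n", static_cast<int>(kDebugBuiltinCount));
  return 0;
}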
2
deps/v8/src/checks.h

@ -254,7 +254,7 @@ template <int> class StaticAssertionHelper { };
#define ASSERT_TAG_ALIGNED(address) \
ASSERT((reinterpret_cast<int>(address) & kHeapObjectTagMask) == 0)
ASSERT((reinterpret_cast<intptr_t>(address) & kHeapObjectTagMask) == 0)
#define ASSERT_SIZE_TAG_ALIGNED(size) ASSERT((size & kHeapObjectTagMask) == 0)

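The ASSERT_TAG_ALIGNED fix above widens the cast from int to intptr_t: a reinterpret_cast from a pointer to int is ill-formed on LP64 targets where pointers are wider than int, while intptr_t is guaranteed to hold a pointer. A standalone sketch, with kHeapObjectTagMask assumed to be 3 as for a two-bit tag:

#include <cassert>
#include <cstdint>

static const intptr_t kHeapObjectTagMask = 3;  // assumed two-bit tag

bool TagAligned(const void* address) {
  // reinterpret_cast<int>(address) would not even compile on LP64 systems
  // (the destination type is too narrow); intptr_t keeps every address bit.
  return (reinterpret_cast<intptr_t>(address) & kHeapObjectTagMask) == 0;
}

int main() {
  int buffer[2];
  assert(TagAligned(&buffer[0]));  // ints are 4-byte aligned, tag bits clear
  return 0;
}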
2
deps/v8/src/code-stubs.h

@ -72,7 +72,7 @@ class CodeStub BASE_EMBEDDED {
protected:
static const int kMajorBits = 5;
static const int kMinorBits = kBitsPerPointer - kMajorBits - kSmiTagSize;
static const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;
private:
// Generates the assembler code for the stub.

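The kMinorBits change above budgets the stub key against int width instead of pointer width, since a smi payload carries at most an int's worth of bits even on 64-bit builds. A sketch of the arithmetic, assuming the one-bit smi tag used here:

#include <climits>

// Assumed values mirroring the hunk: 5 major bits and a one-bit smi tag.
const int kBitsPerInt = sizeof(int) * CHAR_BIT;  // 32 on common targets
const int kSmiTagSize = 1;
const int kMajorBits = 5;
const int kMinorBits = kBitsPerInt - kSmiTagSize - kMajorBits;

// 32 - 1 - 5 leaves 26 bits for the minor key. Deriving the budget from
// kBitsPerPointer would claim 58 bits on 64-bit targets and overflow the
// smi payload, which is what the change avoids.
static_assert(kBitsPerInt != 32 || kMinorBits == 26, "26 minor bits");

int main() { return 0; }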
90
deps/v8/src/codegen.cc

@ -304,8 +304,10 @@ Handle<JSFunction> CodeGenerator::BuildBoilerplate(FunctionLiteral* node) {
node->is_expression(), false, script_,
node->inferred_name());
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger that a new function has been added.
Debugger::OnNewFunction(function);
#endif
// Set the expected number of properties for instances and return
// the resulting function.
@ -384,57 +386,69 @@ void CodeGenerator::ProcessDeclarations(ZoneList<Declaration*>* declarations) {
}
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
// Special cases: These 'runtime calls' manipulate the current
// frame and are only used in one or two places, so we generate them
// inline instead of generating calls to them. They are used
// for implementing Function.prototype.call() and
// Function.prototype.apply().
CodeGenerator::InlineRuntimeLUT CodeGenerator::kInlineRuntimeLUT[] = {
{&CodeGenerator::GenerateIsSmi, "_IsSmi"},
{&CodeGenerator::GenerateIsNonNegativeSmi, "_IsNonNegativeSmi"},
{&CodeGenerator::GenerateIsArray, "_IsArray"},
{&CodeGenerator::GenerateArgumentsLength, "_ArgumentsLength"},
{&CodeGenerator::GenerateArgumentsAccess, "_Arguments"},
{&CodeGenerator::GenerateValueOf, "_ValueOf"},
{&CodeGenerator::GenerateSetValueOf, "_SetValueOf"},
{&CodeGenerator::GenerateFastCharCodeAt, "_FastCharCodeAt"},
{&CodeGenerator::GenerateObjectEquals, "_ObjectEquals"},
{&CodeGenerator::GenerateLog, "_Log"}
};
CodeGenerator::InlineRuntimeLUT* CodeGenerator::FindInlineRuntimeLUT(
Handle<String> name) {
const int entries_count =
sizeof(kInlineRuntimeLUT) / sizeof(InlineRuntimeLUT);
for (int i = 0; i < entries_count; i++) {
InlineRuntimeLUT* entry = &kInlineRuntimeLUT[i];
if (name->IsEqualTo(CStrVector(entry->name))) {
return entry;
}
}
return NULL;
}
bool CodeGenerator::CheckForInlineRuntimeCall(CallRuntime* node) {
ZoneList<Expression*>* args = node->arguments();
// Special cases: These 'runtime calls' manipulate the current
// frame and are only used in one or two places, so we generate them
// inline instead of generating calls to them. They are used
// for implementing Function.prototype.call() and
// Function.prototype.apply().
static const InlineRuntimeLUT kInlineRuntimeLUT[] = {
{&v8::internal::CodeGenerator::GenerateIsSmi,
"_IsSmi"},
{&v8::internal::CodeGenerator::GenerateIsNonNegativeSmi,
"_IsNonNegativeSmi"},
{&v8::internal::CodeGenerator::GenerateIsArray,
"_IsArray"},
{&v8::internal::CodeGenerator::GenerateArgumentsLength,
"_ArgumentsLength"},
{&v8::internal::CodeGenerator::GenerateArgumentsAccess,
"_Arguments"},
{&v8::internal::CodeGenerator::GenerateValueOf,
"_ValueOf"},
{&v8::internal::CodeGenerator::GenerateSetValueOf,
"_SetValueOf"},
{&v8::internal::CodeGenerator::GenerateFastCharCodeAt,
"_FastCharCodeAt"},
{&v8::internal::CodeGenerator::GenerateObjectEquals,
"_ObjectEquals"},
{&v8::internal::CodeGenerator::GenerateLog,
"_Log"}
};
Handle<String> name = node->name();
if (name->length() > 0 && name->Get(0) == '_') {
for (unsigned i = 0;
i < sizeof(kInlineRuntimeLUT) / sizeof(InlineRuntimeLUT);
i++) {
const InlineRuntimeLUT* entry = kInlineRuntimeLUT + i;
if (name->IsEqualTo(CStrVector(entry->name))) {
((*this).*(entry->method))(args);
return true;
}
InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name);
if (entry != NULL) {
((*this).*(entry->method))(args);
return true;
}
}
return false;
}
bool CodeGenerator::PatchInlineRuntimeEntry(Handle<String> name,
const CodeGenerator::InlineRuntimeLUT& new_entry,
CodeGenerator::InlineRuntimeLUT* old_entry) {
InlineRuntimeLUT* entry = FindInlineRuntimeLUT(name);
if (entry == NULL) return false;
if (old_entry != NULL) {
old_entry->name = entry->name;
old_entry->method = entry->method;
}
entry->name = new_entry.name;
entry->method = new_entry.method;
return true;
}
void CodeGenerator::GenerateFastCaseSwitchStatement(SwitchStatement* node,
int min_index,
int range,

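FindInlineRuntimeLUT and PatchInlineRuntimeEntry above turn the inline-runtime table into something a caller (for instance, the debugger) can rewrite and later restore. A standalone mock of the same find/patch/restore pattern, with plain function pointers standing in for the CodeGenerator member pointers:

#include <cstdio>
#include <cstring>

// Entries pair a name with a handler; FindEntry scans by name and
// PatchEntry swaps a handler in place, optionally returning the old one.
struct Entry {
  void (*method)();
  const char* name;
};

static void GenerateIsSmi() { puts("original _IsSmi generator"); }
static void GenerateTraced() { puts("patched generator"); }

static Entry kTable[] = {
  { GenerateIsSmi, "_IsSmi" },
};

Entry* FindEntry(const char* name) {
  for (size_t i = 0; i < sizeof(kTable) / sizeof(kTable[0]); i++) {
    if (strcmp(kTable[i].name, name) == 0) return &kTable[i];
  }
  return NULL;
}

bool PatchEntry(const char* name, const Entry& new_entry, Entry* old_entry) {
  Entry* entry = FindEntry(name);
  if (entry == NULL) return false;
  if (old_entry != NULL) *old_entry = *entry;  // saved so the caller can restore
  *entry = new_entry;
  return true;
}

int main() {
  Entry saved;
  Entry replacement = { GenerateTraced, "_IsSmi" };
  PatchEntry("_IsSmi", replacement, &saved);
  FindEntry("_IsSmi")->method();      // runs the patched generator
  PatchEntry("_IsSmi", saved, NULL);  // restore the original
  FindEntry("_IsSmi")->method();      // runs the original again
  return 0;
}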
20
deps/v8/src/codegen.h

@ -59,7 +59,9 @@
// ComputeCallInitializeInLoop
// ProcessDeclarations
// DeclareGlobals
// FindInlineRuntimeLUT
// CheckForInlineRuntimeCall
// PatchInlineRuntimeEntry
// GenerateFastCaseSwitchStatement
// GenerateFastCaseSwitchCases
// TryGenerateFastCaseSwitchStatement
@ -71,10 +73,17 @@
// CodeForStatementPosition
// CodeForSourcePosition
#ifdef ARM
#include "codegen-arm.h"
#else
#include "codegen-ia32.h"
// Mode to overwrite BinaryExpression values.
enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
#if V8_TARGET_ARCH_IA32
#include "ia32/codegen-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/codegen-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/codegen-arm.h"
#endif
namespace v8 { namespace internal {
@ -111,6 +120,9 @@ class DeferredCode: public ZoneObject {
JumpTarget* enter() { return &enter_; }
void BindExit() { exit_.Bind(0); }
void BindExit(Result* result) { exit_.Bind(result, 1); }
void BindExit(Result* result0, Result* result1) {
exit_.Bind(result0, result1, 2);
}
void BindExit(Result* result0, Result* result1, Result* result2) {
exit_.Bind(result0, result1, result2, 3);
}

43
deps/v8/src/compiler.cc

@ -80,8 +80,20 @@ static Handle<Code> MakeCode(FunctionLiteral* literal,
}
static bool IsValidJSON(FunctionLiteral* lit) {
if (lit->body()->length() != 1)
return false;
Statement* stmt = lit->body()->at(0);
if (stmt->AsExpressionStatement() == NULL)
return false;
Expression* expr = stmt->AsExpressionStatement()->expression();
return expr->IsValidJSON();
}
static Handle<JSFunction> MakeFunction(bool is_global,
bool is_eval,
bool is_json,
Handle<Script> script,
Handle<Context> context,
v8::Extension* extension,
@ -92,8 +104,12 @@ static Handle<JSFunction> MakeFunction(bool is_global,
StackGuard guard;
PostponeInterruptsScope postpone;
ASSERT(!i::Top::global_context().is_null());
script->set_context_data((*i::Top::global_context())->data());
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger
Debugger::OnBeforeCompile(script);
#endif
// Only allow non-global compiles for eval.
ASSERT(is_eval || is_global);
@ -107,6 +123,19 @@ static Handle<JSFunction> MakeFunction(bool is_global,
return Handle<JSFunction>::null();
}
// When parsing JSON we do an ordinary parse and then afterwards
// check the AST to ensure it was well-formed. If not we give a
// syntax error.
if (is_json && !IsValidJSON(lit)) {
HandleScope scope;
Handle<JSArray> args = Factory::NewJSArray(1);
Handle<Object> source(script->source());
SetElement(args, 0, source);
Handle<Object> result = Factory::NewSyntaxError("invalid_json", args);
Top::Throw(*result, NULL);
return Handle<JSFunction>::null();
}
// Measure how long it takes to do the compilation; only take the
// rest of the function into account to avoid overlap with the
// parsing statistics.
@ -160,8 +189,10 @@ static Handle<JSFunction> MakeFunction(bool is_global,
// the instances of the function.
SetExpectedNofPropertiesFromEstimate(fun, lit->expected_property_count());
#ifdef ENABLE_DEBUGGER_SUPPORT
// Notify debugger
Debugger::OnAfterCompile(script, fun);
#endif
return fun;
}
@ -210,6 +241,7 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
// Compile the function and add it to the cache.
result = MakeFunction(true,
false,
false,
script,
Handle<Context>::null(),
@ -233,7 +265,8 @@ Handle<JSFunction> Compiler::Compile(Handle<String> source,
Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
Handle<Context> context,
int line_offset,
bool is_global) {
bool is_global,
bool is_json) {
int source_length = source->length();
Counters::total_eval_size.Increment(source_length);
Counters::total_compile_size.Increment(source_length);
@ -252,7 +285,13 @@ Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
// Create a script object describing the script to be compiled.
Handle<Script> script = Factory::NewScript(source);
script->set_line_offset(Smi::FromInt(line_offset));
result = MakeFunction(is_global, true, script, context, NULL, NULL);
result = MakeFunction(is_global,
true,
is_json,
script,
context,
NULL,
NULL);
if (!result.is_null()) {
CompilationCache::PutEval(source, context, entry, result);
}

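The is_json path above parses JSON source as ordinary JavaScript and only then checks the AST: the body must be exactly one expression statement whose expression reports IsValidJSON, otherwise an invalid_json syntax error is thrown. A standalone mock of the admission rule (the structs are hypothetical stand-ins for the v8 AST classes):

#include <cassert>
#include <cstddef>

struct Expression { bool is_valid_json; };
struct Statement { Expression* expression; };  // NULL: not an expression statement
struct FunctionLiteral { Statement* body; int body_length; };

static bool IsValidJSON(const FunctionLiteral* lit) {
  if (lit->body_length != 1) return false;     // exactly one statement
  const Statement* stmt = &lit->body[0];
  if (stmt->expression == NULL) return false;  // must be an expression statement
  return stmt->expression->is_valid_json;      // e.g. an object or array literal
}

int main() {
  Expression object_literal = { true };
  Statement stmt = { &object_literal };
  FunctionLiteral single = { &stmt, 1 };
  assert(IsValidJSON(&single));

  FunctionLiteral several = { &stmt, 2 };  // rejected before body[1] is read
  assert(!IsValidJSON(&several));
  return 0;
}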
3
deps/v8/src/compiler.h

@ -60,7 +60,8 @@ class Compiler : public AllStatic {
static Handle<JSFunction> CompileEval(Handle<String> source,
Handle<Context> context,
int line_offset,
bool is_global);
bool is_global,
bool is_json);
// Compile from function info (used for lazy compilation). Returns
// true on success and false if the compilation resulted in a stack

6
deps/v8/src/contexts.h

@ -64,6 +64,7 @@ enum ContextLookupFlags {
V(OBJECT_FUNCTION_INDEX, JSFunction, object_function) \
V(ARRAY_FUNCTION_INDEX, JSFunction, array_function) \
V(DATE_FUNCTION_INDEX, JSFunction, date_function) \
V(JSON_OBJECT_INDEX, JSObject, json_object) \
V(REGEXP_FUNCTION_INDEX, JSFunction, regexp_function) \
V(INITIAL_OBJECT_PROTOTYPE_INDEX, JSObject, initial_object_prototype) \
V(CREATE_DATE_FUN_INDEX, JSFunction, create_date_fun) \
@ -93,7 +94,8 @@ enum ContextLookupFlags {
V(SCRIPT_FUNCTION_INDEX, JSFunction, script_function) \
V(CONTEXT_EXTENSION_FUNCTION_INDEX, JSFunction, context_extension_function) \
V(OUT_OF_MEMORY_INDEX, Object, out_of_memory) \
V(MAP_CACHE_INDEX, Object, map_cache)
V(MAP_CACHE_INDEX, Object, map_cache) \
V(CONTEXT_DATA_INDEX, Object, data)
// JSFunctions are pairs (context, function code), sometimes also called
// closures. A Context object is used to represent function contexts and
@ -186,6 +188,7 @@ class Context: public FixedArray {
OBJECT_FUNCTION_INDEX,
ARRAY_FUNCTION_INDEX,
DATE_FUNCTION_INDEX,
JSON_OBJECT_INDEX,
REGEXP_FUNCTION_INDEX,
CREATE_DATE_FUN_INDEX,
TO_NUMBER_FUN_INDEX,
@ -211,6 +214,7 @@ class Context: public FixedArray {
CONTEXT_EXTENSION_FUNCTION_INDEX,
OUT_OF_MEMORY_INDEX,
MAP_CACHE_INDEX,
CONTEXT_DATA_INDEX,
GLOBAL_CONTEXT_SLOTS
};

27
deps/v8/src/d8.cc

@ -163,6 +163,22 @@ Handle<Value> Shell::Print(const Arguments& args) {
}
Handle<Value> Shell::Read(const Arguments& args) {
if (args.Length() != 1) {
return ThrowException(String::New("Bad parameters"));
}
String::Utf8Value file(args[0]);
if (*file == NULL) {
return ThrowException(String::New("Error loading file"));
}
Handle<String> source = ReadFile(*file);
if (source.IsEmpty()) {
return ThrowException(String::New("Error loading file"));
}
return source;
}
Handle<Value> Shell::Load(const Arguments& args) {
for (int i = 0; i < args.Length(); i++) {
HandleScope handle_scope;
@ -246,6 +262,7 @@ Handle<Array> Shell::GetCompletions(Handle<String> text, Handle<String> full) {
}
#ifdef ENABLE_DEBUGGER_SUPPORT
Handle<Object> Shell::DebugMessageDetails(Handle<String> message) {
Context::Scope context_scope(utility_context_);
Handle<Object> global = utility_context_->Global();
@ -266,6 +283,7 @@ Handle<Value> Shell::DebugCommandToJSONRequest(Handle<String> command) {
Handle<Value> val = Handle<Function>::Cast(fun)->Call(global, kArgc, argv);
return val;
}
#endif
int32_t* Counter::Bind(const char* name, bool is_histogram) {
@ -381,6 +399,7 @@ void Shell::Initialize() {
HandleScope scope;
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"), FunctionTemplate::New(Print));
global_template->Set(String::New("read"), FunctionTemplate::New(Read));
global_template->Set(String::New("load"), FunctionTemplate::New(Load));
global_template->Set(String::New("quit"), FunctionTemplate::New(Quit));
global_template->Set(String::New("version"), FunctionTemplate::New(Version));
@ -406,11 +425,13 @@ void Shell::Initialize() {
global_template->Set(String::New("arguments"),
Utils::ToLocal(arguments_jsarray));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Install the debugger object in the utility scope
i::Debug::Load();
i::JSObject* debug = i::Debug::debug_context()->global();
utility_context_->Global()->Set(String::New("$debug"),
Utils::ToLocal(&debug));
#endif
// Run the d8 shell utility script in the utility context
int source_index = i::NativesCollection<i::D8>::GetIndex("d8");
@ -436,8 +457,10 @@ void Shell::Initialize() {
evaluation_context_ = Context::New(NULL, global_template);
evaluation_context_->SetSecurityToken(Undefined());
#ifdef ENABLE_DEBUGGER_SUPPORT
// Set the security token of the debug context to allow access.
i::Debug::debug_context()->set_security_token(i::Heap::undefined_value());
#endif
}
@ -555,6 +578,8 @@ void ShellThread::Run() {
Handle<ObjectTemplate> global_template = ObjectTemplate::New();
global_template->Set(String::New("print"),
FunctionTemplate::New(Shell::Print));
global_template->Set(String::New("read"),
FunctionTemplate::New(Shell::Read));
global_template->Set(String::New("load"),
FunctionTemplate::New(Shell::Load));
global_template->Set(String::New("yield"),
@ -690,6 +715,7 @@ int Shell::Main(int argc, char* argv[]) {
Locker::StartPreemption(preemption_interval);
}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Run the remote debugger if requested.
if (i::FLAG_remote_debugger) {
RunRemoteDebugger(i::FLAG_debugger_port);
@ -705,6 +731,7 @@ int Shell::Main(int argc, char* argv[]) {
if (i::FLAG_debugger && !i::FLAG_debugger_agent) {
v8::Debug::SetDebugEventListener(HandleDebugEvent);
}
#endif
}
if (run_shell)
RunShell();

3
deps/v8/src/d8.h

@ -132,13 +132,16 @@ class Shell: public i::AllStatic {
static int Main(int argc, char* argv[]);
static Handle<Array> GetCompletions(Handle<String> text,
Handle<String> full);
#ifdef ENABLE_DEBUGGER_SUPPORT
static Handle<Object> DebugMessageDetails(Handle<String> message);
static Handle<Value> DebugCommandToJSONRequest(Handle<String> command);
#endif
static Handle<Value> Print(const Arguments& args);
static Handle<Value> Yield(const Arguments& args);
static Handle<Value> Quit(const Arguments& args);
static Handle<Value> Version(const Arguments& args);
static Handle<Value> Read(const Arguments& args);
static Handle<Value> Load(const Arguments& args);
// The OS object on the global object contains methods for performing
// operating system calls:

37
deps/v8/src/d8.js

@ -653,17 +653,47 @@ DebugRequest.prototype.breakCommandToJSONRequest_ = function(args) {
// Process arguments if any.
if (args && args.length > 0) {
var target = args;
var type = 'function';
var line;
var column;
var condition;
var pos;
var pos = args.indexOf(' ');
// Check for breakpoint condition.
pos = args.indexOf(' ');
if (pos > 0) {
target = args.substring(0, pos);
condition = args.substring(pos + 1, args.length);
}
// Check for script breakpoint (name:line[:column]). If no ':' in break
// specification it is considered a function break point.
pos = target.indexOf(':');
if (pos > 0) {
type = 'script';
var tmp = target.substring(pos + 1, target.length);
target = target.substring(0, pos);
// Check for both line and column.
pos = tmp.indexOf(':');
if (pos > 0) {
column = parseInt(tmp.substring(pos + 1, tmp.length)) - 1;
line = parseInt(tmp.substring(0, pos)) - 1;
} else {
line = parseInt(tmp) - 1;
}
} else if (target[0] == '#' && target[target.length - 1] == '#') {
type = 'handle';
target = target.substring(1, target.length - 1);
} else {
type = 'function';
}
request.arguments = {};
request.arguments.type = 'function';
request.arguments.type = type;
request.arguments.target = target;
request.arguments.line = line;
request.arguments.column = column;
request.arguments.condition = condition;
} else {
throw new Error('Invalid break arguments.');
@ -721,6 +751,9 @@ DebugRequest.prototype.helpCommand_ = function(args) {
}
print('break location [condition]');
print(' break on named function: location is a function name');
print(' break on function: location is #<id>#');
print(' break on script position: location is name:line[:column]');
print('clear <breakpoint #>');
print('backtrace [from frame #] [to frame #]');
print('frame <frame #>');

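The break command above now classifies its location argument three ways: #id# targets a function through a mirror handle, name:line[:column] sets a script break point (one-based on the command line, zero-based in the request), and anything else is taken as a function name. The same classification re-implemented in C++, purely for illustration:

#include <cstdio>
#include <cstdlib>
#include <string>

struct BreakLocation {
  std::string type;    // "function", "handle" or "script"
  std::string target;
  int line;            // zero-based; -1 when absent
  int column;          // zero-based; -1 when absent
};

BreakLocation Classify(const std::string& target) {
  BreakLocation loc = { "function", target, -1, -1 };
  std::string::size_type pos = target.find(':');
  if (pos != std::string::npos && pos > 0) {
    loc.type = "script";                     // name:line[:column]
    loc.target = target.substr(0, pos);
    std::string tmp = target.substr(pos + 1);
    std::string::size_type pos2 = tmp.find(':');
    if (pos2 != std::string::npos && pos2 > 0) {
      loc.line = atoi(tmp.substr(0, pos2).c_str()) - 1;
      loc.column = atoi(tmp.substr(pos2 + 1).c_str()) - 1;
    } else {
      loc.line = atoi(tmp.c_str()) - 1;
    }
  } else if (target.size() >= 2 && target[0] == '#' &&
             target[target.size() - 1] == '#') {
    loc.type = "handle";                     // #id#
    loc.target = target.substr(1, target.size() - 2);
  }
  return loc;
}

int main() {
  BreakLocation loc = Classify("shell.js:10:4");
  printf("%s %s line=%d column=%d\n",  // prints: script shell.js line=9 column=3
         loc.type.c_str(), loc.target.c_str(), loc.line, loc.column);
  return 0;
}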
25
deps/v8/src/date-delay.js

@ -985,6 +985,25 @@ function DateToGMTString() {
}
function PadInt(n) {
// Format integers to have at least two digits.
return n < 10 ? '0' + n : n;
}
function DateToISOString() {
return this.getUTCFullYear() + '-' + PadInt(this.getUTCMonth() + 1) +
'-' + PadInt(this.getUTCDate()) + 'T' + PadInt(this.getUTCHours()) +
':' + PadInt(this.getUTCMinutes()) + ':' + PadInt(this.getUTCSeconds()) +
'Z';
}
function DateToJSON(key) {
return CheckJSONPrimitive(this.toISOString());
}
// -------------------------------------------------------------------
function SetupDate() {
@ -1000,7 +1019,7 @@ function SetupDate() {
// Setup non-enumerable functions of the Date prototype object and
// set their names.
InstallFunctions($Date.prototype, DONT_ENUM, $Array(
InstallFunctionsOnHiddenPrototype($Date.prototype, DONT_ENUM, $Array(
"toString", DateToString,
"toDateString", DateToDateString,
"toTimeString", DateToTimeString,
@ -1044,7 +1063,9 @@ function SetupDate() {
"toGMTString", DateToGMTString,
"toUTCString", DateToUTCString,
"getYear", DateGetYear,
"setYear", DateSetYear
"setYear", DateSetYear,
"toISOString", DateToISOString,
"toJSON", DateToJSON
));
}

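DateToISOString above always reports UTC and pads every field after the year to two digits, yielding yyyy-mm-ddThh:mm:ssZ. A C++ rendition of the same format, for illustration only:

#include <cstdio>
#include <ctime>

// Formats a UTC timestamp the way DateToISOString does: the year unpadded,
// all other fields zero-padded to two digits, a literal trailing 'Z'.
void PrintISO(time_t t) {
  struct tm* utc = gmtime(&t);
  printf("%d-%02d-%02dT%02d:%02d:%02dZ\n",
         utc->tm_year + 1900, utc->tm_mon + 1, utc->tm_mday,
         utc->tm_hour, utc->tm_min, utc->tm_sec);
}

int main() {
  PrintISO(0);  // prints 1970-01-01T00:00:00Z
  return 0;
}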
5
deps/v8/src/dateparser-inl.h

@ -25,6 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_DATEPARSER_INL_H_
#define V8_DATEPARSER_INL_H_
namespace v8 { namespace internal {
template <typename Char>
@ -104,3 +107,5 @@ bool DateParser::Parse(Vector<Char> str, FixedArray* out) {
}
} } // namespace v8::internal
#endif // V8_DATEPARSER_INL_H_

21
deps/v8/src/debug-agent.cc

@ -29,16 +29,17 @@
#include "v8.h"
#include "debug-agent.h"
#ifdef ENABLE_DEBUGGER_SUPPORT
namespace v8 { namespace internal {
// Public V8 debugger API message handler function. This function just delegates
// to the debugger agent through its data parameter.
void DebuggerAgentMessageHandler(const uint16_t* message, int length,
void *data) {
reinterpret_cast<DebuggerAgent*>(data)->DebuggerMessage(message, length);
void DebuggerAgentMessageHandler(const v8::Debug::Message& message) {
DebuggerAgent::instance_->DebuggerMessage(message);
}
// static
DebuggerAgent* DebuggerAgent::instance_ = NULL;
// Debugger agent main thread.
void DebuggerAgent::Run() {
@ -105,7 +106,7 @@ void DebuggerAgent::CreateSession(Socket* client) {
// Create a new session and hook up the debug message handler.
session_ = new DebuggerAgentSession(this, client);
v8::Debug::SetMessageHandler(DebuggerAgentMessageHandler, this);
v8::Debug::SetMessageHandler2(DebuggerAgentMessageHandler);
session_->Start();
}
@ -123,13 +124,14 @@ void DebuggerAgent::CloseSession() {
}
void DebuggerAgent::DebuggerMessage(const uint16_t* message, int length) {
void DebuggerAgent::DebuggerMessage(const v8::Debug::Message& message) {
ScopedLock with(session_access_);
// Forward the message handling to the session.
if (session_ != NULL) {
session_->DebuggerMessage(Vector<uint16_t>(const_cast<uint16_t*>(message),
length));
v8::String::Value val(message.GetJSON());
session_->DebuggerMessage(Vector<uint16_t>(const_cast<uint16_t*>(*val),
val.length()));
}
}
@ -410,5 +412,6 @@ int DebuggerAgentUtil::ReceiveAll(const Socket* conn, char* data, int len) {
return total_received;
}
} } // namespace v8::internal
#endif // ENABLE_DEBUGGER_SUPPORT

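With the agent moved to SetMessageHandler2, a handler receives a v8::Debug::Message object and extracts the JSON payload itself, exactly as DebuggerAgentMessageHandler does above. A minimal embedder-side sketch (the header path and everything outside the handler are assumptions, not part of this diff):

#include <cstdio>
#include <v8-debug.h>

// New-style handler: no raw uint16_t buffer and no user-data pointer; the
// message carries its own JSON and event/response flag.
static void OnDebugMessage(const v8::Debug::Message& message) {
  v8::String::Value json(message.GetJSON());
  printf("debugger message, %d chars%s\n", json.length(),
         message.IsEvent() ? " (event)" : " (response)");
}

int main() {
  v8::Debug::SetMessageHandler2(OnDebugMessage);
  // ... compile and run scripts; the handler fires for events and responses.
  return 0;
}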
28
deps/v8/src/debug-agent.h

@ -25,15 +25,15 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_V8_DEBUG_AGENT_H_
#define V8_V8_DEBUG_AGENT_H_
#ifndef V8_DEBUG_AGENT_H_
#define V8_DEBUG_AGENT_H_
#ifdef ENABLE_DEBUGGER_SUPPORT
#include "../include/v8-debug.h"
#include "platform.h"
namespace v8 { namespace internal {
// Forward declarations.
class DebuggerAgentSession;
@ -46,15 +46,21 @@ class DebuggerAgent: public Thread {
: name_(StrDup(name)), port_(port),
server_(OS::CreateSocket()), terminate_(false),
session_access_(OS::CreateMutex()), session_(NULL),
terminate_now_(OS::CreateSemaphore(0)) {}
~DebuggerAgent() { delete server_; }
terminate_now_(OS::CreateSemaphore(0)) {
ASSERT(instance_ == NULL);
instance_ = this;
}
~DebuggerAgent() {
instance_ = NULL;
delete server_;
}
void Shutdown();
private:
void Run();
void CreateSession(Socket* socket);
void DebuggerMessage(const uint16_t* message, int length);
void DebuggerMessage(const v8::Debug::Message& message);
void CloseSession();
void OnSessionClosed(DebuggerAgentSession* session);
@ -66,9 +72,10 @@ class DebuggerAgent: public Thread {
DebuggerAgentSession* session_; // Current active session if any.
Semaphore* terminate_now_; // Semaphore to signal termination.
static DebuggerAgent* instance_;
friend class DebuggerAgentSession;
friend void DebuggerAgentMessageHandler(const uint16_t* message, int length,
void *data);
friend void DebuggerAgentMessageHandler(const v8::Debug::Message& message);
DISALLOW_COPY_AND_ASSIGN(DebuggerAgent);
};
@ -111,7 +118,8 @@ class DebuggerAgentUtil {
static int ReceiveAll(const Socket* conn, char* data, int len);
};
} } // namespace v8::internal
#endif // V8_V8_DEBUG_AGENT_H_
#endif // ENABLE_DEBUGGER_SUPPORT
#endif // V8_DEBUG_AGENT_H_

75
deps/v8/src/debug-delay.js

@ -977,6 +977,7 @@ CompileEvent.prototype.script = function() {
CompileEvent.prototype.toJSONProtocol = function() {
var o = new ProtocolMessage();
o.running = true;
if (this.before_) {
o.event = "beforeCompile";
} else {
@ -1021,6 +1022,9 @@ function MakeScriptObject_(script, include_source) {
columnOffset: script.columnOffset(),
lineCount: script.lineCount(),
};
if (!IS_UNDEFINED(script.data())) {
o.data = script.data();
}
if (include_source) {
o.source = script.source();
}
@ -1058,6 +1062,14 @@ function ProtocolMessage(request) {
}
ProtocolMessage.prototype.setOption = function(name, value) {
if (!this.options_) {
this.options_ = {};
}
this.options_[name] = value;
}
ProtocolMessage.prototype.failed = function(message) {
this.success = false;
this.message = message;
@ -1086,7 +1098,7 @@ ProtocolMessage.prototype.toJSONProtocol = function() {
if (this.body) {
json += ',"body":';
// Encode the body part.
var serializer = MakeMirrorSerializer(true);
var serializer = MakeMirrorSerializer(true, this.options_);
if (this.body instanceof Mirror) {
json += serializer.serializeValue(this.body);
} else if (this.body instanceof Array) {
@ -1130,7 +1142,7 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
try {
try {
// Convert the JSON string to an object.
request = %CompileString('(' + json_request + ')', 0)();
request = %CompileString('(' + json_request + ')', 0, false)();
// Create an initial response.
response = this.createResponse(request);
@ -1270,11 +1282,12 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
var ignoreCount = request.arguments.ignoreCount;
// Check for legal arguments.
if (!type || !target) {
if (!type || IS_UNDEFINED(target)) {
response.failed('Missing argument "type" or "target"');
return;
}
if (type != 'function' && type != 'script' && type != 'scriptId') {
if (type != 'function' && type != 'handle' &&
type != 'script' && type != 'scriptId') {
response.failed('Illegal type "' + type + '"');
return;
}
@ -1303,6 +1316,20 @@ DebugCommandProcessor.prototype.setBreakPointRequest_ =
// Set function break point.
break_point_number = Debug.setBreakPoint(f, line, column, condition);
} else if (type == 'handle') {
// Find the object pointed by the specified handle.
var handle = parseInt(target, 10);
var mirror = LookupMirror(handle);
if (!mirror) {
return response.failed('Object #' + handle + '# not found');
}
if (!mirror.isFunction()) {
return response.failed('Object #' + handle + '# is not a function');
}
// Set function break point.
break_point_number = Debug.setBreakPoint(mirror.value(),
line, column, condition);
} else if (type == 'script') {
// set script break point.
break_point_number =
@ -1547,20 +1574,24 @@ DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
}
// Pull out arguments.
var handle = request.arguments.handle;
var handles = request.arguments.handles;
// Check for legal arguments.
if (IS_UNDEFINED(handle)) {
return response.failed('Argument "handle" missing');
if (IS_UNDEFINED(handles)) {
return response.failed('Argument "handles" missing');
}
// Lookup handle.
var mirror = LookupMirror(handle);
if (mirror) {
response.body = mirror;
} else {
return response.failed('Object #' + handle + '# not found');
// Lookup handles.
var mirrors = {};
for (var i = 0; i < handles.length; i++) {
var handle = handles[i];
var mirror = LookupMirror(handle);
if (!mirror) {
return response.failed('Object #' + handle + '# not found');
}
mirrors[handle] = mirror;
}
response.body = mirrors;
};
@ -1657,6 +1688,7 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
if (!IS_UNDEFINED(request.arguments.includeSource)) {
includeSource = %ToBoolean(request.arguments.includeSource);
response.setOption('includeSource', includeSource);
}
}
@ -1667,22 +1699,7 @@ DebugCommandProcessor.prototype.scriptsRequest_ = function(request, response) {
for (var i = 0; i < scripts.length; i++) {
if (types & ScriptTypeFlag(scripts[i].type)) {
var script = {};
if (scripts[i].name) {
script.name = scripts[i].name;
}
script.id = scripts[i].id;
script.lineOffset = scripts[i].line_offset;
script.columnOffset = scripts[i].column_offset;
script.lineCount = scripts[i].lineCount();
if (includeSource) {
script.source = scripts[i].source;
} else {
script.sourceStart = scripts[i].source.substring(0, 80);
}
script.sourceLength = scripts[i].source.length;
script.type = scripts[i].type;
response.body.push(script);
response.body.push(MakeMirror(scripts[i]));
}
}
};

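The lookup request above now carries a handles array, and the response body maps each handle number to its mirror instead of returning a single object. A sketch of the new request shape as a client would frame it (the sequence number is illustrative):

#include <cstdio>

// After this change a client batches handles: the body of the response is
// keyed by handle, e.g. {"7":{...},"12":{...}}.
static const char kLookupRequest[] =
    "{\"seq\":117,\"type\":\"request\",\"command\":\"lookup\","
    "\"arguments\":{\"handles\":[7,12]}}";

int main() {
  printf("%s\n", kLookupRequest);
  return 0;
}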
541
deps/v8/src/debug.cc

@ -35,12 +35,17 @@
#include "debug.h"
#include "execution.h"
#include "global-handles.h"
#include "ic.h"
#include "ic-inl.h"
#include "natives.h"
#include "stub-cache.h"
#include "log.h"
#include "../include/v8-debug.h"
namespace v8 { namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
static void PrintLn(v8::Local<v8::Value> value) {
v8::Local<v8::String> s = value->ToString();
char* data = NewArray<char>(s->Length() + 1);
@ -288,14 +293,8 @@ void BreakLocationIterator::SetDebugBreak() {
// Patch the frame exit code with a break point.
SetDebugBreakAtReturn();
} else {
// Patch the original code with the current address as the current address
// might have been changed by inline caching since the code was copied.
original_rinfo()->set_target_address(rinfo()->target_address());
// Patch the code to invoke the builtin debug break function matching the
// calling convention used by the call site.
Handle<Code> dbgbrk_code(Debug::FindDebugBreak(rinfo()));
rinfo()->set_target_address(dbgbrk_code->entry());
// Patch the IC call.
SetDebugBreakAtIC();
}
ASSERT(IsDebugBreak());
}
@ -306,8 +305,8 @@ void BreakLocationIterator::ClearDebugBreak() {
// Restore the frame exit code.
ClearDebugBreakAtReturn();
} else {
// Patch the code to the original invoke.
rinfo()->set_target_address(original_rinfo()->target_address());
// Patch the IC call.
ClearDebugBreakAtIC();
}
ASSERT(!IsDebugBreak());
}
@ -360,6 +359,37 @@ bool BreakLocationIterator::IsDebugBreak() {
}
void BreakLocationIterator::SetDebugBreakAtIC() {
// Patch the original code with the current address as the current address
// might have been changed by inline caching since the code was copied.
original_rinfo()->set_target_address(rinfo()->target_address());
RelocInfo::Mode mode = rmode();
if (RelocInfo::IsCodeTarget(mode)) {
Address target = rinfo()->target_address();
Handle<Code> code(Code::GetCodeFromTargetAddress(target));
// Patch the code to invoke the builtin debug break function matching the
// calling convention used by the call site.
Handle<Code> dbgbrk_code(Debug::FindDebugBreak(code, mode));
rinfo()->set_target_address(dbgbrk_code->entry());
// For stubs that refer back to an inlined version, clear the cached map for
// the inlined case so that execution always goes through the IC. As long as
// the break point is set, the patching performed by the runtime system will
// take place in the code copy and will therefore have no effect on the
// running code, keeping it from using the inlined code.
if (code->is_keyed_load_stub()) KeyedLoadIC::ClearInlinedVersion(pc());
}
}
void BreakLocationIterator::ClearDebugBreakAtIC() {
// Patch the code to the original invoke.
rinfo()->set_target_address(original_rinfo()->target_address());
}
Object* BreakLocationIterator::BreakPointObjects() {
return debug_info_->GetBreakPointObjects(code_position());
}
@ -1055,48 +1085,42 @@ bool Debug::IsBreakStub(Code* code) {
// Find the builtin to use for invoking the debug break
Handle<Code> Debug::FindDebugBreak(RelocInfo* rinfo) {
Handle<Code> Debug::FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode) {
// Find the builtin debug break function matching the calling convention
// used by the call site.
RelocInfo::Mode mode = rinfo->rmode();
if (RelocInfo::IsCodeTarget(mode)) {
Address target = rinfo->target_address();
Code* code = Code::GetCodeFromTargetAddress(target);
if (code->is_inline_cache_stub()) {
if (code->is_call_stub()) {
return ComputeCallDebugBreak(code->arguments_count());
}
if (code->is_load_stub()) {
return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
}
if (code->is_store_stub()) {
return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
}
if (code->is_keyed_load_stub()) {
Handle<Code> result =
Handle<Code>(Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
return result;
}
if (code->is_keyed_store_stub()) {
Handle<Code> result =
Handle<Code>(Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
return result;
}
if (code->is_inline_cache_stub()) {
if (code->is_call_stub()) {
return ComputeCallDebugBreak(code->arguments_count());
}
if (RelocInfo::IsConstructCall(mode)) {
if (code->is_load_stub()) {
return Handle<Code>(Builtins::builtin(Builtins::LoadIC_DebugBreak));
}
if (code->is_store_stub()) {
return Handle<Code>(Builtins::builtin(Builtins::StoreIC_DebugBreak));
}
if (code->is_keyed_load_stub()) {
Handle<Code> result =
Handle<Code>(Builtins::builtin(Builtins::ConstructCall_DebugBreak));
Handle<Code>(Builtins::builtin(Builtins::KeyedLoadIC_DebugBreak));
return result;
}
if (code->kind() == Code::STUB) {
ASSERT(code->major_key() == CodeStub::CallFunction ||
code->major_key() == CodeStub::StackCheck);
if (code->is_keyed_store_stub()) {
Handle<Code> result =
Handle<Code>(Builtins::builtin(Builtins::StubNoRegisters_DebugBreak));
Handle<Code>(Builtins::builtin(Builtins::KeyedStoreIC_DebugBreak));
return result;
}
}
if (RelocInfo::IsConstructCall(mode)) {
Handle<Code> result =
Handle<Code>(Builtins::builtin(Builtins::ConstructCall_DebugBreak));
return result;
}
if (code->kind() == Code::STUB) {
ASSERT(code->major_key() == CodeStub::CallFunction ||
code->major_key() == CodeStub::StackCheck);
Handle<Code> result =
Handle<Code>(Builtins::builtin(Builtins::StubNoRegisters_DebugBreak));
return result;
}
UNREACHABLE();
return Handle<Code>::null();
@ -1396,17 +1420,13 @@ Handle<Object> Debugger::event_listener_data_ = Handle<Object>();
bool Debugger::compiling_natives_ = false;
bool Debugger::is_loading_debugger_ = false;
bool Debugger::never_unload_debugger_ = false;
DebugMessageThread* Debugger::message_thread_ = NULL;
v8::DebugMessageHandler Debugger::message_handler_ = NULL;
v8::Debug::MessageHandler2 Debugger::message_handler_ = NULL;
bool Debugger::message_handler_cleared_ = false;
void* Debugger::message_handler_data_ = NULL;
v8::DebugHostDispatchHandler Debugger::host_dispatch_handler_ = NULL;
void* Debugger::host_dispatch_handler_data_ = NULL;
v8::Debug::HostDispatchHandler Debugger::host_dispatch_handler_ = NULL;
int Debugger::host_dispatch_micros_ = 100 * 1000;
DebuggerAgent* Debugger::agent_ = NULL;
LockingMessageQueue Debugger::command_queue_(kQueueInitialSize);
LockingMessageQueue Debugger::message_queue_(kQueueInitialSize);
LockingCommandMessageQueue Debugger::command_queue_(kQueueInitialSize);
Semaphore* Debugger::command_received_ = OS::CreateSemaphore(0);
Semaphore* Debugger::message_received_ = OS::CreateSemaphore(0);
Handle<Object> Debugger::MakeJSObject(Vector<const char> constructor_name,
@ -1534,8 +1554,8 @@ void Debugger::OnException(Handle<Object> exception, bool uncaught) {
return;
}
// Process debug event
ProcessDebugEvent(v8::Exception, event_data, false);
// Process debug event.
ProcessDebugEvent(v8::Exception, Handle<JSObject>::cast(event_data), false);
// Return to continue execution from where the exception was thrown.
}
@ -1566,8 +1586,10 @@ void Debugger::OnDebugBreak(Handle<Object> break_points_hit,
return;
}
// Process debug event
ProcessDebugEvent(v8::Break, event_data, auto_continue);
// Process debug event.
ProcessDebugEvent(v8::Break,
Handle<JSObject>::cast(event_data),
auto_continue);
}
@ -1591,8 +1613,10 @@ void Debugger::OnBeforeCompile(Handle<Script> script) {
return;
}
// Process debug event
ProcessDebugEvent(v8::BeforeCompile, event_data, false);
// Process debug event.
ProcessDebugEvent(v8::BeforeCompile,
Handle<JSObject>::cast(event_data),
true);
}
@ -1652,8 +1676,10 @@ void Debugger::OnAfterCompile(Handle<Script> script, Handle<JSFunction> fun) {
if (caught_exception) {
return;
}
// Process debug event
ProcessDebugEvent(v8::AfterCompile, event_data, false);
// Process debug event.
ProcessDebugEvent(v8::AfterCompile,
Handle<JSObject>::cast(event_data),
true);
}
@ -1678,12 +1704,12 @@ void Debugger::OnNewFunction(Handle<JSFunction> function) {
return;
}
// Process debug event.
ProcessDebugEvent(v8::NewFunction, event_data, false);
ProcessDebugEvent(v8::NewFunction, Handle<JSObject>::cast(event_data), true);
}
void Debugger::ProcessDebugEvent(v8::DebugEvent event,
Handle<Object> event_data,
Handle<JSObject> event_data,
bool auto_continue) {
HandleScope scope;
@ -1695,7 +1721,10 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
}
// First notify the message handler if any.
if (message_handler_ != NULL) {
NotifyMessageHandler(event, exec_state, event_data, auto_continue);
NotifyMessageHandler(event,
Handle<JSObject>::cast(exec_state),
event_data,
auto_continue);
}
// Notify registered debug event listener. This can be either a C or a
// JavaScript function.
@ -1703,11 +1732,11 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
if (event_listener_->IsProxy()) {
// C debug event listener.
Handle<Proxy> callback_obj(Handle<Proxy>::cast(event_listener_));
v8::DebugEventCallback callback =
FUNCTION_CAST<v8::DebugEventCallback>(callback_obj->proxy());
v8::Debug::EventCallback callback =
FUNCTION_CAST<v8::Debug::EventCallback>(callback_obj->proxy());
callback(event,
v8::Utils::ToLocal(Handle<JSObject>::cast(exec_state)),
v8::Utils::ToLocal(Handle<JSObject>::cast(event_data)),
v8::Utils::ToLocal(event_data),
v8::Utils::ToLocal(Handle<Object>::cast(event_listener_data_)));
} else {
// JavaScript debug event listener.
@ -1718,7 +1747,7 @@ void Debugger::ProcessDebugEvent(v8::DebugEvent event,
const int argc = 4;
Object** argv[argc] = { Handle<Object>(Smi::FromInt(event)).location(),
exec_state.location(),
event_data.location(),
Handle<Object>::cast(event_data).location(),
event_listener_data_.location() };
Handle<Object> result = Execution::TryCall(fun, Top::global(),
argc, argv, &caught_exception);
@ -1748,25 +1777,26 @@ void Debugger::UnloadDebugger() {
void Debugger::NotifyMessageHandler(v8::DebugEvent event,
Handle<Object> exec_state,
Handle<Object> event_data,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
bool auto_continue) {
HandleScope scope;
if (!Debug::Load()) return;
// Process the individual events.
bool interactive = false;
bool sendEventMessage = false;
switch (event) {
case v8::Break:
interactive = true; // Break event is always interactive
sendEventMessage = !auto_continue;
break;
case v8::Exception:
interactive = true; // Exception event is always interactive
sendEventMessage = true;
break;
case v8::BeforeCompile:
break;
case v8::AfterCompile:
sendEventMessage = true;
break;
case v8::NewFunction:
break;
@ -1774,8 +1804,25 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
UNREACHABLE();
}
// Done if not interactive.
if (!interactive) return;
// The debug command interrupt flag might have been set when the command was
// added. It should be enough to clear the flag only once while we are in the
// debugger.
ASSERT(Debug::InDebugger());
StackGuard::Continue(DEBUGCOMMAND);
// Notify the debugger that a debug event has occurred unless auto continue is
// active in which case no event is sent.
if (sendEventMessage) {
MessageImpl message = MessageImpl::NewEvent(
event,
auto_continue,
Handle<JSObject>::cast(exec_state),
Handle<JSObject>::cast(event_data));
InvokeMessageHandler(message);
}
if (auto_continue && !HasCommands()) {
return;
}
// Get the DebugCommandProcessor.
v8::Local<v8::Object> api_exec_state =
@ -1792,45 +1839,30 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
return;
}
// Notify the debugger that a debug event has occurred unless auto continue is
// active in which case no event is sent.
if (!auto_continue) {
bool success = SendEventMessage(event_data);
if (!success) {
// If failed to notify debugger just continue running.
return;
}
}
// Process requests from the debugger.
while (true) {
// Wait for new command in the queue.
command_received_->Wait();
// The debug command interrupt flag might have been set when the command was
// added.
StackGuard::Continue(DEBUGCOMMAND);
if (Debugger::host_dispatch_handler_) {
// In case there is a host dispatch - do periodic dispatches.
if (!command_received_->Wait(host_dispatch_micros_)) {
// Timeout expired, do the dispatch.
Debugger::host_dispatch_handler_();
continue;
}
} else {
// In case there is no host dispatch - just wait.
command_received_->Wait();
}
// Get the command from the queue.
Vector<uint16_t> command = command_queue_.Get();
CommandMessage command = command_queue_.Get();
Logger::DebugTag("Got request from command queue, in interactive loop.");
if (!Debugger::IsDebuggerActive()) {
// Delete command text and user data.
command.Dispose();
return;
}
// Check if the command is a host dispatch.
if (command[0] == 0) {
if (Debugger::host_dispatch_handler_) {
int32_t dispatch = (command[1] << 16) | command[2];
Debugger::host_dispatch_handler_(reinterpret_cast<void*>(dispatch),
Debugger::host_dispatch_handler_data_);
}
if (auto_continue && !HasCommands()) {
return;
}
continue;
}
// Invoke JavaScript to process the debug request.
v8::Local<v8::String> fun_name;
v8::Local<v8::Function> fun;
@ -1838,8 +1870,9 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
v8::TryCatch try_catch;
fun_name = v8::String::New("processDebugRequest");
fun = v8::Function::Cast(*cmd_processor->Get(fun_name));
request = v8::String::New(reinterpret_cast<uint16_t*>(command.start()),
command.length());
request = v8::String::New(command.text().start(),
command.text().length());
static const int kArgc = 1;
v8::Handle<Value> argv[kArgc] = { request };
v8::Local<v8::Value> response_val = fun->Call(cmd_processor, kArgc, argv);
@ -1875,13 +1908,16 @@ void Debugger::NotifyMessageHandler(v8::DebugEvent event,
response = try_catch.Exception()->ToString();
}
// Convert text result to C string.
v8::String::Value val(response);
Vector<uint16_t> str(reinterpret_cast<uint16_t*>(*val),
response->Length());
// Return the result.
SendMessage(str);
MessageImpl message = MessageImpl::NewResponse(
event,
running,
Handle<JSObject>::cast(exec_state),
Handle<JSObject>::cast(event_data),
Handle<String>(Utils::OpenHandle(*response)),
command.client_data());
InvokeMessageHandler(message);
command.Dispose();
// Return from debug event processing if either the VM is put into the
// running state (through a continue command) or auto continue is active
@ -1927,18 +1963,11 @@ void Debugger::SetEventListener(Handle<Object> callback,
}
void Debugger::SetMessageHandler(v8::DebugMessageHandler handler, void* data,
bool message_handler_thread) {
void Debugger::SetMessageHandler(v8::Debug::MessageHandler2 handler) {
ScopedLock with(debugger_access_);
message_handler_ = handler;
message_handler_data_ = data;
if (handler != NULL) {
if (!message_thread_ && message_handler_thread) {
message_thread_ = new DebugMessageThread();
message_thread_->Start();
}
} else {
if (handler == NULL) {
// Indicate that the message handler was recently cleared.
message_handler_cleared_ = true;
@ -1951,87 +1980,37 @@ void Debugger::SetMessageHandler(v8::DebugMessageHandler handler, void* data,
}
void Debugger::SetHostDispatchHandler(v8::DebugHostDispatchHandler handler,
void* data) {
void Debugger::SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
int period) {
host_dispatch_handler_ = handler;
host_dispatch_handler_data_ = data;
host_dispatch_micros_ = period * 1000;
}
// Calls the registered debug message handler. This callback is part of the
// public API. Messages are kept internally as Vector<uint16_t> strings, which
// are allocated in various places and deallocated by the calling function
// sometime after this call.
void Debugger::InvokeMessageHandler(Vector<uint16_t> message) {
// public API.
void Debugger::InvokeMessageHandler(MessageImpl message) {
ScopedLock with(debugger_access_);
if (message_handler_ != NULL) {
message_handler_(message.start(), message.length(), message_handler_data_);
}
}
void Debugger::SendMessage(Vector<uint16_t> message) {
if (message_thread_ == NULL) {
// If there is no message thread just invoke the message handler from the
// V8 thread.
InvokeMessageHandler(message);
} else {
// Put a copy of the message coming from V8 on the queue. The new copy of
// the event string is destroyed by the message thread.
Vector<uint16_t> message_copy = message.Clone();
Logger::DebugTag("Put message on event message_queue.");
message_queue_.Put(message_copy);
message_received_->Signal();
}
}
bool Debugger::SendEventMessage(Handle<Object> event_data) {
v8::HandleScope scope;
// Call toJSONProtocol on the debug event object.
v8::Local<v8::Object> api_event_data =
v8::Utils::ToLocal(Handle<JSObject>::cast(event_data));
v8::Local<v8::String> fun_name = v8::String::New("toJSONProtocol");
v8::Local<v8::Function> fun =
v8::Function::Cast(*api_event_data->Get(fun_name));
v8::TryCatch try_catch;
v8::Local<v8::Value> json_event = *fun->Call(api_event_data, 0, NULL);
v8::Local<v8::String> json_event_string;
if (!try_catch.HasCaught()) {
if (!json_event->IsUndefined()) {
json_event_string = json_event->ToString();
if (FLAG_trace_debug_json) {
PrintLn(json_event_string);
}
v8::String::Value val(json_event_string);
Vector<uint16_t> str(reinterpret_cast<uint16_t*>(*val),
json_event_string->Length());
SendMessage(str);
} else {
SendMessage(Vector<uint16_t>::empty());
}
} else {
PrintLn(try_catch.Exception());
return false;
message_handler_(message);
}
return true;
}
// Puts a command coming from the public API on the queue. Creates
// a copy of the command string managed by the debugger. Up to this
// point, the command data was managed by the API client. Called
// by the API client thread. This is where the API client hands off
// processing of the command to the DebugMessageThread thread.
// The new copy of the command is destroyed in HandleCommand().
void Debugger::ProcessCommand(Vector<const uint16_t> command) {
// Make a copy of the command. Need to cast away const for Clone to work.
Vector<uint16_t> command_copy =
// by the API client thread.
void Debugger::ProcessCommand(Vector<const uint16_t> command,
v8::Debug::ClientData* client_data) {
// Need to cast away const.
CommandMessage message = CommandMessage::New(
Vector<uint16_t>(const_cast<uint16_t*>(command.start()),
command.length()).Clone();
command.length()),
client_data);
Logger::DebugTag("Put command on command_queue.");
command_queue_.Put(command_copy);
command_queue_.Put(message);
command_received_->Signal();
// Set the debug command break flag to have the command processed.
@ -2046,23 +2025,6 @@ bool Debugger::HasCommands() {
}
void Debugger::ProcessHostDispatch(void* dispatch) {
// Puts a host dispatch coming from the public API on the queue.
uint16_t hack[3];
hack[0] = 0;
hack[1] = reinterpret_cast<uint32_t>(dispatch) >> 16;
hack[2] = reinterpret_cast<uint32_t>(dispatch) & 0xFFFF;
Logger::DebugTag("Put dispatch on command_queue.");
command_queue_.Put(Vector<uint16_t>(hack, 3).Clone());
command_received_->Signal();
// Set the debug command break flag to have the host dispatch processed.
if (!Debug::InDebugger()) {
StackGuard::DebugCommand();
}
}
bool Debugger::IsDebuggerActive() {
ScopedLock with(debugger_access_);
@ -2118,47 +2080,152 @@ void Debugger::StopAgent() {
}
void Debugger::TearDown() {
if (message_thread_ != NULL) {
message_thread_->Stop();
delete message_thread_;
message_thread_ = NULL;
}
MessageImpl MessageImpl::NewEvent(DebugEvent event,
bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data) {
MessageImpl message(true, event, running,
exec_state, event_data, Handle<String>(), NULL);
return message;
}
MessageImpl MessageImpl::NewResponse(DebugEvent event,
bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
Handle<String> response_json,
v8::Debug::ClientData* client_data) {
MessageImpl message(false, event, running,
exec_state, event_data, response_json, client_data);
return message;
}
MessageImpl::MessageImpl(bool is_event,
DebugEvent event,
bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
Handle<String> response_json,
v8::Debug::ClientData* client_data)
: is_event_(is_event),
event_(event),
running_(running),
exec_state_(exec_state),
event_data_(event_data),
response_json_(response_json),
client_data_(client_data) {}
bool MessageImpl::IsEvent() const {
return is_event_;
}
bool MessageImpl::IsResponse() const {
return !is_event_;
}
DebugEvent MessageImpl::GetEvent() const {
return event_;
}
bool MessageImpl::WillStartRunning() const {
return running_;
}
v8::Handle<v8::Object> MessageImpl::GetExecutionState() const {
return v8::Utils::ToLocal(exec_state_);
}
v8::Handle<v8::Object> MessageImpl::GetEventData() const {
return v8::Utils::ToLocal(event_data_);
}
void DebugMessageThread::Run() {
// Sends debug events to an installed debugger message callback.
while (keep_running_) {
// Wait and Get are paired so that semaphore count equals queue length.
Debugger::message_received_->Wait();
Logger::DebugTag("Get message from event message_queue.");
Vector<uint16_t> message = Debugger::message_queue_.Get();
if (message.length() > 0) {
Debugger::InvokeMessageHandler(message);
v8::Handle<v8::String> MessageImpl::GetJSON() const {
v8::HandleScope scope;
if (IsEvent()) {
// Call toJSONProtocol on the debug event object.
Handle<Object> fun = GetProperty(event_data_, "toJSONProtocol");
if (!fun->IsJSFunction()) {
return v8::Handle<v8::String>();
}
bool caught_exception;
Handle<Object> json = Execution::TryCall(Handle<JSFunction>::cast(fun),
event_data_,
0, NULL, &caught_exception);
if (caught_exception || !json->IsString()) {
return v8::Handle<v8::String>();
}
return scope.Close(v8::Utils::ToLocal(Handle<String>::cast(json)));
} else {
return v8::Utils::ToLocal(response_json_);
}
}
void DebugMessageThread::Stop() {
keep_running_ = false;
Debugger::SendMessage(Vector<uint16_t>(NULL, 0));
Join();
v8::Handle<v8::Context> MessageImpl::GetEventContext() const {
return v8::Utils::ToLocal(Debug::debugger_entry()->GetContext());
}
v8::Debug::ClientData* MessageImpl::GetClientData() const {
return client_data_;
}
MessageQueue::MessageQueue(int size) : start_(0), end_(0), size_(size) {
messages_ = NewArray<Vector<uint16_t> >(size);
CommandMessage::CommandMessage() : text_(Vector<uint16_t>::empty()),
client_data_(NULL) {
}
MessageQueue::~MessageQueue() {
CommandMessage::CommandMessage(const Vector<uint16_t>& text,
v8::Debug::ClientData* data)
: text_(text),
client_data_(data) {
}
CommandMessage::~CommandMessage() {
}
void CommandMessage::Dispose() {
text_.Dispose();
delete client_data_;
client_data_ = NULL;
}
CommandMessage CommandMessage::New(const Vector<uint16_t>& command,
v8::Debug::ClientData* data) {
return CommandMessage(command.Clone(), data);
}
CommandMessageQueue::CommandMessageQueue(int size) : start_(0), end_(0),
size_(size) {
messages_ = NewArray<CommandMessage>(size);
}
CommandMessageQueue::~CommandMessageQueue() {
while (!IsEmpty()) {
CommandMessage m = Get();
m.Dispose();
}
DeleteArray(messages_);
}
Vector<uint16_t> MessageQueue::Get() {
CommandMessage CommandMessageQueue::Get() {
ASSERT(!IsEmpty());
int result = start_;
start_ = (start_ + 1) % size_;
@ -2166,7 +2233,7 @@ Vector<uint16_t> MessageQueue::Get() {
}
void MessageQueue::Put(const Vector<uint16_t>& message) {
void CommandMessageQueue::Put(const CommandMessage& message) {
if ((end_ + 1) % size_ == start_) {
Expand();
}
@ -2175,53 +2242,57 @@ void MessageQueue::Put(const Vector<uint16_t>& message) {
}
void MessageQueue::Expand() {
MessageQueue new_queue(size_ * 2);
void CommandMessageQueue::Expand() {
CommandMessageQueue new_queue(size_ * 2);
while (!IsEmpty()) {
new_queue.Put(Get());
}
Vector<uint16_t>* array_to_free = messages_;
CommandMessage* array_to_free = messages_;
*this = new_queue;
new_queue.messages_ = array_to_free;
// Make the new_queue empty so that it doesn't call Dispose on any messages.
new_queue.start_ = new_queue.end_;
// Automatic destructor called on new_queue, freeing array_to_free.
}
LockingMessageQueue::LockingMessageQueue(int size) : queue_(size) {
LockingCommandMessageQueue::LockingCommandMessageQueue(int size)
: queue_(size) {
lock_ = OS::CreateMutex();
}
LockingMessageQueue::~LockingMessageQueue() {
LockingCommandMessageQueue::~LockingCommandMessageQueue() {
delete lock_;
}
bool LockingMessageQueue::IsEmpty() const {
bool LockingCommandMessageQueue::IsEmpty() const {
ScopedLock sl(lock_);
return queue_.IsEmpty();
}
Vector<uint16_t> LockingMessageQueue::Get() {
CommandMessage LockingCommandMessageQueue::Get() {
ScopedLock sl(lock_);
Vector<uint16_t> result = queue_.Get();
Logger::DebugEvent("Get", result);
CommandMessage result = queue_.Get();
Logger::DebugEvent("Get", result.text());
return result;
}
void LockingMessageQueue::Put(const Vector<uint16_t>& message) {
void LockingCommandMessageQueue::Put(const CommandMessage& message) {
ScopedLock sl(lock_);
queue_.Put(message);
Logger::DebugEvent("Put", message);
Logger::DebugEvent("Put", message.text());
}
void LockingMessageQueue::Clear() {
void LockingCommandMessageQueue::Clear() {
ScopedLock sl(lock_);
queue_.Clear();
}
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal

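CommandMessageQueue above is a fixed-capacity ring that keeps one slot free (start_ == end_ means empty, so a buffer of size holds size - 1 messages) and grows in Expand by draining into a ring twice the size. A standalone model of the same invariants, with plain ints standing in for CommandMessage:

#include <cassert>

class RingQueue {
 public:
  explicit RingQueue(int size) : start_(0), end_(0), size_(size) {
    slots_ = new int[size];
  }
  ~RingQueue() { delete[] slots_; }
  bool IsEmpty() const { return start_ == end_; }
  int Get() {
    assert(!IsEmpty());
    int result = slots_[start_];
    start_ = (start_ + 1) % size_;
    return result;
  }
  void Put(int value) {
    if ((end_ + 1) % size_ == start_) Expand();  // full: grow before writing
    slots_[end_] = value;
    end_ = (end_ + 1) % size_;
  }

 private:
  void Expand() {
    RingQueue bigger(size_ * 2);
    while (!IsEmpty()) bigger.Put(Get());  // drain into the larger ring
    int* old = slots_;                     // steal the larger buffer...
    slots_ = bigger.slots_;
    start_ = bigger.start_;
    end_ = bigger.end_;
    size_ = bigger.size_;
    bigger.slots_ = old;                   // ...and let bigger free the old one
  }
  int* slots_;
  int start_, end_, size_;
};

int main() {
  RingQueue q(2);          // capacity 1 until the first expansion
  q.Put(1); q.Put(2); q.Put(3);
  assert(q.Get() == 1 && q.Get() == 2 && q.Get() == 3 && q.IsEmpty());
  return 0;
}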
188
deps/v8/src/debug.h

@ -25,10 +25,9 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_V8_DEBUG_H_
#define V8_V8_DEBUG_H_
#ifndef V8_DEBUG_H_
#define V8_DEBUG_H_
#include "../include/v8-debug.h"
#include "assembler.h"
#include "code-stubs.h"
#include "debug-agent.h"
@ -38,6 +37,8 @@
#include "string-stream.h"
#include "v8threads.h"
#ifdef ENABLE_DEBUGGER_SUPPORT
#include "../include/v8-debug.h"
namespace v8 { namespace internal {
@ -131,6 +132,10 @@ class BreakLocationIterator {
private:
void SetDebugBreak();
void ClearDebugBreak();
void SetDebugBreakAtIC();
void ClearDebugBreakAtIC();
bool IsDebugBreakAtReturn();
void SetDebugBreakAtReturn();
void ClearDebugBreakAtReturn();
@ -204,7 +209,7 @@ class Debug {
static bool IsBreakStub(Code* code);
// Find the builtin to use for invoking the debug break
static Handle<Code> FindDebugBreak(RelocInfo* rinfo);
static Handle<Code> FindDebugBreak(Handle<Code> code, RelocInfo::Mode mode);
static Handle<Object> GetSourceBreakLocations(
Handle<SharedFunctionInfo> shared);
@ -396,48 +401,117 @@ class Debug {
};
// A Queue of Vector<uint16_t> objects. A thread-safe version is
// LockingMessageQueue, based on this class.
class MessageQueue BASE_EMBEDDED {
// Message delivered to the message handler callback. This is either a debugger
// event or the response to a command.
class MessageImpl: public v8::Debug::Message {
public:
// Create a message object for a debug event.
static MessageImpl NewEvent(DebugEvent event,
bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data);
// Create a message object for the response to a debug command.
static MessageImpl NewResponse(DebugEvent event,
bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
Handle<String> response_json,
v8::Debug::ClientData* client_data);
// Implementation of interface v8::Debug::Message.
virtual bool IsEvent() const;
virtual bool IsResponse() const;
virtual DebugEvent GetEvent() const;
virtual bool WillStartRunning() const;
virtual v8::Handle<v8::Object> GetExecutionState() const;
virtual v8::Handle<v8::Object> GetEventData() const;
virtual v8::Handle<v8::String> GetJSON() const;
virtual v8::Handle<v8::Context> GetEventContext() const;
virtual v8::Debug::ClientData* GetClientData() const;
private:
MessageImpl(bool is_event,
DebugEvent event,
bool running,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
Handle<String> response_json,
v8::Debug::ClientData* client_data);
bool is_event_; // Does this message represent a debug event?
DebugEvent event_; // Debug event causing the break.
bool running_; // Will the VM start running after this event?
Handle<JSObject> exec_state_; // Current execution state.
Handle<JSObject> event_data_; // Data associated with the event.
Handle<String> response_json_; // Response JSON if message holds a response.
v8::Debug::ClientData* client_data_; // Client data passed with the request.
};
// Message sent by the user to the v8 debugger or a debugger output message.
// In addition to the command text it may contain a pointer to some user data
// which is expected to be passed along with the command response to the
// message handler.
class CommandMessage {
public:
explicit MessageQueue(int size);
~MessageQueue();
static CommandMessage New(const Vector<uint16_t>& command,
v8::Debug::ClientData* data);
CommandMessage();
~CommandMessage();
// Deletes user data and disposes of the text.
void Dispose();
Vector<uint16_t> text() const { return text_; }
v8::Debug::ClientData* client_data() const { return client_data_; }
private:
CommandMessage(const Vector<uint16_t>& text,
v8::Debug::ClientData* data);
Vector<uint16_t> text_;
v8::Debug::ClientData* client_data_;
};
// A Queue of CommandMessage objects. A thread-safe version is
// LockingCommandMessageQueue, based on this class.
class CommandMessageQueue BASE_EMBEDDED {
public:
explicit CommandMessageQueue(int size);
~CommandMessageQueue();
bool IsEmpty() const { return start_ == end_; }
Vector<uint16_t> Get();
void Put(const Vector<uint16_t>& message);
CommandMessage Get();
void Put(const CommandMessage& message);
void Clear() { start_ = end_ = 0; } // Queue is empty after Clear().
private:
// Doubles the size of the message queue, and copies the messages.
void Expand();
Vector<uint16_t>* messages_;
CommandMessage* messages_;
int start_;
int end_;
int size_; // The size of the queue buffer. Queue can hold size-1 messages.
};
// LockingMessageQueue is a thread-safe circular buffer of Vector<uint16_t>
// messages. The message data is not managed by LockingMessageQueue.
// LockingCommandMessageQueue is a thread-safe circular buffer of CommandMessage
// messages. The message data is not managed by LockingCommandMessageQueue.
// Pointers to the data are passed in and out. Implemented by adding a
// Mutex to MessageQueue. Includes logging of all puts and gets.
class LockingMessageQueue BASE_EMBEDDED {
// Mutex to CommandMessageQueue. Includes logging of all puts and gets.
class LockingCommandMessageQueue BASE_EMBEDDED {
public:
explicit LockingMessageQueue(int size);
~LockingMessageQueue();
explicit LockingCommandMessageQueue(int size);
~LockingCommandMessageQueue();
bool IsEmpty() const;
Vector<uint16_t> Get();
void Put(const Vector<uint16_t>& message);
CommandMessage Get();
void Put(const CommandMessage& message);
void Clear();
private:
MessageQueue queue_;
CommandMessageQueue queue_;
Mutex* lock_;
DISALLOW_COPY_AND_ASSIGN(LockingMessageQueue);
DISALLOW_COPY_AND_ASSIGN(LockingCommandMessageQueue);
};
class DebugMessageThread;
class Debugger {
public:
static void DebugRequest(const uint16_t* json_request, int length);
@ -465,36 +539,27 @@ class Debugger {
Handle<JSFunction> fun);
static void OnNewFunction(Handle<JSFunction> fun);
static void ProcessDebugEvent(v8::DebugEvent event,
Handle<Object> event_data,
Handle<JSObject> event_data,
bool auto_continue);
static void NotifyMessageHandler(v8::DebugEvent event,
Handle<Object> exec_state,
Handle<Object> event_data,
Handle<JSObject> exec_state,
Handle<JSObject> event_data,
bool auto_continue);
static void SetEventListener(Handle<Object> callback, Handle<Object> data);
static void SetMessageHandler(v8::DebugMessageHandler handler, void* data,
bool message_handler_thread);
static void TearDown();
static void SetHostDispatchHandler(v8::DebugHostDispatchHandler handler,
void* data);
static void SetMessageHandler(v8::Debug::MessageHandler2 handler);
static void SetHostDispatchHandler(v8::Debug::HostDispatchHandler handler,
int period);
// Invoke the message handler function.
static void InvokeMessageHandler(Vector<uint16_t> message);
// Send a message to the message handler either through the message thread or
// directly.
static void SendMessage(Vector<uint16_t> message);
// Send the JSON message for a debug event.
static bool SendEventMessage(Handle<Object> event_data);
static void InvokeMessageHandler(MessageImpl message);
// Add a debugger command to the command queue.
static void ProcessCommand(Vector<const uint16_t> command);
static void ProcessCommand(Vector<const uint16_t> command,
v8::Debug::ClientData* client_data = NULL);
// Check whether there are commands in the command queue.
static bool HasCommands();
static void ProcessHostDispatch(void* dispatch);
static Handle<Object> Call(Handle<JSFunction> fun,
Handle<Object> data,
bool* pending_exception);
@ -537,42 +602,18 @@ class Debugger {
static bool compiling_natives_; // Are we compiling natives?
static bool is_loading_debugger_; // Are we loading the debugger?
static bool never_unload_debugger_; // Can we unload the debugger?
static DebugMessageThread* message_thread_;
static v8::DebugMessageHandler message_handler_;
static v8::Debug::MessageHandler2 message_handler_;
static bool message_handler_cleared_; // Was message handler cleared?
static void* message_handler_data_;
static v8::DebugHostDispatchHandler host_dispatch_handler_;
static void* host_dispatch_handler_data_;
static v8::Debug::HostDispatchHandler host_dispatch_handler_;
static int host_dispatch_micros_;
static DebuggerAgent* agent_;
static const int kQueueInitialSize = 4;
static LockingMessageQueue command_queue_;
static LockingMessageQueue message_queue_;
static LockingCommandMessageQueue command_queue_;
static Semaphore* command_received_; // Signaled for each command received.
static Semaphore* message_received_; // Signaled for each message sent.
friend class EnterDebugger;
friend class DebugMessageThread;
};
// Thread to read messages from the message queue and invoke the debug message
// handler in a thread other than the V8 thread. This thread is started if the
// registration of the debug message handler requested that it be called in a
// thread separate from the V8 thread.
class DebugMessageThread: public Thread {
public:
DebugMessageThread() : keep_running_(true) {}
virtual ~DebugMessageThread() {}
// Main function of DebugMessageThread thread.
void Run();
void Stop();
private:
bool keep_running_;
DISALLOW_COPY_AND_ASSIGN(DebugMessageThread);
};
@ -646,6 +687,9 @@ class EnterDebugger BASE_EMBEDDED {
// Check whether there are any JavaScript frames on the stack.
inline bool HasJavaScriptFrames() { return has_js_frames_; }
// Get the active context from before entering the debugger.
inline Handle<Context> GetContext() { return save_.context(); }
private:
EnterDebugger* prev_; // Previous debugger entry if entered recursively.
JavaScriptFrameIterator it_;
@ -719,4 +763,6 @@ class Debug_Address {
} } // namespace v8::internal
#endif // V8_V8_DEBUG_H_
#endif // ENABLE_DEBUGGER_SUPPORT
#endif // V8_DEBUG_H_
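The locking wrapper declared above is the classic monitor pattern: forward every operation to the unsynchronized queue under a scoped lock. A rough equivalent using the standard library (std::mutex and std::lock_guard standing in for v8's OS::CreateMutex and ScopedLock; the element type is simplified to a string):

#include <mutex>
#include <queue>
#include <string>

class LockingQueue {
 public:
  bool IsEmpty() const {
    std::lock_guard<std::mutex> guard(lock_);
    return queue_.empty();
  }

  std::string Get() {
    std::lock_guard<std::mutex> guard(lock_);
    std::string result = queue_.front();  // Precondition: !IsEmpty(), as in
    queue_.pop();                         // CommandMessageQueue::Get().
    return result;
  }

  void Put(const std::string& message) {
    std::lock_guard<std::mutex> guard(lock_);
    queue_.push(message);
  }

 private:
  std::queue<std::string> queue_;
  mutable std::mutex lock_;  // mutable so IsEmpty() can remain const.
};

Note that IsEmpty-then-Get is not atomic across the two calls; in the Debugger above, the command_received_ semaphore provides the producer/consumer handshake, so the queue itself only needs per-operation locking.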

26
deps/v8/src/execution.cc

@ -32,10 +32,12 @@
#include "api.h"
#include "codegen-inl.h"
#ifdef ARM
#include "simulator-arm.h"
#else // ia32
#include "simulator-ia32.h"
#if V8_TARGET_ARCH_IA32
#include "ia32/simulator-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/simulator-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/simulator-arm.h"
#endif
#include "debug.h"
@ -305,6 +307,7 @@ void StackGuard::Preempt() {
}
#ifdef ENABLE_DEBUGGER_SUPPORT
bool StackGuard::IsDebugBreak() {
ExecutionAccess access;
return thread_local_.interrupt_flags_ & DEBUGBREAK;
@ -331,7 +334,7 @@ void StackGuard::DebugCommand() {
set_limits(kInterruptLimit, access);
}
}
#endif
void StackGuard::Continue(InterruptFlag after_what) {
ExecutionAccess access;
@ -539,6 +542,7 @@ static Object* RuntimePreempt() {
ContextSwitcher::PreemptionReceived();
#ifdef ENABLE_DEBUGGER_SUPPORT
if (Debug::InDebugger()) {
// If currently in the debugger, don't do any actual preemption but record
// that preemption occurred while in the debugger.
@ -548,11 +552,17 @@ static Object* RuntimePreempt() {
v8::Unlocker unlocker;
Thread::YieldCPU();
}
#else
// Perform preemption.
v8::Unlocker unlocker;
Thread::YieldCPU();
#endif
return Heap::undefined_value();
}
#ifdef ENABLE_DEBUGGER_SUPPORT
Object* Execution::DebugBreakHelper() {
// Just continue if breaks are disabled.
if (Debug::disable_break()) {
@ -598,12 +608,14 @@ Object* Execution::DebugBreakHelper() {
// Return to continue execution.
return Heap::undefined_value();
}
#endif
Object* Execution::HandleStackGuardInterrupt() {
#ifdef ENABLE_DEBUGGER_SUPPORT
if (StackGuard::IsDebugBreak() || StackGuard::IsDebugCommand()) {
DebugBreakHelper();
}
#endif
if (StackGuard::IsPreempted()) RuntimePreempt();
if (StackGuard::IsInterrupted()) {
// interrupt
@ -626,7 +638,7 @@ v8::Handle<v8::FunctionTemplate> GCExtension::GetNativeFunction(
v8::Handle<v8::Value> GCExtension::GC(const v8::Arguments& args) {
// All allocation spaces other than NEW_SPACE have the same effect.
Heap::CollectGarbage(0, OLD_DATA_SPACE);
Heap::CollectAllGarbage();
return v8::Undefined();
}
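A toy model of the dispatch in Execution::HandleStackGuardInterrupt above (names hypothetical): interrupt sources set bits in a shared flag word, and the V8 thread tests and clears them at its next safe point. The real code guards the flags with the ExecutionAccess lock rather than atomics:

#include <atomic>

enum InterruptFlag {
  INTERRUPT  = 1 << 0,
  DEBUGBREAK = 1 << 1,
  PREEMPT    = 1 << 2
};

static std::atomic<int> interrupt_flags(0);

// Called from other threads (or a timer) to request attention.
void RequestInterrupt(InterruptFlag what) {
  interrupt_flags.fetch_or(what);
}

// Called by the VM thread when the stack guard limit is hit.
void HandleStackGuardInterrupt() {
  int flags = interrupt_flags.exchange(0);  // Test and clear atomically.
  if (flags & DEBUGBREAK) { /* enter the debugger */ }
  if (flags & PREEMPT)    { /* yield to another thread */ }
  if (flags & INTERRUPT)  { /* handle the plain interrupt */ }
}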

11
deps/v8/src/execution.h

@ -118,8 +118,9 @@ class Execution : public AllStatic {
Handle<JSFunction> fun,
Handle<Object> pos,
Handle<Object> is_global);
#ifdef ENABLE_DEBUGGER_SUPPORT
static Object* DebugBreakHelper();
#endif
// If the stack guard is triggered, but it is not an actual
// stack overflow, then handle the interruption accordingly.
@ -158,11 +159,13 @@ class StackGuard BASE_EMBEDDED {
static void Preempt();
static bool IsInterrupted();
static void Interrupt();
static bool IsDebugBreak();
static void Continue(InterruptFlag after_what);
#ifdef ENABLE_DEBUGGER_SUPPORT
static void DebugBreak();
static bool IsDebugCommand();
static void DebugCommand();
static void Continue(InterruptFlag after_what);
static bool IsDebugBreak();
static bool IsDebugCommand();
#endif
private:
// You should hold the ExecutionAccess lock when calling this method.

13
deps/v8/src/factory.cc

@ -167,14 +167,17 @@ Handle<Script> Factory::NewScript(Handle<String> source) {
Heap::SetLastScriptId(Smi::FromInt(id));
// Create and initialize script object.
Handle<Proxy> wrapper = Factory::NewProxy(0, TENURED);
Handle<Script> script = Handle<Script>::cast(NewStruct(SCRIPT_TYPE));
script->set_source(*source);
script->set_name(Heap::undefined_value());
script->set_id(Heap::last_script_id());
script->set_line_offset(Smi::FromInt(0));
script->set_column_offset(Smi::FromInt(0));
script->set_data(Heap::undefined_value());
script->set_context_data(Heap::undefined_value());
script->set_type(Smi::FromInt(SCRIPT_TYPE_NORMAL));
script->set_wrapper(*Factory::NewProxy(0, TENURED));
script->set_wrapper(*wrapper);
script->set_line_ends(Heap::undefined_value());
return script;
@ -207,14 +210,14 @@ Handle<JSObject> Factory::NewFunctionPrototype(Handle<JSFunction> function) {
}
Handle<Map> Factory::CopyMap(Handle<Map> src) {
CALL_HEAP_FUNCTION(src->Copy(), Map);
Handle<Map> Factory::CopyMapDropDescriptors(Handle<Map> src) {
CALL_HEAP_FUNCTION(src->CopyDropDescriptors(), Map);
}
Handle<Map> Factory::CopyMap(Handle<Map> src,
int extra_inobject_properties) {
Handle<Map> copy = CopyMap(src);
Handle<Map> copy = CopyMapDropDescriptors(src);
// Check that we do not overflow the instance size when adding the
// extra inobject properties.
int instance_size_delta = extra_inobject_properties * kPointerSize;
@ -671,6 +674,7 @@ Handle<Object> Factory::ToObject(Handle<Object> object,
}
#ifdef ENABLE_DEBUGGER_SUPPORT
Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
// Get the original code of the function.
Handle<Code> code(shared->code());
@ -700,6 +704,7 @@ Handle<DebugInfo> Factory::NewDebugInfo(Handle<SharedFunctionInfo> shared) {
return debug_info;
}
#endif
Handle<JSObject> Factory::NewArgumentsObject(Handle<Object> callee,

5
deps/v8/src/factory.h

@ -153,7 +153,7 @@ class Factory : public AllStatic {
static Handle<JSObject> NewFunctionPrototype(Handle<JSFunction> function);
static Handle<Map> CopyMap(Handle<Map> map);
static Handle<Map> CopyMapDropDescriptors(Handle<Map> map);
// Copy the map adding more inobject properties if possible without
// overflowing the instance size.
@ -310,8 +310,9 @@ class Factory : public AllStatic {
uint32_t key,
Handle<Object> value);
#ifdef ENABLE_DEBUGGER_SUPPORT
static Handle<DebugInfo> NewDebugInfo(Handle<SharedFunctionInfo> shared);
#endif
// Return a map using the map cache in the global context.
// The key is an ordered set of property names.

12
deps/v8/src/frames-inl.h

@ -29,12 +29,14 @@
#define V8_FRAMES_INL_H_
#include "frames.h"
#ifdef ARM
#include "frames-arm.h"
#else
#include "frames-ia32.h"
#endif
#if V8_TARGET_ARCH_IA32
#include "ia32/frames-ia32.h"
#elif V8_TARGET_ARCH_X64
#include "x64/frames-x64.h"
#elif V8_TARGET_ARCH_ARM
#include "arm/frames-arm.h"
#endif
namespace v8 { namespace internal {

8
deps/v8/src/frames.cc

@ -647,10 +647,10 @@ void EntryFrame::Iterate(ObjectVisitor* v) const {
handler->Iterate(v);
// Make sure that the entry frame does not contain more than
// one stack handler.
if (kDebug) {
it.Advance();
ASSERT(it.done());
}
#ifdef DEBUG
it.Advance();
ASSERT(it.done());
#endif
}

9
deps/v8/src/func-name-inferrer.cc

@ -63,11 +63,12 @@ Handle<String> FuncNameInferrer::MakeNameFromStackHelper(int pos,
}
void FuncNameInferrer::MaybeInferFunctionName() {
if (func_to_infer_ != NULL) {
func_to_infer_->set_inferred_name(MakeNameFromStack());
func_to_infer_ = NULL;
void FuncNameInferrer::InferFunctionsNames() {
Handle<String> func_name = MakeNameFromStack();
for (int i = 0; i < funcs_to_infer_.length(); ++i) {
funcs_to_infer_[i]->set_inferred_name(func_name);
}
funcs_to_infer_.Rewind(0);
}

23
deps/v8/src/func-name-inferrer.h

@ -45,7 +45,7 @@ class FuncNameInferrer BASE_EMBEDDED {
FuncNameInferrer() :
entries_stack_(10),
names_stack_(5),
func_to_infer_(NULL),
funcs_to_infer_(4),
dot_(Factory::NewStringFromAscii(CStrVector("."))) {
}
@ -57,39 +57,34 @@ class FuncNameInferrer BASE_EMBEDDED {
entries_stack_.Add(names_stack_.length());
}
void Leave() {
ASSERT(IsOpen());
names_stack_.Rewind(entries_stack_.RemoveLast());
}
void PushName(Handle<String> name) {
if (IsOpen()) {
names_stack_.Add(name);
}
}
void SetFuncToInfer(FunctionLiteral* func_to_infer) {
void AddFunction(FunctionLiteral* func_to_infer) {
if (IsOpen()) {
// If we encounter another function literal after already having
// encountered one, the second one replaces the first.
func_to_infer_ = func_to_infer;
funcs_to_infer_.Add(func_to_infer);
}
}
void InferAndLeave() {
ASSERT(IsOpen());
MaybeInferFunctionName();
Leave();
if (!funcs_to_infer_.is_empty()) {
InferFunctionsNames();
}
names_stack_.Rewind(entries_stack_.RemoveLast());
}
private:
Handle<String> MakeNameFromStack();
Handle<String> MakeNameFromStackHelper(int pos, Handle<String> prev);
void MaybeInferFunctionName();
void InferFunctionsNames();
List<int> entries_stack_;
List<Handle<String> > names_stack_;
FunctionLiteral* func_to_infer_;
List<FunctionLiteral*> funcs_to_infer_;
Handle<String> dot_;
DISALLOW_COPY_AND_ASSIGN(FuncNameInferrer);
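The change above moves from remembering a single candidate literal to collecting all of them, so that in an expression like a.b = function() { ... } every anonymous function seen while the inferrer is open receives the same dotted name. A simplified sketch of that scheme (standard containers instead of v8's List and handles):

#include <string>
#include <vector>

struct FunctionLiteral {
  std::string inferred_name;
};

class NameInferrer {
 public:
  // Called for each name on the left-hand side, e.g. "a" then "b".
  void PushName(const std::string& name) { names_.push_back(name); }

  // Called for each anonymous function literal in the right-hand side.
  void AddFunction(FunctionLiteral* fn) { funcs_.push_back(fn); }

  void InferAndLeave() {
    if (!funcs_.empty()) {
      std::string name;
      for (size_t i = 0; i < names_.size(); ++i) {
        if (i > 0) name += ".";
        name += names_[i];
      }
      // Every collected literal gets the name; previously only the most
      // recently seen literal was remembered.
      for (size_t i = 0; i < funcs_.size(); ++i) {
        funcs_[i]->inferred_name = name;
      }
      funcs_.clear();
    }
    names_.clear();
  }

 private:
  std::vector<std::string> names_;
  std::vector<FunctionLiteral*> funcs_;
};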

2
deps/v8/src/global-handles.cc

@ -258,7 +258,7 @@ void GlobalHandles::IterateWeakRoots(ObjectVisitor* v) {
}
void GlobalHandles::MarkWeakRoots(WeakSlotCallback f) {
void GlobalHandles::IdentifyWeakHandles(WeakSlotCallback f) {
for (Node* current = head_; current != NULL; current = current->next()) {
if (current->state_ == Node::WEAK) {
if (f(&current->object_)) {

5
deps/v8/src/global-handles.h

@ -98,8 +98,9 @@ class GlobalHandles : public AllStatic {
// Iterates over all weak roots in heap.
static void IterateWeakRoots(ObjectVisitor* v);
// Mark the weak pointers based on the callback.
static void MarkWeakRoots(WeakSlotCallback f);
// Find all weak handles satisfying the callback predicate, mark
// them as pending.
static void IdentifyWeakHandles(WeakSlotCallback f);
// Add an object group.
// Should only be used in a GC callback function before a collection.

110
deps/v8/src/globals.h

@ -28,27 +28,27 @@
#ifndef V8_GLOBALS_H_
#define V8_GLOBALS_H_
// -----------------------------------------------------------------------------
// Types
// Visual Studio C++ is missing the stdint.h header file. Instead we define
// standard integer types for Windows here.
#ifdef _MSC_VER
typedef signed char int8_t;
typedef unsigned char uint8_t;
typedef short int16_t; // NOLINT
typedef unsigned short uint16_t; // NOLINT
typedef int int32_t;
typedef unsigned int uint32_t;
typedef __int64 int64_t;
typedef unsigned __int64 uint64_t;
#else // _MSC_VER
#include <stdint.h> // for intptr_t
#endif // _MSC_VER
namespace v8 { namespace internal {
// Processor architecture detection. For more info on what's defined, see:
// http://msdn.microsoft.com/en-us/library/b0084kay.aspx
// http://www.agner.org/optimize/calling_conventions.pdf
// or with gcc, run: "echo | gcc -E -dM -"
#if defined(_M_X64) || defined(__x86_64__)
#define V8_HOST_ARCH_X64 1
#define V8_HOST_ARCH_64_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(_M_IX86) || defined(__i386__)
#define V8_HOST_ARCH_IA32 1
#define V8_HOST_ARCH_32_BIT 1
#define V8_HOST_CAN_READ_UNALIGNED 1
#elif defined(__ARMEL__)
#define V8_HOST_ARCH_ARM 1
#define V8_HOST_ARCH_32_BIT 1
#else
#error Your architecture was not detected as supported by v8
#endif
// Support for alternative bool type. This is only enabled if the code is
// compiled with USE_MYBOOL defined. This catches some nasty type bugs.
// For instance, 'bool b = "false";' results in b == true! This is a hidden
@ -69,58 +69,73 @@ typedef unsigned int __my_bool__;
typedef uint8_t byte;
typedef byte* Address;
// Define our own macros for writing 64-bit constants. This is less fragile
// than defining __STDC_CONSTANT_MACROS before including <stdint.h>, and it
// works on compilers that don't have it (like MSVC).
#if V8_HOST_ARCH_64_BIT
#ifdef _MSC_VER
#define V8_UINT64_C(x) (x ## UI64)
#define V8_INT64_C(x) (x ## I64)
#define V8_PTR_PREFIX "ll"
#else
#define V8_UINT64_C(x) (x ## UL)
#define V8_INT64_C(x) (x ## L)
#define V8_PTR_PREFIX "l"
#endif
#else // V8_HOST_ARCH_64_BIT
#define V8_PTR_PREFIX ""
#endif
#define V8PRIp V8_PTR_PREFIX "x"
// Code-point values in Unicode 4.0 are 21 bits wide.
typedef uint16_t uc16;
typedef signed int uc32;
#ifndef ARM
#define CAN_READ_UNALIGNED 1
#endif
typedef int32_t uc32;
// -----------------------------------------------------------------------------
// Constants
#ifdef DEBUG
const bool kDebug = true;
#else
const bool kDebug = false;
#endif // DEBUG
const int KB = 1024;
const int MB = KB * KB;
const int GB = KB * KB * KB;
const int kMaxInt = 0x7FFFFFFF;
const int kMinInt = -kMaxInt - 1;
const uint32_t kMaxUInt32 = 0xFFFFFFFFu;
const int kCharSize = sizeof(char); // NOLINT
const int kShortSize = sizeof(short); // NOLINT
const int kIntSize = sizeof(int); // NOLINT
const int kDoubleSize = sizeof(double); // NOLINT
const int kPointerSize = sizeof(void*); // NOLINT
#if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3;
#else
const int kPointerSizeLog2 = 2;
#endif
const int kObjectAlignmentBits = 2;
const int kObjectAlignmentMask = (1 << kObjectAlignmentBits) - 1;
const int kObjectAlignment = 1 << kObjectAlignmentBits;
const int kObjectAlignmentBits = kPointerSizeLog2;
const intptr_t kObjectAlignmentMask = (1 << kObjectAlignmentBits) - 1;
const intptr_t kObjectAlignment = 1 << kObjectAlignmentBits;
// Tag information for HeapObject.
const int kHeapObjectTag = 1;
const int kHeapObjectTagSize = 2;
const int kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;
// Tag information for Smi.
const int kSmiTag = 0;
const int kSmiTagSize = 1;
const int kSmiTagMask = (1 << kSmiTagSize) - 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;
// Tag information for Failure.
const int kFailureTag = 3;
const int kFailureTagSize = 2;
const int kFailureTagMask = (1 << kFailureTagSize) - 1;
const intptr_t kFailureTagMask = (1 << kFailureTagSize) - 1;
const int kBitsPerByte = 8;
@ -129,11 +144,21 @@ const int kBitsPerPointer = kPointerSize * kBitsPerByte;
const int kBitsPerInt = kIntSize * kBitsPerByte;
// Zap-value: The value used for zapping dead objects. Should be a recognizable
// illegal heap object pointer.
// Zap-value: The value used for zapping dead objects.
// Should be a recognizable hex value tagged as a heap object pointer.
#ifdef V8_HOST_ARCH_64_BIT
const Address kZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0xdeadbeedbeadbeed));
const Address kHandleZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1baddead0baddead));
const Address kFromSpaceZapValue =
reinterpret_cast<Address>(V8_UINT64_C(0x1beefdad0beefdad));
#else
const Address kZapValue = reinterpret_cast<Address>(0xdeadbeed);
const Address kHandleZapValue = reinterpret_cast<Address>(0xbaddead);
const Address kFromSpaceZapValue = reinterpret_cast<Address>(0xbeefdad);
#endif
// -----------------------------------------------------------------------------
// Forward declarations for frequently used classes
@ -146,7 +171,6 @@ class Assembler;
class BreakableStatement;
class Code;
class CodeGenerator;
class CodeRegion;
class CodeStub;
class Context;
class Debug;
@ -377,13 +401,13 @@ enum StateTag {
// Testers for test.
#define HAS_SMI_TAG(value) \
((reinterpret_cast<int>(value) & kSmiTagMask) == kSmiTag)
((reinterpret_cast<intptr_t>(value) & kSmiTagMask) == kSmiTag)
#define HAS_FAILURE_TAG(value) \
((reinterpret_cast<int>(value) & kFailureTagMask) == kFailureTag)
((reinterpret_cast<intptr_t>(value) & kFailureTagMask) == kFailureTag)
#define HAS_HEAP_OBJECT_TAG(value) \
((reinterpret_cast<int>(value) & kHeapObjectTagMask) == kHeapObjectTag)
((reinterpret_cast<intptr_t>(value) & kHeapObjectTagMask) == kHeapObjectTag)
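A worked example of the tagging scheme these constants and testers implement: a smi is an integer shifted left by kSmiTagSize with a low bit of 0, while heap object pointers carry the low-bit tag 1, which works because real object addresses are at least 4-byte aligned. The switch from int to intptr_t above is what keeps these tests correct on 64-bit hosts. A toy demonstration, not the v8 code:

#include <cassert>
#include <cstdint>

const int kSmiTag = 0;
const int kSmiTagSize = 1;
const intptr_t kSmiTagMask = (1 << kSmiTagSize) - 1;                // 0x1
const int kHeapObjectTag = 1;
const int kHeapObjectTagSize = 2;
const intptr_t kHeapObjectTagMask = (1 << kHeapObjectTagSize) - 1;  // 0x3

intptr_t SmiFromInt(int value) {        // value must fit in 31 bits
  return static_cast<intptr_t>(value) << kSmiTagSize;
}
int SmiToInt(intptr_t smi) { return static_cast<int>(smi >> kSmiTagSize); }

int main() {
  intptr_t smi = SmiFromInt(42);                 // 84 == 42 << 1
  assert((smi & kSmiTagMask) == kSmiTag);        // HAS_SMI_TAG holds
  assert(SmiToInt(smi) == 42);

  alignas(4) static char storage[8];             // stand-in for a heap object
  intptr_t tagged = reinterpret_cast<intptr_t>(storage) | kHeapObjectTag;
  assert((tagged & kHeapObjectTagMask) == kHeapObjectTag);  // HAS_HEAP_OBJECT_TAG
  return 0;
}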
// OBJECT_SIZE_ALIGN returns the value aligned up to a valid HeapObject size
#define OBJECT_SIZE_ALIGN(value) \
@ -492,7 +516,7 @@ F FUNCTION_CAST(Address addr) {
// exception'.
//
// Bit_cast uses the memcpy exception to move the bits from a variable of one
// type o a variable of another type. Of course the end result is likely to
// type to a variable of another type. Of course the end result is likely to
// be implementation dependent. Most compilers (gcc-4.2 and MSVC 2005)
// will completely optimize bit_cast away.
//

72
deps/v8/src/handles.cc

@ -212,10 +212,20 @@ Handle<Object> SetProperty(Handle<Object> object,
}
Handle<Object> IgnoreAttributesAndSetLocalProperty(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes) {
Handle<Object> ForceSetProperty(Handle<JSObject> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes) {
CALL_HEAP_FUNCTION(
Runtime::ForceSetObjectProperty(object, key, value, attributes), Object);
}
Handle<Object> IgnoreAttributesAndSetLocalProperty(
Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
PropertyAttributes attributes) {
CALL_HEAP_FUNCTION(object->
IgnoreAttributesAndSetLocalProperty(*key, *value, attributes), Object);
}
@ -491,17 +501,6 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object) {
break;
}
// Compute the property keys.
content = UnionOfKeys(content, GetEnumPropertyKeys(current));
// Add the property keys from the interceptor.
if (current->HasNamedInterceptor()) {
v8::Handle<v8::Array> result =
GetKeysForNamedInterceptor(object, current);
if (!result.IsEmpty())
content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
}
// Compute the element keys.
Handle<FixedArray> element_keys =
Factory::NewFixedArray(current->NumberOfEnumElements());
@ -515,6 +514,17 @@ Handle<FixedArray> GetKeysInFixedArrayFor(Handle<JSObject> object) {
if (!result.IsEmpty())
content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
}
// Compute the property keys.
content = UnionOfKeys(content, GetEnumPropertyKeys(current));
// Add the property keys from the interceptor.
if (current->HasNamedInterceptor()) {
v8::Handle<v8::Array> result =
GetKeysForNamedInterceptor(object, current);
if (!result.IsEmpty())
content = AddKeysFromJSArray(content, v8::Utils::OpenHandle(*result));
}
}
}
return content;
@ -549,7 +559,7 @@ Handle<FixedArray> GetEnumPropertyKeys(Handle<JSObject> object) {
index++;
}
}
(*storage)->SortPairs(*sort_array);
(*storage)->SortPairs(*sort_array, sort_array->length());
Handle<FixedArray> bridge_storage =
Factory::NewFixedArray(DescriptorArray::kEnumCacheBridgeLength);
DescriptorArray* desc = object->map()->instance_descriptors();
@ -617,9 +627,9 @@ OptimizedObjectForAddingMultipleProperties::
}
void LoadLazy(Handle<JSFunction> fun, bool* pending_exception) {
void LoadLazy(Handle<JSObject> obj, bool* pending_exception) {
HandleScope scope;
Handle<FixedArray> info(FixedArray::cast(fun->shared()->lazy_load_data()));
Handle<FixedArray> info(FixedArray::cast(obj->map()->constructor()));
int index = Smi::cast(info->get(0))->value();
ASSERT(index >= 0);
Handle<Context> compile_context(Context::cast(info->get(1)));
@ -651,6 +661,7 @@ void LoadLazy(Handle<JSFunction> fun, bool* pending_exception) {
// We shouldn't get here if compiling the script failed.
ASSERT(!boilerplate.is_null());
#ifdef ENABLE_DEBUGGER_SUPPORT
// When the debugger, running in its own context, touches lazy loaded
// functions, loading can be triggered. In that case ensure that the
// execution of the boilerplate is in the correct context.
@ -659,30 +670,43 @@ void LoadLazy(Handle<JSFunction> fun, bool* pending_exception) {
Top::context() == *Debug::debug_context()) {
Top::set_context(*compile_context);
}
#endif
// Reset the lazy load data before running the script to make sure
// not to get recursive lazy loading.
fun->shared()->set_lazy_load_data(Heap::undefined_value());
obj->map()->set_needs_loading(false);
obj->map()->set_constructor(info->get(3));
// Run the script.
Handle<JSFunction> script_fun(
Factory::NewFunctionFromBoilerplate(boilerplate, function_context));
Execution::Call(script_fun, receiver, 0, NULL, pending_exception);
// If lazy loading failed, restore the unloaded state of fun.
if (*pending_exception) fun->shared()->set_lazy_load_data(*info);
// If lazy loading failed, restore the unloaded state of obj.
if (*pending_exception) {
obj->map()->set_needs_loading(true);
obj->map()->set_constructor(*info);
}
}
void SetupLazy(Handle<JSFunction> fun,
void SetupLazy(Handle<JSObject> obj,
int index,
Handle<Context> compile_context,
Handle<Context> function_context) {
Handle<FixedArray> arr = Factory::NewFixedArray(3);
Handle<FixedArray> arr = Factory::NewFixedArray(4);
arr->set(0, Smi::FromInt(index));
arr->set(1, *compile_context); // Compile in this context
arr->set(2, *function_context); // Set function context to this
fun->shared()->set_lazy_load_data(*arr);
arr->set(3, obj->map()->constructor()); // Remember the constructor
Handle<Map> old_map(obj->map());
Handle<Map> new_map = Factory::CopyMapDropTransitions(old_map);
obj->set_map(*new_map);
new_map->set_needs_loading(true);
// Store the lazy loading info in the constructor field. We'll
// reestablish the constructor from the fixed array after loading.
new_map->set_constructor(*arr);
ASSERT(!obj->IsLoaded());
}
} } // namespace v8::internal
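The LoadLazy/SetupLazy pair above uses a standard re-entrancy guard: the needs_loading bit is cleared and the constructor field restored before the boilerplate runs, so touching the object during the load cannot trigger a recursive load, and the unloaded state is reinstated only if the load fails. Schematically (hypothetical types, not the v8 handles code):

struct LazyObject {
  bool needs_loading;
  void* load_info;  // stands in for the info stashed in the map's constructor
};

// Hypothetical stand-in for compiling and running the lazy-load script.
bool RunLoader(LazyObject* obj) { return obj != 0; }

bool EnsureLoaded(LazyObject* obj) {
  if (!obj->needs_loading) return true;
  void* saved_info = obj->load_info;
  obj->needs_loading = false;   // Reset first: no recursive lazy loading.
  obj->load_info = 0;
  if (!RunLoader(obj)) {
    obj->needs_loading = true;  // Load failed: restore the unloaded state.
    obj->load_info = saved_info;
    return false;
  }
  return true;
}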

9
deps/v8/src/handles.h

@ -196,6 +196,11 @@ Handle<Object> SetProperty(Handle<Object> object,
Handle<Object> value,
PropertyAttributes attributes);
Handle<Object> ForceSetProperty(Handle<JSObject> object,
Handle<Object> key,
Handle<Object> value,
PropertyAttributes attributes);
Handle<Object> IgnoreAttributesAndSetLocalProperty(Handle<JSObject> object,
Handle<String> key,
Handle<Object> value,
@ -296,11 +301,11 @@ bool CompileLazy(Handle<JSFunction> function, ClearExceptionFlag flag);
bool CompileLazyInLoop(Handle<JSFunction> function, ClearExceptionFlag flag);
// These deal with lazily loaded properties.
void SetupLazy(Handle<JSFunction> fun,
void SetupLazy(Handle<JSObject> obj,
int index,
Handle<Context> compile_context,
Handle<Context> function_context);
void LoadLazy(Handle<JSFunction> fun, bool* pending_exception);
void LoadLazy(Handle<JSObject> obj, bool* pending_exception);
class NoHandleAllocation BASE_EMBEDDED {
public:

4
deps/v8/src/heap-inl.h

@ -251,11 +251,11 @@ void Heap::SetLastScriptId(Object* last_script_id) {
__object__ = FUNCTION_CALL; \
} \
if (!__object__->IsFailure()) RETURN_VALUE; \
if (__object__->IsOutOfMemoryFailure()) { \
if (__object__->IsOutOfMemoryFailure() || \
__object__->IsRetryAfterGC()) { \
/* TODO(1181417): Fix this. */ \
v8::internal::V8::FatalProcessOutOfMemory("CALL_AND_RETRY_2"); \
} \
ASSERT(!__object__->IsRetryAfterGC()); \
RETURN_EMPTY; \
} while (false)

161
deps/v8/src/heap.cc

@ -538,7 +538,7 @@ class ScavengeVisitor: public ObjectVisitor {
// Shared state read by the scavenge collector and set by ScavengeObject.
static Address promoted_top = NULL;
static Address promoted_rear = NULL;
#ifdef DEBUG
@ -554,24 +554,34 @@ class VerifyNonPointerSpacePointersVisitor: public ObjectVisitor {
}
}
};
#endif
void Heap::Scavenge() {
#ifdef DEBUG
if (FLAG_enable_slow_asserts) {
VerifyNonPointerSpacePointersVisitor v;
HeapObjectIterator it(code_space_);
while (it.has_next()) {
HeapObject* object = it.next();
if (object->IsCode()) {
Code::cast(object)->ConvertICTargetsFromAddressToObject();
}
static void VerifyNonPointerSpacePointers() {
// Verify that there are no pointers to new space in spaces where we
// do not expect them.
VerifyNonPointerSpacePointersVisitor v;
HeapObjectIterator code_it(Heap::code_space());
while (code_it.has_next()) {
HeapObject* object = code_it.next();
if (object->IsCode()) {
Code::cast(object)->ConvertICTargetsFromAddressToObject();
object->Iterate(&v);
Code::cast(object)->ConvertICTargetsFromObjectToAddress();
} else {
// If we find non-code objects in code space (e.g., free list
// nodes) we want to verify them as well.
object->Iterate(&v);
if (object->IsCode()) {
Code::cast(object)->ConvertICTargetsFromObjectToAddress();
}
}
}
HeapObjectIterator data_it(Heap::old_data_space());
while (data_it.has_next()) data_it.next()->Iterate(&v);
}
#endif
void Heap::Scavenge() {
#ifdef DEBUG
if (FLAG_enable_slow_asserts) VerifyNonPointerSpacePointers();
#endif
gc_state_ = SCAVENGE;
@ -596,72 +606,70 @@ void Heap::Scavenge() {
new_space_.Flip();
new_space_.ResetAllocationInfo();
// We need to sweep newly copied objects which can be in either the to space
// or the old space. For to space objects, we use a mark. Newly copied
// objects lie between the mark and the allocation top. For objects
// promoted to old space, we write their addresses downward from the top of
// the new space. Sweeping newly promoted objects requires an allocation
// pointer and a mark. Note that the allocation pointer 'top' actually
// moves downward from the high address in the to space.
// We need to sweep newly copied objects which can be either in the
// to space or promoted to the old generation. For to-space
// objects, we treat the bottom of the to space as a queue. Newly
// copied and unswept objects lie between a 'front' mark and the
// allocation pointer.
//
// Promoted objects can go into various old-generation spaces, and
// can be allocated internally in the spaces (from the free list).
// We treat the top of the to space as a queue of addresses of
// promoted objects. The addresses of newly promoted and unswept
// objects lie between a 'front' mark and a 'rear' mark that is
// updated as a side effect of promoting an object.
//
// There is guaranteed to be enough room at the top of the to space for the
// addresses of promoted objects: every object promoted frees up its size in
// bytes from the top of the new space, and objects are at least one pointer
// in size. Using the new space to record promoted addresses makes the
// scavenge collector agnostic to the allocation strategy (eg, linear or
// free-list) used in old space.
Address new_mark = new_space_.ToSpaceLow();
Address promoted_mark = new_space_.ToSpaceHigh();
promoted_top = new_space_.ToSpaceHigh();
// There is guaranteed to be enough room at the top of the to space
// for the addresses of promoted objects: every object promoted
// frees up its size in bytes from the top of the new space, and
// objects are at least one pointer in size.
Address new_space_front = new_space_.ToSpaceLow();
Address promoted_front = new_space_.ToSpaceHigh();
promoted_rear = new_space_.ToSpaceHigh();
ScavengeVisitor scavenge_visitor;
// Copy roots.
IterateRoots(&scavenge_visitor);
// Copy objects reachable from the old generation. By definition, there
// are no intergenerational pointers in code or data spaces.
// Copy objects reachable from weak pointers.
GlobalHandles::IterateWeakRoots(&scavenge_visitor);
// Copy objects reachable from the old generation. By definition,
// there are no intergenerational pointers in code or data spaces.
IterateRSet(old_pointer_space_, &ScavengePointer);
IterateRSet(map_space_, &ScavengePointer);
lo_space_->IterateRSet(&ScavengePointer);
bool has_processed_weak_pointers = false;
while (true) {
ASSERT(new_mark <= new_space_.top());
ASSERT(promoted_mark >= promoted_top);
do {
ASSERT(new_space_front <= new_space_.top());
ASSERT(promoted_front >= promoted_rear);
// The addresses new_space_front and new_space_.top() define a
// queue of unprocessed copied objects. Process them until the
// queue is empty.
while (new_space_front < new_space_.top()) {
HeapObject* object = HeapObject::FromAddress(new_space_front);
object->Iterate(&scavenge_visitor);
new_space_front += object->Size();
}
// Copy objects reachable from newly copied objects.
while (new_mark < new_space_.top() || promoted_mark > promoted_top) {
// Sweep newly copied objects in the to space. The allocation pointer
// can change during sweeping.
Address previous_top = new_space_.top();
SemiSpaceIterator new_it(new_space(), new_mark);
while (new_it.has_next()) {
new_it.next()->Iterate(&scavenge_visitor);
}
new_mark = previous_top;
// Sweep newly copied objects in the old space. The promotion 'top'
// pointer could change during sweeping.
previous_top = promoted_top;
for (Address current = promoted_mark - kPointerSize;
current >= previous_top;
current -= kPointerSize) {
HeapObject* object = HeapObject::cast(Memory::Object_at(current));
object->Iterate(&scavenge_visitor);
UpdateRSet(object);
}
promoted_mark = previous_top;
// The addresses promoted_front and promoted_rear define a queue
// of unprocessed addresses of promoted objects. Process them
// until the queue is empty.
while (promoted_front > promoted_rear) {
promoted_front -= kPointerSize;
HeapObject* object =
HeapObject::cast(Memory::Object_at(promoted_front));
object->Iterate(&scavenge_visitor);
UpdateRSet(object);
}
if (has_processed_weak_pointers) break; // We are done.
// Copy objects reachable from weak pointers.
GlobalHandles::IterateWeakRoots(&scavenge_visitor);
has_processed_weak_pointers = true;
}
// Take another spin if there are now unswept objects in new space
// (there are currently no more unswept promoted objects).
} while (new_space_front < new_space_.top());
// Set age mark.
new_space_.set_age_mark(new_mark);
new_space_.set_age_mark(new_space_.top());
LOG(ResourceEvent("scavenge", "end"));
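The comment block above describes a Cheney-style breadth-first copy with two implicit queues. A compact, self-contained model of the same control flow (std::vector queues instead of the to-space layout; the promotion decision is reduced to a per-object flag):

#include <cstddef>
#include <vector>

struct Obj {
  std::vector<Obj*> fields;
  bool promote = false;  // stands in for the promote-to-old-generation decision
  bool visited = false;
};

static std::vector<Obj*> to_space;  // queue 1: newly copied objects
static std::vector<Obj*> promoted;  // queue 2: addresses of promoted objects

static void ScavengePointer(Obj* slot) {
  if (slot == 0 || slot->visited) return;
  slot->visited = true;  // "copy" the object by enqueueing it for scanning
  (slot->promote ? promoted : to_space).push_back(slot);
}

static void Scavenge(std::vector<Obj*>& roots) {
  size_t new_space_front = 0;  // corresponds to new_space_front above
  size_t promoted_front = 0;   // corresponds to promoted_front/rear above
  for (size_t i = 0; i < roots.size(); ++i) ScavengePointer(roots[i]);
  do {
    // Drain the to-space queue; scanning an object can enqueue more.
    while (new_space_front < to_space.size()) {
      Obj* o = to_space[new_space_front++];
      for (size_t i = 0; i < o->fields.size(); ++i) ScavengePointer(o->fields[i]);
    }
    // Drain the promoted-object queue the same way.
    while (promoted_front < promoted.size()) {
      Obj* o = promoted[promoted_front++];
      for (size_t i = 0; i < o->fields.size(); ++i) ScavengePointer(o->fields[i]);
    }
    // Scanning promoted objects may have copied more into to-space.
  } while (new_space_front < to_space.size());
}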
@ -882,8 +890,8 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
if (target_space == Heap::old_pointer_space_) {
// Record the object's address at the top of the to space, to allow
// it to be swept by the scavenger.
promoted_top -= kPointerSize;
Memory::Object_at(promoted_top) = *p;
promoted_rear -= kPointerSize;
Memory::Object_at(promoted_rear) = *p;
} else {
#ifdef DEBUG
// Objects promoted to the data space should not have pointers to
@ -939,6 +947,7 @@ Object* Heap::AllocateMap(InstanceType instance_type, int instance_size) {
map->set_code_cache(empty_fixed_array());
map->set_unused_property_fields(0);
map->set_bit_field(0);
map->set_bit_field2(0);
return map;
}
@ -1409,7 +1418,6 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_formal_parameter_count(0);
share->set_instance_class_name(Object_symbol());
share->set_function_data(undefined_value());
share->set_lazy_load_data(undefined_value());
share->set_script(undefined_value());
share->set_start_position_and_type(0);
share->set_debug_info(undefined_value());
@ -1423,8 +1431,8 @@ Object* Heap::AllocateConsString(String* first,
int first_length = first->length();
int second_length = second->length();
int length = first_length + second_length;
bool is_ascii = StringShape(first).IsAsciiRepresentation()
&& StringShape(second).IsAsciiRepresentation();
bool is_ascii = first->IsAsciiRepresentation()
&& second->IsAsciiRepresentation();
// If the resulting string is small make a flat string.
if (length < String::kMinNonFlatLength) {
@ -1484,15 +1492,15 @@ Object* Heap::AllocateSlicedString(String* buffer,
Map* map;
if (length <= String::kMaxShortStringSize) {
map = StringShape(buffer).IsAsciiRepresentation() ?
map = buffer->IsAsciiRepresentation() ?
short_sliced_ascii_string_map() :
short_sliced_string_map();
} else if (length <= String::kMaxMediumStringSize) {
map = StringShape(buffer).IsAsciiRepresentation() ?
map = buffer->IsAsciiRepresentation() ?
medium_sliced_ascii_string_map() :
medium_sliced_string_map();
} else {
map = StringShape(buffer).IsAsciiRepresentation() ?
map = buffer->IsAsciiRepresentation() ?
long_sliced_ascii_string_map() :
long_sliced_string_map();
}
@ -1524,7 +1532,7 @@ Object* Heap::AllocateSubString(String* buffer,
buffer->TryFlatten();
}
Object* result = StringShape(buffer).IsAsciiRepresentation()
Object* result = buffer->IsAsciiRepresentation()
? AllocateRawAsciiString(length)
: AllocateRawTwoByteString(length);
if (result->IsFailure()) return result;
@ -2679,7 +2687,10 @@ void Heap::IterateStrongRoots(ObjectVisitor* v) {
SYNCHRONIZE_TAG("bootstrapper");
Top::Iterate(v);
SYNCHRONIZE_TAG("top");
#ifdef ENABLE_DEBUGGER_SUPPORT
Debug::Iterate(v);
#endif
SYNCHRONIZE_TAG("debug");
CompilationCache::Iterate(v);
SYNCHRONIZE_TAG("compilationcache");

6
deps/v8/src/assembler-ia32-inl.h → deps/v8/src/ia32/assembler-ia32-inl.h

@ -34,8 +34,8 @@
// A light-weight IA32 Assembler.
#ifndef V8_ASSEMBLER_IA32_INL_H_
#define V8_ASSEMBLER_IA32_INL_H_
#ifndef V8_IA32_ASSEMBLER_IA32_INL_H_
#define V8_IA32_ASSEMBLER_IA32_INL_H_
#include "cpu.h"
@ -299,4 +299,4 @@ Operand::Operand(int32_t disp, RelocInfo::Mode rmode) {
} } // namespace v8::internal
#endif // V8_ASSEMBLER_IA32_INL_H_
#endif // V8_IA32_ASSEMBLER_IA32_INL_H_

43
deps/v8/src/assembler-ia32.cc → deps/v8/src/ia32/assembler-ia32.cc

@ -283,6 +283,10 @@ bool Operand::is_reg(Register reg) const {
*pc_++ = (x)
#ifdef GENERATED_CODE_COVERAGE
static void InitCoverageLog();
#endif
// spare_buffer_
static byte* spare_buffer_ = NULL;
@ -315,9 +319,11 @@ Assembler::Assembler(void* buffer, int buffer_size) {
// Clear the buffer in debug mode unless it was provided by the
// caller in which case we can't be sure it's okay to overwrite
// existing code in it; see CodePatcher::CodePatcher(...).
if (kDebug && own_buffer_) {
#ifdef DEBUG
if (own_buffer_) {
memset(buffer_, 0xCC, buffer_size); // int3
}
#endif
// setup buffer pointers
ASSERT(buffer_ != NULL);
@ -329,6 +335,9 @@ Assembler::Assembler(void* buffer, int buffer_size) {
current_position_ = RelocInfo::kNoPosition;
written_statement_position_ = current_statement_position_;
written_position_ = current_position_;
#ifdef GENERATED_CODE_COVERAGE
InitCoverageLog();
#endif
}
@ -2073,9 +2082,9 @@ void Assembler::GrowBuffer() {
// Clear the buffer in debug mode. Use 'int3' instructions to make
// sure we run into problems if we ever run uninitialized code.
if (kDebug) {
memset(desc.buffer, 0xCC, desc.buffer_size);
}
#ifdef DEBUG
memset(desc.buffer, 0xCC, desc.buffer_size);
#endif
// copy the data
int pc_delta = desc.buffer - buffer_;
@ -2202,4 +2211,30 @@ void Assembler::WriteInternalReference(int position, const Label& bound_label) {
long_at_put(position, label_loc);
}
#ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL;
static void InitCoverageLog() {
char* file_name = getenv("V8_GENERATED_CODE_COVERAGE_LOG");
if (file_name != NULL) {
coverage_log = fopen(file_name, "aw+");
}
}
void LogGeneratedCodeCoverage(const char* file_line) {
const char* return_address = (&file_line)[-1];
char* push_insn = const_cast<char*>(return_address - 12);
push_insn[0] = 0xeb; // Relative branch insn.
push_insn[1] = 13; // Skip over coverage insns.
if (coverage_log != NULL) {
fprintf(coverage_log, "%s\n", file_line);
fflush(coverage_log);
}
}
#endif
} } // namespace v8::internal

6
deps/v8/src/assembler-ia32.h → deps/v8/src/ia32/assembler-ia32.h

@ -34,8 +34,8 @@
// A light-weight IA32 Assembler.
#ifndef V8_ASSEMBLER_IA32_H_
#define V8_ASSEMBLER_IA32_H_
#ifndef V8_IA32_ASSEMBLER_IA32_H_
#define V8_IA32_ASSEMBLER_IA32_H_
namespace v8 { namespace internal {
@ -860,4 +860,4 @@ class EnsureSpace BASE_EMBEDDED {
} } // namespace v8::internal
#endif // V8_ASSEMBLER_IA32_H_
#endif // V8_IA32_ASSEMBLER_IA32_H_

28
deps/v8/src/builtins-ia32.cc → deps/v8/src/ia32/builtins-ia32.cc

@ -32,7 +32,7 @@
namespace v8 { namespace internal {
#define __ masm->
#define __ ACCESS_MASM(masm)
void Builtins::Generate_Adaptor(MacroAssembler* masm, CFunctionId id) {
@ -54,6 +54,14 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// -- edi: constructor function
// -----------------------------------
Label non_function_call;
// Check that function is not a smi.
__ test(edi, Immediate(kSmiTagMask));
__ j(zero, &non_function_call);
// Check that function is a JSFunction.
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function_call);
// Enter a construct frame.
__ EnterConstructFrame();
@ -69,16 +77,12 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
Label rt_call, allocated;
if (FLAG_inline_new) {
Label undo_allocation;
#ifdef ENABLE_DEBUGGER_SUPPORT
ExternalReference debug_step_in_fp =
ExternalReference::debug_step_in_fp_address();
__ cmp(Operand::StaticVariable(debug_step_in_fp), Immediate(0));
__ j(not_equal, &rt_call);
// Check that function is not a Smi.
__ test(edi, Immediate(kSmiTagMask));
__ j(zero, &rt_call);
// Check that function is a JSFunction
__ CmpObjectType(edi, JS_FUNCTION_TYPE, eax);
__ j(not_equal, &rt_call);
#endif
// Verified that the constructor is a JSFunction.
// Load the initial map and verify that it is in fact a map.
@ -300,6 +304,16 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ push(ecx);
__ ret(0);
// edi: called object
// eax: number of arguments
__ bind(&non_function_call);
// Set expected number of arguments to zero (not changing eax).
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION);
__ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
}

262
deps/v8/src/codegen-ia32.cc → deps/v8/src/ia32/codegen-ia32.cc

@ -30,6 +30,7 @@
#include "bootstrapper.h"
#include "codegen-inl.h"
#include "debug.h"
#include "ic-inl.h"
#include "parser.h"
#include "register-allocator-inl.h"
#include "runtime.h"
@ -37,7 +38,7 @@
namespace v8 { namespace internal {
#define __ masm_->
#define __ ACCESS_MASM(masm_)
// -------------------------------------------------------------------------
// CodeGenState implementation.
@ -1274,12 +1275,9 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
smi_value,
overwrite_mode);
__ Set(answer.reg(), Immediate(value));
if (operand->is_register()) {
__ sub(answer.reg(), Operand(operand->reg()));
} else {
ASSERT(operand->is_constant());
__ sub(Operand(answer.reg()), Immediate(operand->handle()));
}
// We are in the reversed case so they can't both be Smi constants.
ASSERT(operand->is_register());
__ sub(answer.reg(), Operand(operand->reg()));
} else {
operand->ToRegister();
frame_->Spill(operand->reg());
@ -1374,23 +1372,26 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
operand->ToRegister();
__ test(operand->reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(not_zero, operand, not_taken);
Result answer = allocator()->Allocate();
ASSERT(answer.is_valid());
__ mov(answer.reg(), operand->reg());
ASSERT(kSmiTag == 0); // adjust code if not the case
// We do no shifts, only the Smi conversion, if shift_value is 1.
if (shift_value == 0) {
__ sar(answer.reg(), kSmiTagSize);
} else if (shift_value > 1) {
__ shl(answer.reg(), shift_value - 1);
if (shift_value != 0) {
Result answer = allocator()->Allocate();
ASSERT(answer.is_valid());
__ mov(answer.reg(), operand->reg());
ASSERT(kSmiTag == 0); // adjust code if not the case
// We do no shifts, only the Smi conversion, if shift_value is 1.
if (shift_value > 1) {
__ shl(answer.reg(), shift_value - 1);
}
// Convert int result to Smi, checking that it is in int range.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ add(answer.reg(), Operand(answer.reg()));
deferred->enter()->Branch(overflow, operand, not_taken);
operand->Unuse();
deferred->BindExit(&answer);
frame_->Push(&answer);
} else {
deferred->BindExit(operand);
frame_->Push(operand);
}
// Convert int result to Smi, checking that it is in int range.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ add(answer.reg(), Operand(answer.reg()));
deferred->enter()->Branch(overflow, operand, not_taken);
operand->Unuse();
deferred->BindExit(&answer);
frame_->Push(&answer);
}
break;
}
@ -1411,11 +1412,7 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
deferred->enter()->Branch(not_zero, operand, not_taken);
frame_->Spill(operand->reg());
if (op == Token::BIT_AND) {
if (int_value == 0) {
__ xor_(Operand(operand->reg()), operand->reg());
} else {
__ and_(Operand(operand->reg()), Immediate(value));
}
__ and_(Operand(operand->reg()), Immediate(value));
} else if (op == Token::BIT_XOR) {
if (int_value != 0) {
__ xor_(Operand(operand->reg()), Immediate(value));
@ -2009,18 +2006,18 @@ void CodeGenerator::GenerateReturnSequence(Result* return_value) {
// Add a label for checking the size of the code used for returning.
Label check_exit_codesize;
__ bind(&check_exit_codesize);
masm_->bind(&check_exit_codesize);
// Leave the frame and return popping the arguments and the
// receiver.
frame_->Exit();
__ ret((scope_->num_parameters() + 1) * kPointerSize);
masm_->ret((scope_->num_parameters() + 1) * kPointerSize);
DeleteFrame();
// Check that the size of the code used for returning matches what is
// expected by the debugger.
ASSERT_EQ(Debug::kIa32JSReturnSequenceLength,
__ SizeOfCodeGeneratedSince(&check_exit_codesize));
masm_->SizeOfCodeGeneratedSince(&check_exit_codesize));
}
@ -2143,7 +2140,7 @@ void CodeGenerator::GenerateFastCaseSwitchJumpTable(
times_1, 0x0, RelocInfo::INTERNAL_REFERENCE));
smi_value.Unuse();
// Calculate address to overwrite later with actual address of table.
int32_t jump_table_ref = __ pc_offset() - sizeof(int32_t);
int32_t jump_table_ref = masm_->pc_offset() - sizeof(int32_t);
__ Align(4);
Label table_start;
__ bind(&table_start);
@ -3179,10 +3176,12 @@ void CodeGenerator::VisitDebuggerStatement(DebuggerStatement* node) {
ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ DebuggerStatement");
CodeForStatementPosition(node);
#ifdef ENABLE_DEBUGGER_SUPPORT
// Spill everything, even constants, to the frame.
frame_->SpillAll();
frame_->CallRuntime(Runtime::kDebugBreak, 0);
// Ignore the return value.
#endif
}
@ -3384,7 +3383,9 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
// Loop up the context chain. There is no frame effect so it is
// safe to use raw labels here.
Label next, fast;
if (!context.reg().is(tmp.reg())) __ mov(tmp.reg(), context.reg());
if (!context.reg().is(tmp.reg())) {
__ mov(tmp.reg(), context.reg());
}
__ bind(&next);
// Terminate at global context.
__ cmp(FieldOperand(tmp.reg(), HeapObject::kMapOffset),
@ -3410,7 +3411,10 @@ Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Result answer = frame_->CallLoadIC(mode);
// A test eax instruction following the call signals that the inobject
// property case was inlined. Ensure that there is not a test eax
// instruction here.
__ nop();
// Discard the global object. The result is in answer.
frame_->Drop();
return answer;
@ -3933,6 +3937,9 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
} else {
Literal* literal = node->value()->AsLiteral();
bool overwrite_value =
(node->value()->AsBinaryOperation() != NULL &&
node->value()->AsBinaryOperation()->ResultOverwriteAllowed());
Variable* right_var = node->value()->AsVariableProxy()->AsVariable();
// There are two cases where the target is not read in the right hand
// side, that are easy to test for: the right hand side is a literal,
@ -3945,7 +3952,9 @@ void CodeGenerator::VisitAssignment(Assignment* node) {
target.GetValue(NOT_INSIDE_TYPEOF);
}
Load(node->value());
GenericBinaryOperation(node->binary_op(), node->type());
GenericBinaryOperation(node->binary_op(),
node->type(),
overwrite_value ? OVERWRITE_RIGHT : NO_OVERWRITE);
}
if (var != NULL &&
@ -4535,6 +4544,17 @@ void CodeGenerator::GenerateObjectEquals(ZoneList<Expression*>* args) {
}
void CodeGenerator::GenerateGetFramePointer(ZoneList<Expression*>* args) {
ASSERT(args->length() == 0);
ASSERT(kSmiTagSize == 1 && kSmiTag == 0); // shifting code depends on this
Result ebp_as_smi = allocator_->Allocate();
ASSERT(ebp_as_smi.is_valid());
__ mov(ebp_as_smi.reg(), Operand(ebp));
__ shr(ebp_as_smi.reg(), kSmiTagSize);
frame_->Push(&ebp_as_smi);
}
void CodeGenerator::VisitCallRuntime(CallRuntime* node) {
if (CheckForInlineRuntimeCall(node)) {
return;
@ -5228,6 +5248,48 @@ bool CodeGenerator::HasValidEntryRegisters() {
#endif
class DeferredReferenceGetNamedValue: public DeferredCode {
public:
DeferredReferenceGetNamedValue(CodeGenerator* cgen, Handle<String> name)
: DeferredCode(cgen), name_(name) {
set_comment("[ DeferredReferenceGetNamedValue");
}
virtual void Generate();
Label* patch_site() { return &patch_site_; }
private:
Label patch_site_;
Handle<String> name_;
};
void DeferredReferenceGetNamedValue::Generate() {
CodeGenerator* cgen = generator();
Result receiver(cgen);
enter()->Bind(&receiver);
cgen->frame()->Push(&receiver);
cgen->frame()->Push(name_);
Result answer = cgen->frame()->CallLoadIC(RelocInfo::CODE_TARGET);
// The call must be followed by a test eax instruction to indicate
// that the inobject property case was inlined.
ASSERT(answer.is_register() && answer.reg().is(eax));
// Store the delta to the map check instruction here in the test instruction.
// Use masm_-> instead of the double underscore macro since the latter can't
// return a value.
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
// Here we use masm_-> instead of the double underscore macro because
// this is the instruction that gets patched and coverage code gets in
// the way.
masm_->test(answer.reg(), Immediate(-delta_to_patch_site));
__ IncrementCounter(&Counters::named_load_inline_miss, 1);
receiver = cgen->frame()->Pop();
exit_.Jump(&receiver, &answer);
}
class DeferredReferenceGetKeyedValue: public DeferredCode {
public:
DeferredReferenceGetKeyedValue(CodeGenerator* generator, bool is_global)
@ -5268,9 +5330,14 @@ void DeferredReferenceGetKeyedValue::Generate() {
// instruction.
ASSERT(value.is_register() && value.reg().is(eax));
// The delta from the start of the map-compare instruction to the
// test eax instruction.
int delta_to_patch_site = __ SizeOfCodeGeneratedSince(patch_site());
__ test(value.reg(), Immediate(-delta_to_patch_site));
// test instruction. We use masm_ directly here instead of the
// double underscore macro because the macro sometimes uses macro
// expansion to turn into something that can't return a value. This
// is encountered when doing generated code coverage tests.
int delta_to_patch_site = masm_->SizeOfCodeGeneratedSince(patch_site());
// Here we use masm_-> instead of the double underscore macro because this
// is the instruction that gets patched and coverage code gets in the way.
masm_->test(value.reg(), Immediate(-delta_to_patch_site));
__ IncrementCounter(&Counters::keyed_load_inline_miss, 1);
// The receiver and key were spilled by the call, so their state as
@ -5284,7 +5351,7 @@ void DeferredReferenceGetKeyedValue::Generate() {
#undef __
#define __ masm->
#define __ ACCESS_MASM(masm)
Handle<String> Reference::GetName() {
ASSERT(type_ == NAMED);
@ -5324,16 +5391,66 @@ void Reference::GetValue(TypeofState typeof_state) {
// thrown below, we must distinguish between the two kinds of
// loads (typeof expression loads must not throw a reference
// error).
Comment cmnt(masm, "[ Load from named Property");
cgen_->frame()->Push(GetName());
Variable* var = expression_->AsVariableProxy()->AsVariable();
ASSERT(var == NULL || var->is_global());
RelocInfo::Mode mode = (var == NULL)
? RelocInfo::CODE_TARGET
: RelocInfo::CODE_TARGET_CONTEXT;
Result answer = cgen_->frame()->CallLoadIC(mode);
cgen_->frame()->Push(&answer);
bool is_global = var != NULL;
ASSERT(!is_global || var->is_global());
if (is_global || cgen_->scope()->is_global_scope()) {
// Do not inline the inobject property case for loads from the
// global object or loads in toplevel code.
Comment cmnt(masm, "[ Load from named Property");
cgen_->frame()->Push(GetName());
RelocInfo::Mode mode = is_global
? RelocInfo::CODE_TARGET_CONTEXT
: RelocInfo::CODE_TARGET;
Result answer = cgen_->frame()->CallLoadIC(mode);
// A test eax instruction following the call signals that the
// inobject property case was inlined. Ensure that there is not
// a test eax instruction here.
__ nop();
cgen_->frame()->Push(&answer);
} else {
// Inline the inobject property case.
Comment cmnt(masm, "[ Inlined named property load");
DeferredReferenceGetNamedValue* deferred =
new DeferredReferenceGetNamedValue(cgen_, GetName());
Result receiver = cgen_->frame()->Pop();
receiver.ToRegister();
// Check that the receiver is a heap object.
__ test(receiver.reg(), Immediate(kSmiTagMask));
deferred->enter()->Branch(zero, &receiver, not_taken);
// Preallocate the value register to ensure that there is no
// spill emitted between the patch site label and the offset in
// the load instruction.
Result value = cgen_->allocator()->Allocate();
ASSERT(value.is_valid());
__ bind(deferred->patch_site());
// This is the map check instruction that will be patched (so we can't
// use the double underscore macro that may insert instructions).
// Initially use an invalid map to force a failure.
masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
Immediate(Factory::null_value()));
// This branch is always a forwards branch so it's always a fixed
// size which allows the assert below to succeed and patching to work.
deferred->enter()->Branch(not_equal, &receiver, not_taken);
// The delta from the patch label to the load offset must be
// statically known.
ASSERT(masm->SizeOfCodeGeneratedSince(deferred->patch_site()) ==
LoadIC::kOffsetToLoadInstruction);
// The initial (invalid) offset has to be large enough to force
// a 32-bit instruction encoding to allow patching with an
// arbitrary offset. Use kMaxInt (minus kHeapObjectTag).
int offset = kMaxInt;
masm->mov(value.reg(), FieldOperand(receiver.reg(), offset));
__ IncrementCounter(&Counters::named_load_inline, 1);
deferred->BindExit(&receiver, &value);
cgen_->frame()->Push(&receiver);
cgen_->frame()->Push(&value);
}
break;
}
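The inlined-load protocol used in the two branches above deserves spelling out: when the fast case is inlined, the call to the load IC is followed by a test eax, imm32 instruction whose 32-bit immediate stores the negative distance back to the map-check patch site; when it is not inlined, a nop is emitted instead. The IC miss handler can therefore inspect the instruction after the call's return address to decide whether, and where, to patch. A toy decoder of that convention (0xA9 and 0x90 are the real IA-32 opcodes; the helper itself is hypothetical):

#include <cstdint>
#include <cstring>

const uint8_t kTestEaxOpcode = 0xA9;  // test eax, imm32
const uint8_t kNopOpcode = 0x90;      // nop

// Returns the address of the map-check patch site if the load was inlined,
// or 0 if the call site is followed by a nop (the non-inlined case).
uint8_t* FindPatchSite(uint8_t* return_address) {
  if (return_address[0] == kNopOpcode) return 0;       // not inlined
  if (return_address[0] != kTestEaxOpcode) return 0;   // unknown layout
  int32_t imm;
  std::memcpy(&imm, return_address + 1, sizeof(imm));  // little-endian imm32
  // The code generator stored -delta_to_patch_site in the immediate, so the
  // patch site sits |imm| bytes before the marker instruction.
  return return_address + imm;
}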
@ -5369,7 +5486,9 @@ void Reference::GetValue(TypeofState typeof_state) {
// Initially, use an invalid map. The map is patched in the IC
// initialization code.
__ bind(deferred->patch_site());
__ cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
// Use masm-> here instead of the double underscore macro since extra
// coverage code can interfere with the patching.
masm->cmp(FieldOperand(receiver.reg(), HeapObject::kMapOffset),
Immediate(Factory::null_value()));
deferred->enter()->Branch(not_equal, &receiver, &key, not_taken);
@ -5566,7 +5685,7 @@ void ToBooleanStub::Generate(MacroAssembler* masm) {
#undef __
#define __ masm_->
#define __ ACCESS_MASM(masm_)
Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
Result* right) {
@ -5794,10 +5913,10 @@ Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
// Right operand must be in register cl because x86 likes it that way.
if (right->reg().is(ecx)) {
// Right is already in the right place. Left may be in the
// same register, which causes problems. Use answer instead.
if (left->reg().is(ecx)) {
*left = answer;
}
// same register, which causes problems. Always use answer
// instead of left, even if left is not ecx, since this avoids
// spilling left.
*left = answer;
} else if (left->reg().is(ecx)) {
generator()->frame()->Spill(left->reg());
__ mov(left->reg(), right->reg());
@ -5811,6 +5930,9 @@ Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
ASSERT(reg_ecx.is_valid());
__ mov(ecx, right->reg());
*right = reg_ecx;
// Answer and left both contain the left operand. Use answer, so
// left is not spilled.
*left = answer;
}
ASSERT(left->reg().is_valid());
ASSERT(!left->reg().is(ecx));
@ -5860,16 +5982,10 @@ Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
case Token::SHL: {
__ shl(left->reg());
// Check that the *signed* result fits in a smi.
//
// TODO(207): Can reduce registers from 4 to 3 by
// preallocating ecx.
JumpTarget result_ok(generator());
Result smi_test_reg = generator()->allocator()->Allocate();
ASSERT(smi_test_reg.is_valid());
__ lea(smi_test_reg.reg(), Operand(left->reg(), 0x40000000));
__ test(smi_test_reg.reg(), Immediate(0x80000000));
smi_test_reg.Unuse();
result_ok.Branch(zero, left, taken);
__ cmp(left->reg(), 0xc0000000);
result_ok.Branch(positive, left, taken);
__ shr(left->reg());
ASSERT(kSmiTag == 0);
__ shl(left->reg(), kSmiTagSize);
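A standalone sketch (assuming 32-bit two's complement arithmetic and a one-bit smi tag) checking that the new cmp-based test accepts exactly the same values as the lea/test sequence it replaces: both succeed iff the result fits in 31 signed bits.

#include <cassert>
#include <cstdint>

// Old sequence: lea tmp, [v + 0x40000000]; test tmp, 0x80000000; ok if zero.
static bool OldCheck(int32_t v) {
  return ((static_cast<uint32_t>(v) + 0x40000000u) & 0x80000000u) == 0;
}
// New sequence: cmp v, 0xc0000000; ok if "positive" (sign flag clear).
// Subtracting 0xc0000000 mod 2^32 equals adding 0x40000000, so the sign
// bit of the difference is the same bit the old code tested.
static bool NewCheck(int32_t v) {
  return static_cast<int32_t>(static_cast<uint32_t>(v) - 0xc0000000u) >= 0;
}

int main() {
  for (int64_t v = INT32_MIN; v <= INT32_MAX; v += 65521) {
    assert(OldCheck(static_cast<int32_t>(v)) == NewCheck(static_cast<int32_t>(v)));
  }
  return 0;
}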
@ -5900,7 +6016,7 @@ Result DeferredInlineBinaryOperation::GenerateInlineCode(Result* left,
#undef __
#define __ masm->
#define __ ACCESS_MASM(masm)
void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
// Perform fast-case smi code for the operation (eax <op> ebx) and
@ -6169,11 +6285,15 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
case Token::SHR: __ shr(eax); break;
default: UNREACHABLE();
}
// Check if result is non-negative and fits in a smi.
__ test(eax, Immediate(0xc0000000));
__ j(not_zero, &non_smi_result);
if (op_ == Token::SHR) {
// Check if result is non-negative and fits in a smi.
__ test(eax, Immediate(0xc0000000));
__ j(not_zero, &non_smi_result);
} else {
// Check if result fits in a smi.
__ cmp(eax, 0xc0000000);
__ j(negative, &non_smi_result);
}
// Tag smi result and return.
ASSERT(kSmiTagSize == times_2); // adjust code if not the case
__ lea(eax, Operand(eax, eax, times_1, kSmiTag));
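What the lea above computes, as a sketch (assuming kSmiTag == 0 and a one-bit tag, matching the ASSERT): eax + eax*1 + kSmiTag doubles the value, leaving the low bit clear as the smi tag.

#include <cassert>
#include <cstdint>

static int32_t TagSmi(int32_t v) {    // lea eax, [eax + eax*1 + 0]
  return static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
}
static int32_t UntagSmi(int32_t s) {  // arithmetic shift restores the sign
  return s >> 1;
}

int main() {
  assert((TagSmi(7) & 1) == 0);        // low bit is the (zero) smi tag
  assert(UntagSmi(TagSmi(-5)) == -5);  // round-trips negative values
  return 0;
}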
@ -6225,7 +6345,9 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
}
// SHR should return uint32 - go to runtime for non-smi/negative result.
if (op_ == Token::SHR) __ bind(&non_smi_result);
if (op_ == Token::SHR) {
__ bind(&non_smi_result);
}
__ mov(eax, Operand(esp, 1 * kPointerSize));
__ mov(edx, Operand(esp, 2 * kPointerSize));
break;

633
deps/v8/src/ia32/codegen-ia32.h

@ -0,0 +1,633 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_IA32_CODEGEN_IA32_H_
#define V8_IA32_CODEGEN_IA32_H_
namespace v8 { namespace internal {
// Forward declarations
class DeferredCode;
class RegisterAllocator;
class RegisterFile;
enum InitState { CONST_INIT, NOT_CONST_INIT };
enum TypeofState { INSIDE_TYPEOF, NOT_INSIDE_TYPEOF };
// -------------------------------------------------------------------------
// Reference support
// A reference is a C++ stack-allocated object that keeps an ECMA
// reference on the execution stack while in scope. For variables
// the reference is empty, indicating that it isn't necessary to
// store state on the stack to keep track of references to them.
// For properties, we keep either one (named) or two (indexed) values
// on the execution stack to represent the reference.
class Reference BASE_EMBEDDED {
public:
// The values of the types are important; see size().
enum Type { ILLEGAL = -1, SLOT = 0, NAMED = 1, KEYED = 2 };
Reference(CodeGenerator* cgen, Expression* expression);
~Reference();
Expression* expression() const { return expression_; }
Type type() const { return type_; }
void set_type(Type value) {
ASSERT(type_ == ILLEGAL);
type_ = value;
}
// The size the reference takes up on the stack.
int size() const { return (type_ == ILLEGAL) ? 0 : type_; }
bool is_illegal() const { return type_ == ILLEGAL; }
bool is_slot() const { return type_ == SLOT; }
bool is_property() const { return type_ == NAMED || type_ == KEYED; }
// Return the name. Only valid for named property references.
Handle<String> GetName();
// Generate code to push the value of the reference on top of the
// expression stack. The reference is expected to be already on top of
// the expression stack, and it is left in place with its value above it.
void GetValue(TypeofState typeof_state);
// Like GetValue except that the slot is expected to be written to before
// being read from again. The value of the reference may be invalidated,
// causing subsequent attempts to read it to fail.
void TakeValue(TypeofState typeof_state);
// Generate code to store the value on top of the expression stack in the
// reference. The reference is expected to be immediately below the value
// on the expression stack. The stored value is left in place (with the
// reference intact below it) to support chained assignments.
void SetValue(InitState init_state);
private:
CodeGenerator* cgen_;
Expression* expression_;
Type type_;
};
// -------------------------------------------------------------------------
// Control destinations.
// A control destination encapsulates a pair of jump targets and a
// flag indicating which one is the preferred fall-through. The
// preferred fall-through must be unbound; the other may already be
// bound (ie, a backward target).
//
// The true and false targets may be jumped to unconditionally or
// control may split conditionally. Unconditional jumping and
// splitting should be emitted in tail position (as the last thing
// when compiling an expression) because they can cause either label
// to be bound or the non-fall-through to be jumped to, leaving an
// invalid virtual frame.
//
// The labels in the control destination can be extracted and
// manipulated normally without affecting the state of the
// destination.
class ControlDestination BASE_EMBEDDED {
public:
ControlDestination(JumpTarget* true_target,
JumpTarget* false_target,
bool true_is_fall_through)
: true_target_(true_target),
false_target_(false_target),
true_is_fall_through_(true_is_fall_through),
is_used_(false) {
ASSERT(true_is_fall_through ? !true_target->is_bound()
: !false_target->is_bound());
}
// Accessors for the jump targets. Directly jumping or branching to
// or binding the targets will not update the destination's state.
JumpTarget* true_target() const { return true_target_; }
JumpTarget* false_target() const { return false_target_; }
// True if the destination has been jumped to unconditionally or
// control has been split to both targets. This predicate does not
// test whether the targets have been extracted and manipulated as
// raw jump targets.
bool is_used() const { return is_used_; }
// True if the destination is used and the true target (respectively
// false target) was the fall through. If the target is backward,
// "fall through" includes jumping unconditionally to it.
bool true_was_fall_through() const {
return is_used_ && true_is_fall_through_;
}
bool false_was_fall_through() const {
return is_used_ && !true_is_fall_through_;
}
// Emit a branch to one of the true or false targets, and bind the
// other target. Because this binds the fall-through target, it
// should be emitted in tail position (as the last thing when
// compiling an expression).
void Split(Condition cc) {
ASSERT(!is_used_);
if (true_is_fall_through_) {
false_target_->Branch(NegateCondition(cc));
true_target_->Bind();
} else {
true_target_->Branch(cc);
false_target_->Bind();
}
is_used_ = true;
}
// Emit an unconditional jump in tail position, to the true target
// (if the argument is true) or the false target. The "jump" will
// actually bind the jump target if it is forward and jump to it if it
// is backward.
void Goto(bool where) {
ASSERT(!is_used_);
JumpTarget* target = where ? true_target_ : false_target_;
if (target->is_bound()) {
target->Jump();
} else {
target->Bind();
}
is_used_ = true;
true_is_fall_through_ = where;
}
// Mark this jump target as used as if Goto had been called, but
// without generating a jump or binding a label (the control effect
// should have already happened). This is used when the left
// subexpression of a short-circuit boolean operator is compiled.
void Use(bool where) {
ASSERT(!is_used_);
ASSERT((where ? true_target_ : false_target_)->is_bound());
is_used_ = true;
true_is_fall_through_ = where;
}
// Swap the true and false targets but keep the same actual label as
// the fall through. This is used when compiling negated
// expressions, where we want to swap the targets but preserve the
// state.
void Invert() {
JumpTarget* temp_target = true_target_;
true_target_ = false_target_;
false_target_ = temp_target;
true_is_fall_through_ = !true_is_fall_through_;
}
private:
// True and false jump targets.
JumpTarget* true_target_;
JumpTarget* false_target_;
// Before using the destination: true if the true target is the
// preferred fall through, false if the false target is. After
// using the destination: true if the true target was actually used
// as the fall through, false if the false target was.
bool true_is_fall_through_;
// True if the Split or Goto functions have been called.
bool is_used_;
};
// -------------------------------------------------------------------------
// Code generation state
// The state is passed down the AST by the code generator (and back up, in
// the form of the state of the jump target pair). It is threaded through
// the call stack. Constructing a state implicitly pushes it on the owning
// code generator's stack of states, and destroying one implicitly pops it.
//
// The code generator state is only used for expressions, so statements have
// the initial state.
class CodeGenState BASE_EMBEDDED {
public:
// Create an initial code generator state. Destroying the initial state
// leaves the code generator with a NULL state.
explicit CodeGenState(CodeGenerator* owner);
// Create a code generator state based on a code generator's current
// state. The new state may or may not be inside a typeof, and has its
// own control destination.
CodeGenState(CodeGenerator* owner,
TypeofState typeof_state,
ControlDestination* destination);
// Destroy a code generator state and restore the owning code generator's
// previous state.
~CodeGenState();
// Accessors for the state.
TypeofState typeof_state() const { return typeof_state_; }
ControlDestination* destination() const { return destination_; }
private:
// The owning code generator.
CodeGenerator* owner_;
// A flag indicating whether we are compiling the immediate subexpression
// of a typeof expression.
TypeofState typeof_state_;
// A control destination in case the expression has a control-flow
// effect.
ControlDestination* destination_;
// The previous state of the owning code generator, restored when
// this state is destroyed.
CodeGenState* previous_;
};
// -------------------------------------------------------------------------
// CodeGenerator
class CodeGenerator: public AstVisitor {
public:
// Takes a function literal, generates code for it. This function should only
// be called by compiler.cc.
static Handle<Code> MakeCode(FunctionLiteral* fun,
Handle<Script> script,
bool is_eval);
#ifdef ENABLE_LOGGING_AND_PROFILING
static bool ShouldGenerateLog(Expression* type);
#endif
static void SetFunctionInfo(Handle<JSFunction> fun,
int length,
int function_token_position,
int start_position,
int end_position,
bool is_expression,
bool is_toplevel,
Handle<Script> script,
Handle<String> inferred_name);
// Accessors
MacroAssembler* masm() { return masm_; }
VirtualFrame* frame() const { return frame_; }
bool has_valid_frame() const { return frame_ != NULL; }
// Set the virtual frame to be new_frame, with non-frame register
// reference counts given by non_frame_registers. The non-frame
// register reference counts of the old frame are returned in
// non_frame_registers.
void SetFrame(VirtualFrame* new_frame, RegisterFile* non_frame_registers);
void DeleteFrame();
RegisterAllocator* allocator() const { return allocator_; }
CodeGenState* state() { return state_; }
void set_state(CodeGenState* state) { state_ = state; }
void AddDeferred(DeferredCode* code) { deferred_.Add(code); }
bool in_spilled_code() const { return in_spilled_code_; }
void set_in_spilled_code(bool flag) { in_spilled_code_ = flag; }
private:
// Construction/Destruction
CodeGenerator(int buffer_size, Handle<Script> script, bool is_eval);
virtual ~CodeGenerator() { delete masm_; }
// Accessors
Scope* scope() const { return scope_; }
// Clearing and generating deferred code.
void ClearDeferred();
void ProcessDeferred();
bool is_eval() { return is_eval_; }
// State
TypeofState typeof_state() const { return state_->typeof_state(); }
ControlDestination* destination() const { return state_->destination(); }
// Track loop nesting level.
int loop_nesting() const { return loop_nesting_; }
void IncrementLoopNesting() { loop_nesting_++; }
void DecrementLoopNesting() { loop_nesting_--; }
// Node visitors.
void VisitStatements(ZoneList<Statement*>* statements);
#define DEF_VISIT(type) \
void Visit##type(type* node);
NODE_LIST(DEF_VISIT)
#undef DEF_VISIT
// Visit a statement and then spill the virtual frame if control flow can
// reach the end of the statement (ie, it does not exit via break,
// continue, return, or throw). This function is used temporarily while
// the code generator is being transformed.
void VisitAndSpill(Statement* statement);
// Visit a list of statements and then spill the virtual frame if control
// flow can reach the end of the list.
void VisitStatementsAndSpill(ZoneList<Statement*>* statements);
// Main code generation function
void GenCode(FunctionLiteral* fun);
// Generate the return sequence code. Should be called no more than
// once per compiled function, immediately after binding the return
// target (which cannot be done more than once).
void GenerateReturnSequence(Result* return_value);
// The following are used by class Reference.
void LoadReference(Reference* ref);
void UnloadReference(Reference* ref);
Operand ContextOperand(Register context, int index) const {
return Operand(context, Context::SlotOffset(index));
}
Operand SlotOperand(Slot* slot, Register tmp);
Operand ContextSlotOperandCheckExtensions(Slot* slot,
Result tmp,
JumpTarget* slow);
// Expressions
Operand GlobalObject() const {
return ContextOperand(esi, Context::GLOBAL_INDEX);
}
void LoadCondition(Expression* x,
TypeofState typeof_state,
ControlDestination* destination,
bool force_control);
void Load(Expression* x, TypeofState typeof_state = NOT_INSIDE_TYPEOF);
void LoadGlobal();
void LoadGlobalReceiver();
// Generate code to push the value of an expression on top of the frame
// and then spill the frame fully to memory. This function is used
// temporarily while the code generator is being transformed.
void LoadAndSpill(Expression* expression,
TypeofState typeof_state = NOT_INSIDE_TYPEOF);
// Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state);
Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state,
JumpTarget* slow);
// Store the value on top of the expression stack into a slot, leaving the
// value in place.
void StoreToSlot(Slot* slot, InitState init_state);
// Special code for typeof expressions: Unfortunately, we must
// be careful when loading the expression in 'typeof'
// expressions. We are not allowed to throw reference errors for
// non-existing properties of the global object, so we must make it
// look like an explicit property access, instead of an access
// through the context chain.
void LoadTypeofExpression(Expression* x);
// Translate the value on top of the frame into control flow to the
// control destination.
void ToBoolean(ControlDestination* destination);
void GenericBinaryOperation(
Token::Value op,
SmiAnalysis* type,
OverwriteMode overwrite_mode);
// If possible, combine two constant smi values using op to produce
// a smi result, and push it on the virtual frame, all at compile time.
// Returns true if it succeeds. Otherwise it has no effect.
bool FoldConstantSmis(Token::Value op, int left, int right);
// Emit code to perform a binary operation on a constant
// smi and a likely smi. Consumes the Result *operand.
void ConstantSmiBinaryOperation(Token::Value op,
Result* operand,
Handle<Object> constant_operand,
SmiAnalysis* type,
bool reversed,
OverwriteMode overwrite_mode);
// Emit code to perform a binary operation on two likely smis.
// The code to handle smi arguments is produced inline.
// Consumes the Results *left and *right.
void LikelySmiBinaryOperation(Token::Value op,
Result* left,
Result* right,
OverwriteMode overwrite_mode);
void Comparison(Condition cc,
bool strict,
ControlDestination* destination);
// To prevent long attacker-controlled byte sequences, integer constants
// from the JavaScript source are loaded in two parts if they are larger
// than 16 bits.
static const int kMaxSmiInlinedBits = 16;
bool IsUnsafeSmi(Handle<Object> value);
// Load an integer constant x into a register target using
// at most 16 bits of user-controlled data per assembly operation.
void LoadUnsafeSmi(Register target, Handle<Object> value);
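A sketch of the two-part load idea (one possible split; the real emitter may differ): each emitted instruction carries at most 16 attacker-influenced bits, so a script cannot plant a chosen 4-byte sequence in executable memory via a single immediate.

#include <cassert>
#include <cstdint>

// Combine a constant from two halves, 16 user-controlled bits at a time.
static uint32_t LoadInTwoParts(uint32_t value) {
  uint32_t reg = value & 0x0000FFFFu;  // e.g. mov reg, low_half
  reg ^= value & 0xFFFF0000u;          // e.g. xor reg, high_half
  return reg;
}

int main() {
  assert(LoadInTwoParts(0xDEADBEEFu) == 0xDEADBEEFu);
  return 0;
}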
void CallWithArguments(ZoneList<Expression*>* arguments, int position);
void CheckStack();
struct InlineRuntimeLUT {
void (CodeGenerator::*method)(ZoneList<Expression*>*);
const char* name;
};
static InlineRuntimeLUT* FindInlineRuntimeLUT(Handle<String> name);
bool CheckForInlineRuntimeCall(CallRuntime* node);
static bool PatchInlineRuntimeEntry(Handle<String> name,
const InlineRuntimeLUT& new_entry,
InlineRuntimeLUT* old_entry);
Handle<JSFunction> BuildBoilerplate(FunctionLiteral* node);
void ProcessDeclarations(ZoneList<Declaration*>* declarations);
Handle<Code> ComputeCallInitialize(int argc);
Handle<Code> ComputeCallInitializeInLoop(int argc);
// Declare global variables and functions in the given array of
// name/value pairs.
void DeclareGlobals(Handle<FixedArray> pairs);
// Instantiate the function boilerplate.
void InstantiateBoilerplate(Handle<JSFunction> boilerplate);
// Support for type checks.
void GenerateIsSmi(ZoneList<Expression*>* args);
void GenerateIsNonNegativeSmi(ZoneList<Expression*>* args);
void GenerateIsArray(ZoneList<Expression*>* args);
// Support for arguments.length and arguments[?].
void GenerateArgumentsLength(ZoneList<Expression*>* args);
void GenerateArgumentsAccess(ZoneList<Expression*>* args);
// Support for accessing the value field of an object (used by Date).
void GenerateValueOf(ZoneList<Expression*>* args);
void GenerateSetValueOf(ZoneList<Expression*>* args);
// Fast support for charCodeAt(n).
void GenerateFastCharCodeAt(ZoneList<Expression*>* args);
// Fast support for object equality testing.
void GenerateObjectEquals(ZoneList<Expression*>* args);
void GenerateLog(ZoneList<Expression*>* args);
void GenerateGetFramePointer(ZoneList<Expression*>* args);
// Methods and constants for fast case switch statement support.
//
// Only allow fast-case switch if the range of labels is at most
// this factor times the number of case labels.
// Value is derived from comparing the size of code generated by the normal
// switch code for Smi-labels to the size of a single pointer. If code
// quality increases, this number should be decreased to match.
static const int kFastSwitchMaxOverheadFactor = 5;
// Minimal number of switch cases required before we allow jump-table
// optimization.
static const int kFastSwitchMinCaseCount = 5;
// The limit of the range of a fast-case switch, as a factor of the number
// of cases of the switch. Each platform should return a value that
// is optimal compared to the default code generated for a switch statement
// on that platform.
int FastCaseSwitchMaxOverheadFactor();
// The minimal number of cases in a switch before the fast-case switch
// optimization is enabled. Each platform should return a value that
// is optimal compared to the default code generated for a switch statement
// on that platform.
int FastCaseSwitchMinCaseCount();
// Allocate a jump table and create code to jump through it.
// Should call GenerateFastCaseSwitchCases to generate the code for
// all the cases at the appropriate point.
void GenerateFastCaseSwitchJumpTable(SwitchStatement* node,
int min_index,
int range,
Label* fail_label,
Vector<Label*> case_targets,
Vector<Label> case_labels);
// Generate the code for cases for the fast case switch.
// Called by GenerateFastCaseSwitchJumpTable.
void GenerateFastCaseSwitchCases(SwitchStatement* node,
Vector<Label> case_labels,
VirtualFrame* start_frame);
// Fast support for constant-Smi switches.
void GenerateFastCaseSwitchStatement(SwitchStatement* node,
int min_index,
int range,
int default_index);
// Fast support for constant-Smi switches. Tests whether switch statement
// permits optimization and calls GenerateFastCaseSwitch if it does.
// Returns true if the fast-case switch was generated, and false if not.
bool TryGenerateFastCaseSwitchStatement(SwitchStatement* node);
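The admissibility test these two constants imply, as a sketch (the exact range arithmetic inside TryGenerateFastCaseSwitchStatement is an assumption here): enough cases, and a label range no more than five times the case count.

#include <cassert>

static const int kFastSwitchMaxOverheadFactor = 5;
static const int kFastSwitchMinCaseCount = 5;

static bool QualifiesForJumpTable(int case_count, int min_label, int max_label) {
  int range = max_label - min_label + 1;  // assumed inclusive range
  return case_count >= kFastSwitchMinCaseCount &&
         range <= kFastSwitchMaxOverheadFactor * case_count;
}

int main() {
  assert(QualifiesForJumpTable(6, 0, 11));   // dense labels: table pays off
  assert(!QualifiesForJumpTable(3, 0, 2));   // too few cases
  assert(!QualifiesForJumpTable(5, 0, 99));  // sparse labels: table too big
  return 0;
}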
// Methods used to indicate the source position of the code being generated. Source
// positions are collected by the assembler and emitted with the relocation
// information.
void CodeForFunctionPosition(FunctionLiteral* fun);
void CodeForReturnPosition(FunctionLiteral* fun);
void CodeForStatementPosition(Node* node);
void CodeForSourcePosition(int pos);
#ifdef DEBUG
// True if the registers are valid for entry to a block. There should be
// no frame-external references to eax, ebx, ecx, edx, or edi.
bool HasValidEntryRegisters();
#endif
bool is_eval_; // Tells whether code is generated for eval.
Handle<Script> script_;
List<DeferredCode*> deferred_;
// Assembler
MacroAssembler* masm_; // to generate code
// Code generation state
Scope* scope_;
VirtualFrame* frame_;
RegisterAllocator* allocator_;
CodeGenState* state_;
int loop_nesting_;
// Jump targets.
// The target of the return from the function.
BreakTarget function_return_;
// True if the function return is shadowed (ie, jumping to the target
// function_return_ does not jump to the true function return, but rather
// to some unlinking code).
bool function_return_is_shadowed_;
// True when we are in code that expects the virtual frame to be fully
// spilled. Some virtual frame functions are disabled in DEBUG builds when
// called from spilled code, because they do not leave the virtual frame
// in a spilled state.
bool in_spilled_code_;
static InlineRuntimeLUT kInlineRuntimeLUT[];
friend class VirtualFrame;
friend class JumpTarget;
friend class Reference;
friend class Result;
friend class CodeGeneratorPatcher; // Used in test-log-ia32.cc
DISALLOW_COPY_AND_ASSIGN(CodeGenerator);
};
} } // namespace v8::internal
#endif // V8_IA32_CODEGEN_IA32_H_

0
deps/v8/src/cpu-ia32.cc → deps/v8/src/ia32/cpu-ia32.cc

4
deps/v8/src/debug-ia32.cc → deps/v8/src/ia32/debug-ia32.cc

@ -33,6 +33,7 @@
namespace v8 { namespace internal {
#ifdef ENABLE_DEBUGGER_SUPPORT
// A debug break in the frame exit code is identified by a call instruction.
bool BreakLocationIterator::IsDebugBreakAtReturn() {
@ -67,7 +68,7 @@ bool Debug::IsDebugBreakAtReturn(RelocInfo* rinfo) {
}
#define __ masm->
#define __ ACCESS_MASM(masm)
static void Generate_DebugBreakCallHelper(MacroAssembler* masm,
@ -214,5 +215,6 @@ void Debug::GenerateStubNoRegistersDebugBreak(MacroAssembler* masm) {
#undef __
#endif // ENABLE_DEBUGGER_SUPPORT
} } // namespace v8::internal

0
deps/v8/src/disasm-ia32.cc → deps/v8/src/ia32/disasm-ia32.cc

0
deps/v8/src/frames-ia32.cc → deps/v8/src/ia32/frames-ia32.cc

6
deps/v8/src/frames-ia32.h → deps/v8/src/ia32/frames-ia32.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_FRAMES_IA32_H_
#define V8_FRAMES_IA32_H_
#ifndef V8_IA32_FRAMES_IA32_H_
#define V8_IA32_FRAMES_IA32_H_
namespace v8 { namespace internal {
@ -288,4 +288,4 @@ inline Object* JavaScriptFrame::function_slot_object() const {
} } // namespace v8::internal
#endif // V8_FRAMES_IA32_H_
#endif // V8_IA32_FRAMES_IA32_H_

116
deps/v8/src/ic-ia32.cc → deps/v8/src/ia32/ic-ia32.cc

@ -38,7 +38,7 @@ namespace v8 { namespace internal {
// Static IC stub generators.
//
#define __ masm->
#define __ ACCESS_MASM(masm)
// Helper function used to load a property from a dictionary backing storage.
@ -91,7 +91,9 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
// Compute the masked index: (hash + i + i * i) & mask.
__ mov(r1, FieldOperand(name, String::kLengthOffset));
__ shr(r1, String::kHashShift);
if (i > 0) __ add(Operand(r1), Immediate(Dictionary::GetProbeOffset(i)));
if (i > 0) {
__ add(Operand(r1), Immediate(Dictionary::GetProbeOffset(i)));
}
__ and_(r1, Operand(r2));
// Scale the index by multiplying by the element size.
@ -121,23 +123,19 @@ static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss_label,
}
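The probe sequence the generated dictionary lookup walks, per the comment above, as a standalone sketch: (hash + i + i*i) & mask over a power-of-two table.

#include <cstdio>

static int Probe(int hash, int i, int mask) {
  return (hash + i + i * i) & mask;  // quadratic probing, power-of-two table
}

int main() {
  const int mask = 7;  // 8 slots
  for (int i = 0; i < 4; i++) {
    printf("probe %d of hash 5 -> slot %d\n", i, Probe(5, i, mask));
  }
  return 0;
}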
// Helper function used to check that a value is either not a function
// or is loaded if it is a function.
static void GenerateCheckNonFunctionOrLoaded(MacroAssembler* masm, Label* miss,
Register value, Register scratch) {
// Helper function used to check that a value is either not an object
// or is loaded if it is an object.
static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
Register value, Register scratch) {
Label done;
// Check if the value is a Smi.
__ test(value, Immediate(kSmiTagMask));
__ j(zero, &done, not_taken);
// Check if the value is a function.
__ CmpObjectType(value, JS_FUNCTION_TYPE, scratch);
__ j(not_equal, &done, taken);
// Check if the function has been loaded.
__ mov(scratch, FieldOperand(value, JSFunction::kSharedFunctionInfoOffset));
__ mov(scratch,
FieldOperand(scratch, SharedFunctionInfo::kLazyLoadDataOffset));
__ cmp(scratch, Factory::undefined_value());
__ j(not_equal, miss, not_taken);
// Check if the object has been loaded.
__ mov(scratch, FieldOperand(value, JSFunction::kMapOffset));
__ mov(scratch, FieldOperand(scratch, Map::kBitField2Offset));
__ test(scratch, Immediate(1 << Map::kNeedsLoading));
__ j(not_zero, miss, not_taken);
__ bind(&done);
}
@ -266,7 +264,7 @@ void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
__ j(not_zero, &slow, not_taken);
// Probe the dictionary leaving result in ecx.
GenerateDictionaryLoad(masm, &slow, ebx, ecx, edx, eax);
GenerateCheckNonFunctionOrLoaded(masm, &slow, ecx, edx);
GenerateCheckNonObjectOrLoaded(masm, &slow, ecx, edx);
__ mov(eax, Operand(ecx));
__ IncrementCounter(&Counters::keyed_load_generic_symbol, 1);
__ ret(0);
@ -491,10 +489,10 @@ static void GenerateNormalHelper(MacroAssembler* masm,
__ j(not_equal, miss, not_taken);
// Check that the function has been loaded.
__ mov(edx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(edx, FieldOperand(edx, SharedFunctionInfo::kLazyLoadDataOffset));
__ cmp(edx, Factory::undefined_value());
__ j(not_equal, miss, not_taken);
__ mov(edx, FieldOperand(edi, JSFunction::kMapOffset));
__ mov(edx, FieldOperand(edx, Map::kBitField2Offset));
__ test(edx, Immediate(1 << Map::kNeedsLoading));
__ j(not_zero, miss, not_taken);
// Patch the receiver with the global proxy if necessary.
if (is_global_object) {
@ -681,7 +679,7 @@ void LoadIC::GenerateNormal(MacroAssembler* masm) {
// Search the dictionary placing the result in eax.
__ bind(&probe);
GenerateDictionaryLoad(masm, &miss, edx, eax, ebx, ecx);
GenerateCheckNonFunctionOrLoaded(masm, &miss, eax, edx);
GenerateCheckNonObjectOrLoaded(masm, &miss, eax, edx);
__ ret(0);
// Global object access: Check access rights.
@ -727,24 +725,70 @@ void LoadIC::Generate(MacroAssembler* masm, const ExternalReference& f) {
}
void KeyedLoadIC::PatchInlinedMapCheck(Address address, Object* value) {
static const byte kTestEaxByte = 0xA9;
// One byte opcode for test eax,0xXXXXXXXX.
static const byte kTestEaxByte = 0xA9;
void LoadIC::ClearInlinedVersion(Address address) {
// Reset the map check of the inlined inobject property load (if
// present) to guarantee failure by holding an invalid map (the null
// value). The offset can be patched to anything.
PatchInlinedLoad(address, Heap::null_value(), kMaxInt);
}
void KeyedLoadIC::ClearInlinedVersion(Address address) {
// Insert null as the map to check for, so that the map check fails,
// sending control flow to the IC instead of the inlined version.
PatchInlinedLoad(address, Heap::null_value());
}
bool LoadIC::PatchInlinedLoad(Address address, Object* map, int offset) {
// The address of the instruction following the call.
Address test_instruction_address = address + 4;
// If the instruction following the call is not a test eax, nothing
// was inlined.
if (*test_instruction_address != kTestEaxByte) return false;
Address delta_address = test_instruction_address + 1;
// The delta to the start of the map check instruction.
int delta = *reinterpret_cast<int*>(delta_address);
// The map address is the last 4 bytes of the 7-byte
// operand-immediate compare instruction, so we add 3 to get the
// offset to the last 4 bytes.
Address map_address = test_instruction_address + delta + 3;
*(reinterpret_cast<Object**>(map_address)) = map;
// The offset is in the last 4 bytes of a six-byte
// memory-to-register move instruction, so we add 2 to get the
// offset to the last 4 bytes.
Address offset_address =
test_instruction_address + delta + kOffsetToLoadInstruction + 2;
*reinterpret_cast<int*>(offset_address) = offset - kHeapObjectTag;
return true;
}
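The pointer arithmetic above, exercised against a fake byte buffer instead of real generated code (instruction lengths as described in the comments: 5-byte test, 7-byte cmp, 6-byte mov):

#include <cassert>
#include <cstring>

int main() {
  unsigned char code[64] = {0};
  // Pretend the IC call returns here; emit the test eax marker by hand.
  unsigned char* test_instruction_address = code;
  code[0] = 0xA9;                    // test eax, imm32 opcode (kTestEaxByte)
  int delta = 16;                    // distance to the map-check cmp
  std::memcpy(code + 1, &delta, 4);  // the imm32 of the test carries it

  // Recover the patch sites the same way PatchInlinedLoad does.
  int read_delta;
  std::memcpy(&read_delta, test_instruction_address + 1, 4);
  unsigned char* map_address = test_instruction_address + read_delta + 3;

  assert(read_delta == delta);
  assert(map_address == code + 16 + 3);  // last 4 bytes of the 7-byte cmp
  return 0;
}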
bool KeyedLoadIC::PatchInlinedLoad(Address address, Object* map) {
Address test_instruction_address = address + 4; // 4 = stub address
// The keyed load has a fast inlined case if the IC call instruction
// is immediately followed by a test instruction.
if (*test_instruction_address == kTestEaxByte) {
// Fetch the offset from the test instruction to the map cmp
// instruction. This offset is stored in the last 4 bytes of the
// 5 byte test instruction.
Address offset_address = test_instruction_address + 1;
int offset_value = *(reinterpret_cast<int*>(offset_address));
// Compute the map address. The map address is in the last 4
// bytes of the 7-byte operand-immediate compare instruction, so
// we add 3 to the offset to get the map address.
Address map_address = test_instruction_address + offset_value + 3;
// patch the map check.
(*(reinterpret_cast<Object**>(map_address))) = value;
}
if (*test_instruction_address != kTestEaxByte) return false;
// Fetch the offset from the test instruction to the map cmp
// instruction. This offset is stored in the last 4 bytes of the 5
// byte test instruction.
Address delta_address = test_instruction_address + 1;
int delta = *reinterpret_cast<int*>(delta_address);
// Compute the map address. The map address is in the last 4 bytes
// of the 7-byte operand-immediate compare instruction, so we add 3
// to the offset to get the map address.
Address map_address = test_instruction_address + delta + 3;
// Patch the map check.
*(reinterpret_cast<Object**>(map_address)) = map;
return true;
}

12
deps/v8/src/jump-target-ia32.cc → deps/v8/src/ia32/jump-target-ia32.cc

@ -35,7 +35,7 @@ namespace v8 { namespace internal {
// -------------------------------------------------------------------------
// JumpTarget implementation.
#define __ masm_->
#define __ ACCESS_MASM(masm_)
void JumpTarget::DoJump() {
ASSERT(cgen_ != NULL);
@ -115,11 +115,13 @@ void JumpTarget::DoBranch(Condition cc, Hint hint) {
__ bind(&original_fall_through);
} else {
// Forward branch. A copy of the current frame is added to the end
// of the list of frames reaching the target block and a branch to
// the merge code is emitted.
// Forward branch. A copy of the current frame is added to the end of the
// list of frames reaching the target block and a branch to the merge code
// is emitted. Use masm_-> instead of __ as forward branches are expected
// to be a fixed size (no inserted coverage-checking instructions please).
// This is used in Reference::GetValue.
AddReachingFrame(new VirtualFrame(cgen_->frame()));
__ j(cc, &merge_labels_.last(), hint);
masm_->j(cc, &merge_labels_.last(), hint);
is_linked_ = true;
}
}

13
deps/v8/src/macro-assembler-ia32.cc → deps/v8/src/ia32/macro-assembler-ia32.cc

@ -216,6 +216,7 @@ void MacroAssembler::RecordWrite(Register object, int offset,
}
#ifdef ENABLE_DEBUGGER_SUPPORT
void MacroAssembler::SaveRegistersToMemory(RegList regs) {
ASSERT((regs & ~kJSCallerSaved) == 0);
// Copy the content of registers to memory location.
@ -290,7 +291,7 @@ void MacroAssembler::CopyRegistersFromStackToMemory(Register base,
}
}
}
#endif
void MacroAssembler::Set(Register dst, const Immediate& x) {
if (x.is_zero()) {
@ -378,6 +379,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
mov(edi, Operand(eax));
lea(esi, Operand(ebp, eax, times_4, offset));
#ifdef ENABLE_DEBUGGER_SUPPORT
// Save the state of all registers to the stack from the memory
// location. This is needed to allow nested break points.
if (type == StackFrame::EXIT_DEBUG) {
@ -389,6 +391,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
// associated with this issue).
PushRegistersFromMemory(kJSCallerSaved);
}
#endif
// Reserve space for two arguments: argc and argv.
sub(Operand(esp), Immediate(2 * kPointerSize));
@ -406,6 +409,7 @@ void MacroAssembler::EnterExitFrame(StackFrame::Type type) {
void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
#ifdef ENABLE_DEBUGGER_SUPPORT
// Restore the memory copy of the registers by digging them out from
// the stack. This is needed to allow nested break points.
if (type == StackFrame::EXIT_DEBUG) {
@ -416,6 +420,7 @@ void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
lea(ebx, Operand(ebp, kOffset));
CopyRegistersFromStackToMemory(ebx, ecx, kJSCallerSaved);
}
#endif
// Get the return address from the stack and restore the frame pointer.
mov(ecx, Operand(ebp, 1 * kPointerSize));
@ -427,9 +432,9 @@ void MacroAssembler::LeaveExitFrame(StackFrame::Type type) {
// Restore current context from top and clear it in debug mode.
ExternalReference context_address(Top::k_context_address);
mov(esi, Operand::StaticVariable(context_address));
if (kDebug) {
mov(Operand::StaticVariable(context_address), Immediate(0));
}
#ifdef DEBUG
mov(Operand::StaticVariable(context_address), Immediate(0));
#endif
// Push the return address to get ready to return.
push(ecx);

371
deps/v8/src/ia32/macro-assembler-ia32.h

@ -0,0 +1,371 @@
// Copyright 2006-2008 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_IA32_MACRO_ASSEMBLER_IA32_H_
#define V8_IA32_MACRO_ASSEMBLER_IA32_H_
#include "assembler.h"
namespace v8 { namespace internal {
// Forward declaration.
class JumpTarget;
// Helper types to make flags easier to read at call sites.
enum InvokeFlag {
CALL_FUNCTION,
JUMP_FUNCTION
};
enum CodeLocation {
IN_JAVASCRIPT,
IN_JS_ENTRY,
IN_C_ENTRY
};
enum HandlerType {
TRY_CATCH_HANDLER,
TRY_FINALLY_HANDLER,
JS_ENTRY_HANDLER
};
// MacroAssembler implements a collection of frequently used macros.
class MacroAssembler: public Assembler {
public:
MacroAssembler(void* buffer, int size);
// ---------------------------------------------------------------------------
// GC Support
// Set the remembered set bit for [object+offset].
// object is the object being stored into, value is the object being stored.
// If offset is zero, then the scratch register contains the array index into
// the elements array represented as a Smi.
// All registers are clobbered by the operation.
void RecordWrite(Register object,
int offset,
Register value,
Register scratch);
#ifdef ENABLE_DEBUGGER_SUPPORT
// ---------------------------------------------------------------------------
// Debugger Support
void SaveRegistersToMemory(RegList regs);
void RestoreRegistersFromMemory(RegList regs);
void PushRegistersFromMemory(RegList regs);
void PopRegistersToMemory(RegList regs);
void CopyRegistersFromStackToMemory(Register base,
Register scratch,
RegList regs);
#endif
// ---------------------------------------------------------------------------
// Activation frames
void EnterInternalFrame() { EnterFrame(StackFrame::INTERNAL); }
void LeaveInternalFrame() { LeaveFrame(StackFrame::INTERNAL); }
void EnterConstructFrame() { EnterFrame(StackFrame::CONSTRUCT); }
void LeaveConstructFrame() { LeaveFrame(StackFrame::CONSTRUCT); }
// Enter specific kind of exit frame; either EXIT or
// EXIT_DEBUG. Expects the number of arguments in register eax and
// sets up the number of arguments in register edi and the pointer
// to the first argument in register esi.
void EnterExitFrame(StackFrame::Type type);
// Leave the current exit frame. Expects the return value in
// register eax:edx (untouched) and the pointer to the first
// argument in register esi.
void LeaveExitFrame(StackFrame::Type type);
// ---------------------------------------------------------------------------
// JavaScript invokes
// Invoke the JavaScript function code by either calling or jumping.
void InvokeCode(const Operand& code,
const ParameterCount& expected,
const ParameterCount& actual,
InvokeFlag flag);
void InvokeCode(Handle<Code> code,
const ParameterCount& expected,
const ParameterCount& actual,
RelocInfo::Mode rmode,
InvokeFlag flag);
// Invoke the JavaScript function in the given register. Changes the
// current context to the context in the function before invoking.
void InvokeFunction(Register function,
const ParameterCount& actual,
InvokeFlag flag);
// Invoke specified builtin JavaScript function. Adds an entry to
// the unresolved list if the name does not resolve.
void InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag);
// Store the code object for the given builtin in the target register.
void GetBuiltinEntry(Register target, Builtins::JavaScript id);
// Expression support
void Set(Register dst, const Immediate& x);
void Set(const Operand& dst, const Immediate& x);
// Compare object type for heap object.
// Incoming register is heap_object and outgoing register is map.
void CmpObjectType(Register heap_object, InstanceType type, Register map);
// Compare instance type for map.
void CmpInstanceType(Register map, InstanceType type);
// FCmp is similar to integer cmp, but requires unsigned
// jcc instructions (je, ja, jae, jb, jbe, and jz).
void FCmp();
// ---------------------------------------------------------------------------
// Exception handling
// Push a new try handler and link into try handler chain.
// The return address must be pushed before calling this helper.
// On exit, eax contains TOS (next_sp).
void PushTryHandler(CodeLocation try_location, HandlerType type);
// ---------------------------------------------------------------------------
// Inline caching support
// Generates code that verifies that the maps of objects in the
// prototype chain of object haven't changed since the code was
// generated and branches to the miss label if any map has. If
// necessary the function also generates code for a security check
// in case of global object holders. The scratch and holder
// registers are always clobbered, but the object register is only
// clobbered if it is the same as the holder register. The function
// returns a register containing the holder - either object_reg or
// holder_reg.
Register CheckMaps(JSObject* object, Register object_reg,
JSObject* holder, Register holder_reg,
Register scratch, Label* miss);
// Generate code for checking access rights - used for security checks
// on access to global objects across environments. The holder register
// is left untouched, but the scratch register is clobbered.
void CheckAccessGlobalProxy(Register holder_reg,
Register scratch,
Label* miss);
// ---------------------------------------------------------------------------
// Support functions.
// Check if result is zero and op is negative.
void NegativeZeroTest(Register result, Register op, Label* then_label);
// Check if result is zero and op is negative in code using jump targets.
void NegativeZeroTest(CodeGenerator* cgen,
Register result,
Register op,
JumpTarget* then_target);
// Check if result is zero and any of op1 and op2 are negative.
// Register scratch is destroyed, and it must be different from op2.
void NegativeZeroTest(Register result, Register op1, Register op2,
Register scratch, Label* then_label);
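Why a zero result needs this extra test, as a sketch: smis cannot represent -0, but JavaScript distinguishes it, so a zero produced from a negative operand must fall back to a heap number.

#include <cassert>
#include <cmath>

int main() {
  double product = 0.0 * -5.0;    // JS: 0 * -5 === -0
  assert(std::signbit(product));  // -0 is not representable as a tagged smi
  return 0;
}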
// Try to get the function prototype of a function and put the value in
// the result register. Checks that the function really is a
// function and jumps to the miss label if the fast checks fail. The
// function register will be untouched; the other registers may be
// clobbered.
void TryGetFunctionPrototype(Register function,
Register result,
Register scratch,
Label* miss);
// Generates code for reporting that an illegal operation has
// occurred.
void IllegalOperation(int num_arguments);
// ---------------------------------------------------------------------------
// Runtime calls
// Call a code stub.
void CallStub(CodeStub* stub);
// Return from a code stub after popping its arguments.
void StubReturn(int argc);
// Call a runtime routine.
// Eventually this should be used for all C calls.
void CallRuntime(Runtime::Function* f, int num_arguments);
// Convenience function: Same as above, but takes the fid instead.
void CallRuntime(Runtime::FunctionId id, int num_arguments);
// Tail call of a runtime routine (jump).
// Like JumpToBuiltin, but also takes care of passing the number
// of arguments.
void TailCallRuntime(const ExternalReference& ext, int num_arguments);
// Jump to the builtin routine.
void JumpToBuiltin(const ExternalReference& ext);
// ---------------------------------------------------------------------------
// Utilities
void Ret();
struct Unresolved {
int pc;
uint32_t flags; // see Bootstrapper::FixupFlags decoders/encoders.
const char* name;
};
List<Unresolved>* unresolved() { return &unresolved_; }
Handle<Object> CodeObject() { return code_object_; }
// ---------------------------------------------------------------------------
// StatsCounter support
void SetCounter(StatsCounter* counter, int value);
void IncrementCounter(StatsCounter* counter, int value);
void DecrementCounter(StatsCounter* counter, int value);
// ---------------------------------------------------------------------------
// Debugging
// Calls Abort(msg) if the condition cc is not satisfied.
// Use --debug_code to enable.
void Assert(Condition cc, const char* msg);
// Like Assert(), but always enabled.
void Check(Condition cc, const char* msg);
// Print a message to stdout and abort execution.
void Abort(const char* msg);
// Verify restrictions about code generated in stubs.
void set_generating_stub(bool value) { generating_stub_ = value; }
bool generating_stub() { return generating_stub_; }
void set_allow_stub_calls(bool value) { allow_stub_calls_ = value; }
bool allow_stub_calls() { return allow_stub_calls_; }
private:
List<Unresolved> unresolved_;
bool generating_stub_;
bool allow_stub_calls_;
Handle<Object> code_object_; // This handle will be patched with the code
// object on installation.
// Helper functions for generating invokes.
void InvokePrologue(const ParameterCount& expected,
const ParameterCount& actual,
Handle<Code> code_constant,
const Operand& code_operand,
Label* done,
InvokeFlag flag);
// Get the code for the given builtin. Reports whether it was able to
// resolve the function in the 'resolved' flag.
Handle<Code> ResolveBuiltin(Builtins::JavaScript id, bool* resolved);
// Activation support.
void EnterFrame(StackFrame::Type type);
void LeaveFrame(StackFrame::Type type);
};
// The code patcher is used to patch (typically) small parts of code e.g. for
// debugging and other types of instrumentation. When using the code patcher
// the exact number of bytes specified must be emitted. It is not legal to
// emit relocation information. If any of these constraints are violated it
// causes an assertion to fail.
class CodePatcher {
public:
CodePatcher(byte* address, int size);
virtual ~CodePatcher();
// Macro assembler to emit code.
MacroAssembler* masm() { return &masm_; }
private:
byte* address_; // The address of the code being patched.
int size_; // Number of bytes of the expected patch size.
MacroAssembler masm_; // Macro assembler used to generate the code.
};
// -----------------------------------------------------------------------------
// Static helper functions.
// Generate an Operand for loading a field from an object.
static inline Operand FieldOperand(Register object, int offset) {
return Operand(object, offset - kHeapObjectTag);
}
// Generate an Operand for loading an indexed field from an object.
static inline Operand FieldOperand(Register object,
Register index,
ScaleFactor scale,
int offset) {
return Operand(object, index, scale, offset - kHeapObjectTag);
}
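The tag adjustment FieldOperand bakes into every field access, as a sketch (kHeapObjectTag == 1 is an assumption matching V8's one-bit heap object tag):

#include <cassert>
#include <cstdint>

static const intptr_t kTag = 1;  // assumed value of kHeapObjectTag

static intptr_t FieldAddress(intptr_t tagged_ptr, int offset) {
  return tagged_ptr + offset - kTag;  // what Operand(object, offset - kHeapObjectTag) encodes
}

int main() {
  intptr_t raw = 0x1000;         // untagged object start
  intptr_t tagged = raw + kTag;  // pointer as the VM carries it
  assert(FieldAddress(tagged, 4) == raw + 4);
  return 0;
}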
#ifdef GENERATED_CODE_COVERAGE
extern void LogGeneratedCodeCoverage(const char* file_line);
#define CODE_COVERAGE_STRINGIFY(x) #x
#define CODE_COVERAGE_TOSTRING(x) CODE_COVERAGE_STRINGIFY(x)
#define __FILE_LINE__ __FILE__ ":" CODE_COVERAGE_TOSTRING(__LINE__)
#define ACCESS_MASM(masm) { \
byte* ia32_coverage_function = \
reinterpret_cast<byte*>(FUNCTION_ADDR(LogGeneratedCodeCoverage)); \
masm->pushfd(); \
masm->pushad(); \
masm->push(Immediate(reinterpret_cast<int>(&__FILE_LINE__))); \
masm->call(ia32_coverage_function, RelocInfo::RUNTIME_ENTRY); \
masm->pop(eax); \
masm->popad(); \
masm->popfd(); \
} \
masm->
#else
#define ACCESS_MASM(masm) masm->
#endif
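The stringified-location trick the coverage macro uses, reduced to a sketch. Note the real macro instruments the generated code (push/call/pop around each emitted instruction); this sketch only logs from the code generator itself.

#include <cstdio>

struct Masm { void nop() { std::puts("  emit nop"); } };

#define COV_STRINGIFY(x) #x
#define COV_TOSTRING(x) COV_STRINGIFY(x)
// Log file:line, then forward to the assembler via the comma operator.
#define ACCESS(m) (std::puts(__FILE__ ":" COV_TOSTRING(__LINE__)), (m))->
#define __ ACCESS(masm)

int main() {
  Masm m;
  Masm* masm = &m;
  __ nop();  // prints this source location, then "emits" the instruction
  return 0;
}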
} } // namespace v8::internal
#endif // V8_IA32_MACRO_ASSEMBLER_IA32_H_

16
deps/v8/src/regexp-macro-assembler-ia32.cc → deps/v8/src/ia32/regexp-macro-assembler-ia32.cc

@ -32,8 +32,8 @@
#include "regexp-stack.h"
#include "macro-assembler.h"
#include "regexp-macro-assembler.h"
#include "macro-assembler-ia32.h"
#include "regexp-macro-assembler-ia32.h"
#include "ia32/macro-assembler-ia32.h"
#include "ia32/regexp-macro-assembler-ia32.h"
namespace v8 { namespace internal {
@ -86,7 +86,7 @@ namespace v8 { namespace internal {
* byte* stack_area_top)
*/
#define __ masm_->
#define __ ACCESS_MASM(masm_)
RegExpMacroAssemblerIA32::RegExpMacroAssemblerIA32(
Mode mode,
@ -974,7 +974,7 @@ RegExpMacroAssemblerIA32::Result RegExpMacroAssemblerIA32::Match(
int start_offset = previous_index;
int end_offset = subject_ptr->length();
bool is_ascii = StringShape(*subject).IsAsciiRepresentation();
bool is_ascii = subject->IsAsciiRepresentation();
if (StringShape(subject_ptr).IsCons()) {
subject_ptr = ConsString::cast(subject_ptr)->first();
@ -985,7 +985,7 @@ RegExpMacroAssemblerIA32::Result RegExpMacroAssemblerIA32::Match(
subject_ptr = slice->buffer();
}
// Ensure that an underlying string has the same ascii-ness.
ASSERT(StringShape(subject_ptr).IsAsciiRepresentation() == is_ascii);
ASSERT(subject_ptr->IsAsciiRepresentation() == is_ascii);
ASSERT(subject_ptr->IsExternalString() || subject_ptr->IsSeqString());
// String is now either Sequential or External
int char_size_shift = is_ascii ? 0 : 1;
@ -1112,7 +1112,7 @@ const byte* RegExpMacroAssemblerIA32::StringCharacterPosition(String* subject,
ASSERT(subject->IsExternalString() || subject->IsSeqString());
ASSERT(start_index >= 0);
ASSERT(start_index <= subject->length());
if (StringShape(subject).IsAsciiRepresentation()) {
if (subject->IsAsciiRepresentation()) {
const byte* address;
if (StringShape(subject).IsExternal()) {
const char* data = ExternalAsciiString::cast(subject)->resource()->data();
@ -1152,7 +1152,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
Handle<String> subject(frame_entry<String*>(re_frame, kInputString));
// Current string.
bool is_ascii = StringShape(*subject).IsAsciiRepresentation();
bool is_ascii = subject->IsAsciiRepresentation();
ASSERT(re_code->instruction_start() <= *return_address);
ASSERT(*return_address <=
@ -1171,7 +1171,7 @@ int RegExpMacroAssemblerIA32::CheckStackGuardState(Address* return_address,
}
// String might have changed.
if (StringShape(*subject).IsAsciiRepresentation() != is_ascii) {
if (subject->IsAsciiRepresentation() != is_ascii) {
// If we changed between an ASCII and an UC16 string, the specialized
// code cannot be used, and we need to restart regexp matching from
// scratch (including, potentially, compiling a new version of the code).

6
deps/v8/src/regexp-macro-assembler-ia32.h → deps/v8/src/ia32/regexp-macro-assembler-ia32.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef REGEXP_MACRO_ASSEMBLER_IA32_H_
#define REGEXP_MACRO_ASSEMBLER_IA32_H_
#ifndef V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
#define V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_
namespace v8 { namespace internal {
@ -282,4 +282,4 @@ class RegExpMacroAssemblerIA32: public RegExpMacroAssembler {
}} // namespace v8::internal
#endif /* REGEXP_MACRO_ASSEMBLER_IA32_H_ */
#endif // V8_IA32_REGEXP_MACRO_ASSEMBLER_IA32_H_

6
deps/v8/src/register-allocator-ia32.cc → deps/v8/src/ia32/register-allocator-ia32.cc

@ -97,6 +97,12 @@ void RegisterAllocator::UnuseReserved(RegisterFile* register_file) {
}
bool RegisterAllocator::IsReserved(int reg_code) {
// Test below relies on the order of register codes.
return reg_code >= esp.code() && reg_code <= esi.code();
}
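The code-order assumption the range test relies on, as a sketch using the standard ia32 register encodings (esp = 4, ebp = 5, esi = 6 are contiguous):

#include <cassert>

enum RegCode { eax = 0, ecx, edx, ebx, esp, ebp, esi, edi };  // ia32 encodings

static bool IsReserved(int code) { return code >= esp && code <= esi; }

int main() {
  assert(IsReserved(esp) && IsReserved(ebp) && IsReserved(esi));
  assert(!IsReserved(eax) && !IsReserved(edi));
  return 0;
}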
void RegisterAllocator::Initialize() {
Reset();
// The following register is live on function entry, saved in the

0
deps/v8/src/simulator-ia32.cc → deps/v8/src/ia32/simulator-ia32.cc

6
deps/v8/src/simulator-ia32.h → deps/v8/src/ia32/simulator-ia32.h

@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_SIMULATOR_IA32_H_
#define V8_SIMULATOR_IA32_H_
#ifndef V8_IA32_SIMULATOR_IA32_H_
#define V8_IA32_SIMULATOR_IA32_H_
// Since there is no simulator for the ia32 architecture the only thing we can
@ -44,4 +44,4 @@
(reinterpret_cast<uintptr_t>(this) >= limit ? \
reinterpret_cast<uintptr_t>(this) - limit : 0)
#endif // V8_SIMULATOR_IA32_H_
#endif // V8_IA32_SIMULATOR_IA32_H_

16
deps/v8/src/stub-cache-ia32.cc → deps/v8/src/ia32/stub-cache-ia32.cc

@ -33,7 +33,7 @@
namespace v8 { namespace internal {
#define __ masm->
#define __ ACCESS_MASM(masm)
static void ProbeTable(MacroAssembler* masm,
@ -256,7 +256,7 @@ void StubCompiler::GenerateLoadField(MacroAssembler* masm,
// Check that the maps haven't changed.
Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Get the value from the properties.
GenerateFastPropertyLoad(masm, eax, reg, holder, index);
@ -279,7 +279,7 @@ void StubCompiler::GenerateLoadCallback(MacroAssembler* masm,
// Check that the maps haven't changed.
Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Push the arguments on the JS stack of the caller.
__ pop(scratch2); // remove return address
@ -310,7 +310,7 @@ void StubCompiler::GenerateLoadConstant(MacroAssembler* masm,
// Check that the maps haven't changed.
Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Return the constant value.
__ mov(eax, Handle<Object>(value));
@ -332,7 +332,7 @@ void StubCompiler::GenerateLoadInterceptor(MacroAssembler* masm,
// Check that the maps haven't changed.
Register reg =
__ CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
masm->CheckMaps(object, receiver, holder, scratch1, scratch2, miss_label);
// Push the arguments on the JS stack of the caller.
__ pop(scratch2); // remove return address
@ -440,7 +440,7 @@ void StubCompiler::GenerateStoreField(MacroAssembler* masm,
#undef __
#define __ masm()->
#define __ ACCESS_MASM(masm())
// TODO(1241006): Avoid having lazy compile stubs specialized by the
@ -485,7 +485,7 @@ Object* CallStubCompiler::CompileCallField(Object* object,
// Do the right check and compute the holder register.
Register reg =
__ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
masm()->CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
GenerateFastPropertyLoad(masm(), edi, reg, holder, index);
@ -656,7 +656,7 @@ Object* CallStubCompiler::CompileCallInterceptor(Object* object,
// Check that maps have not changed and compute the holder register.
Register reg =
__ CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
masm()->CheckMaps(JSObject::cast(object), edx, holder, ebx, ecx, &miss);
// Enter an internal frame.
__ EnterInternalFrame();

160
deps/v8/src/virtual-frame-ia32.cc → deps/v8/src/ia32/virtual-frame-ia32.cc

@ -33,7 +33,7 @@
namespace v8 { namespace internal {
#define __ masm_->
#define __ ACCESS_MASM(masm_)
// -------------------------------------------------------------------------
// VirtualFrame implementation.
@ -158,6 +158,28 @@ void VirtualFrame::SyncElementByPushing(int index) {
}
// Clear the dirty bits for the range of elements in
// [min(stack_pointer_ + 1, begin), end].
void VirtualFrame::SyncRange(int begin, int end) {
ASSERT(begin >= 0);
ASSERT(end < elements_.length());
// Sync elements below the range if they have not been materialized
// on the stack.
int start = Min(begin, stack_pointer_ + 1);
// If positive, we have to adjust the stack pointer.
int delta = end - stack_pointer_;
if (delta > 0) {
stack_pointer_ = end;
__ sub(Operand(esp), Immediate(delta * kPointerSize));
}
for (int i = start; i <= end; i++) {
if (!elements_[i].is_synced()) SyncElementBelowStackPointer(i);
}
}
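A toy model of the contract the new SyncRange appears to implement (simplified and assumed from the code above; the real function emits a `sub esp` plus per-element stores): the range end is inclusive, unmaterialized elements below `begin` get synced on the way, and the stack pointer advances to cover `end`.

    // Toy model, not the V8 class: a bool stands in for each sync bit.
    #include <algorithm>
    #include <cassert>
    #include <vector>

    struct Frame {
      std::vector<bool> synced;
      int stack_pointer = -1;  // highest index materialized on the real stack

      void SyncRange(int begin, int end) {  // end is inclusive
        assert(begin >= 0 && end < static_cast<int>(synced.size()));
        int start = std::min(begin, stack_pointer + 1);
        if (end > stack_pointer) stack_pointer = end;  // the "sub esp" case
        for (int i = start; i <= end; i++) synced[i] = true;
      }
    };

    int main() {
      Frame f;
      f.synced = {true, false, false, false};
      f.stack_pointer = 0;
      f.SyncRange(2, 3);
      assert(f.synced[1] && f.synced[2] && f.synced[3]);  // 1 synced en route
      assert(f.stack_pointer == 3);
      return 0;
    }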
void VirtualFrame::MergeTo(VirtualFrame* expected) {
Comment cmnt(masm_, "[ Merge frame");
// We should always be merging the code generator's current frame to an
@@ -288,64 +310,33 @@ void VirtualFrame::MergeMoveRegistersToRegisters(VirtualFrame* expected) {
// We have already done X-to-memory moves.
ASSERT(stack_pointer_ >= expected->stack_pointer_);
// Perform register-to-register moves.
int start = 0;
int end = elements_.length() - 1;
bool any_moves_blocked; // Did we fail to make some moves this iteration?
bool should_break_cycles = false;
bool any_moves_made; // Did we make any progress this iteration?
do {
any_moves_blocked = false;
any_moves_made = false;
int first_move_blocked = kIllegalIndex;
int last_move_blocked = kIllegalIndex;
for (int i = start; i <= end; i++) {
FrameElement source = elements_[i];
FrameElement target = expected->elements_[i];
if (source.is_register() && target.is_register()) {
if (target.reg().is(source.reg())) {
if (target.is_synced() && !source.is_synced()) {
__ mov(Operand(ebp, fp_relative(i)), source.reg());
}
elements_[i] = target;
} else {
// We need to move source to target.
if (is_used(target.reg())) {
// The move is blocked because the target contains valid data.
// If we are stuck with only cycles remaining, then we spill source.
// Otherwise, we just need more iterations.
if (should_break_cycles) {
SpillElementAt(i);
should_break_cycles = false;
} else { // Record a blocked move.
if (!any_moves_blocked) {
first_move_blocked = i;
}
last_move_blocked = i;
any_moves_blocked = true;
}
} else {
// The move is not blocked. This frame element can be moved from
// its source register to its target register.
if (target.is_synced() && !source.is_synced()) {
SyncElementAt(i);
}
Use(target.reg(), i);
Unuse(source.reg());
elements_[i] = target;
__ mov(target.reg(), source.reg());
any_moves_made = true;
}
}
for (int i = 0; i < kNumRegisters; i++) {
// Move the right value into register i if it is currently in a register.
int index = expected->register_locations_[i];
int use_index = register_locations_[i];
// Fast check: skip if the register is unused in the target or already correct.
if (index != kIllegalIndex
&& index != use_index
&& elements_[index].is_register()) {
Register source = elements_[index].reg();
Register target = { i };
if (use_index == kIllegalIndex) { // Target is currently unused.
// Copy the contents of source to target.
// Set frame element register to target.
elements_[index].set_reg(target);
Use(target, index);
Unuse(source);
__ mov(target, source);
} else {
// Exchange contents of registers source and target.
elements_[use_index].set_reg(source);
elements_[index].set_reg(target);
register_locations_[target.code()] = index;
register_locations_[source.code()] = use_index;
__ xchg(source, target);
}
}
// Update control flags for next iteration.
should_break_cycles = (any_moves_blocked && !any_moves_made);
if (any_moves_blocked) {
start = first_move_blocked;
end = last_move_blocked;
}
} while (any_moves_blocked);
}
}
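The rewrite above replaces the old worklist (retry until no move is blocked, spilling to break cycles) with a single pass over the register set: a wanted value sitting in the wrong register is either moved into a free register or exchanged with the occupant, so cycles are resolved by `xchg` rather than a spill. The cycle-breaking idea in miniature, detached from frames and registers (values assumed distinct):

    // Sketch of swap-based permutation: std::swap plays the role of xchg.
    #include <cstdio>
    #include <utility>

    int main() {
      int value[3] = {10, 20, 30};  // what each "register" holds now
      int want[3]  = {30, 10, 20};  // a 3-cycle: no move is initially free
      for (int i = 0; i < 3; i++) {
        for (int j = 0; j < 3; j++) {
          if (i != j && value[j] == want[i]) {
            std::swap(value[i], value[j]);  // exchange, never spill
          }
        }
      }
      for (int i = 0; i < 3; i++) std::printf("r%d = %d\n", i, value[i]);
      return 0;  // prints r0 = 30, r1 = 10, r2 = 20
    }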
@@ -354,19 +345,22 @@ void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
// final step and is done from the bottom up so that the backing
// elements of copies are in their correct locations when we
// encounter the copies.
for (int i = 0; i < elements_.length(); i++) {
FrameElement source = elements_[i];
FrameElement target = expected->elements_[i];
if (target.is_register() && !source.is_register()) {
for (int i = 0; i < kNumRegisters; i++) {
int index = expected->register_locations_[i];
if (index != kIllegalIndex) {
FrameElement source = elements_[index];
FrameElement target = expected->elements_[index];
switch (source.type()) {
case FrameElement::INVALID: // Fall through.
case FrameElement::REGISTER:
UNREACHABLE();
break;
case FrameElement::REGISTER:
ASSERT(source.reg().is(target.reg()));
continue; // Go to next iteration. Skips Use(target.reg()) below.
break;
case FrameElement::MEMORY:
ASSERT(i <= stack_pointer_);
__ mov(target.reg(), Operand(ebp, fp_relative(i)));
ASSERT(index <= stack_pointer_);
__ mov(target.reg(), Operand(ebp, fp_relative(index)));
break;
case FrameElement::CONSTANT:
@@ -378,11 +372,25 @@ void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
break;
case FrameElement::COPY: {
FrameElement backing = elements_[source.index()];
int backing_index = source.index();
FrameElement backing = elements_[backing_index];
ASSERT(backing.is_memory() || backing.is_register());
if (backing.is_memory()) {
ASSERT(source.index() <= stack_pointer_);
__ mov(target.reg(), Operand(ebp, fp_relative(source.index())));
ASSERT(backing_index <= stack_pointer_);
// Optimization: if the backing store should also move to a
// register, move the backing store to its register first.
if (expected->elements_[backing_index].is_register()) {
FrameElement new_backing = expected->elements_[backing_index];
Register new_backing_reg = new_backing.reg();
ASSERT(!is_used(new_backing_reg));
elements_[backing_index] = new_backing;
Use(new_backing_reg, backing_index);
__ mov(new_backing_reg,
Operand(ebp, fp_relative(backing_index)));
__ mov(target.reg(), new_backing_reg);
} else {
__ mov(target.reg(), Operand(ebp, fp_relative(backing_index)));
}
} else {
__ mov(target.reg(), backing.reg());
}
@@ -390,11 +398,11 @@ void VirtualFrame::MergeMoveMemoryToRegisters(VirtualFrame *expected) {
}
// Ensure the proper sync state. If the source was memory, no
// code needs to be emitted.
if (target.is_synced() && !source.is_memory()) {
SyncElementAt(i);
if (target.is_synced() && !source.is_synced()) {
__ mov(Operand(ebp, fp_relative(index)), target.reg());
}
Use(target.reg(), i);
elements_[i] = target;
Use(target.reg(), index);
elements_[index] = target;
}
}
}
@@ -467,7 +475,7 @@ void VirtualFrame::AllocateStackSlots(int count) {
// we sync them with the actual frame to allocate space for spilling
// them later. First sync everything above the stack pointer so we can
// use pushes to allocate and initialize the locals.
SyncRange(stack_pointer_ + 1, elements_.length());
SyncRange(stack_pointer_ + 1, elements_.length() - 1);
Handle<Object> undefined = Factory::undefined_value();
FrameElement initial_value =
FrameElement::ConstantElement(undefined, FrameElement::SYNCED);
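The one-line fix in the hunk above follows from SyncRange's inclusive bounds: its precondition is ASSERT(end < elements_.length()), so passing elements_.length() itself violated the precondition. Restated as a check (values illustrative):

    #include <cassert>

    int main() {
      const int length = 4;  // elements_.length()
      int end = length - 1;  // corrected argument: the last valid index
      assert(end < length);  // SyncRange's ASSERT now holds
      return 0;
    }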
@@ -615,6 +623,12 @@ void VirtualFrame::StoreToFrameSlotAt(int index) {
InvalidateFrameSlotAt(index);
// InvalidateFrameSlotAt can potentially change any frame element, due
// to spilling registers to allocate temporaries in order to preserve
// the copy-on-write semantics of aliased elements. Reload top from
// the frame.
top = elements_[top_index];
if (top.is_copy()) {
// There are two cases based on the relative positions of the
// stored-to slot and the backing slot of the top element.
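The reload added above guards against a classic stale-local hazard: `top` was read out of elements_ before a call that may rewrite any element, so the cached copy no longer reflects the frame. A reduced illustration (names are ours, not V8's):

    #include <cassert>
    #include <vector>

    // Stands in for InvalidateFrameSlotAt, which may rewrite frame elements.
    void MayRewrite(std::vector<int>& elements) { elements[1] = 42; }

    int main() {
      std::vector<int> elements = {7, 8, 9};
      int top = elements[1];  // cached before the mutating call
      MayRewrite(elements);
      assert(top == 8);       // stale: does not see the rewrite
      top = elements[1];      // the fix: reload after the call
      assert(top == 42);
      return 0;
    }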

6
deps/v8/src/virtual-frame-ia32.h → deps/v8/src/ia32/virtual-frame-ia32.h

@@ -25,8 +25,8 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_VIRTUAL_FRAME_IA32_H_
#define V8_VIRTUAL_FRAME_IA32_H_
#ifndef V8_IA32_VIRTUAL_FRAME_IA32_H_
#define V8_IA32_VIRTUAL_FRAME_IA32_H_
#include "register-allocator.h"
@@ -490,4 +490,4 @@ class VirtualFrame : public Malloced {
} } // namespace v8::internal
#endif // V8_VIRTUAL_FRAME_IA32_H_
#endif // V8_IA32_VIRTUAL_FRAME_IA32_H_

4
deps/v8/src/ic-inl.h

@@ -39,6 +39,7 @@ Address IC::address() {
// Get the address of the call.
Address result = pc() - Assembler::kTargetAddrToReturnAddrDist;
#ifdef ENABLE_DEBUGGER_SUPPORT
// First check whether any break points are active; if not, just return
// the address of the call.
if (!Debug::has_break_points()) return result;
@@ -55,6 +56,9 @@ Address IC::address() {
// No break point here; just return the address of the call.
return result;
}
#else
return result;
#endif
}
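With this hunk the function compiles whether or not debugger support is built in: the new #else arm returns the unadjusted address when the break-point machinery is compiled out. The shape of the pattern, with made-up stand-ins for the V8 symbols:

    #include <cstdio>

    #ifdef ENABLE_DEBUGGER_SUPPORT
    static bool has_break_points() { return false; }  // stand-in for Debug::has_break_points()
    #endif

    static int address() {
      int result = 0x1000;  // address of the call site
    #ifdef ENABLE_DEBUGGER_SUPPORT
      if (!has_break_points()) return result;
      // ... otherwise map through the debug-break copy of the code ...
      return result;
    #else
      return result;  // debugger compiled out: nothing to adjust
    #endif
    }

    int main() {
      std::printf("0x%x\n", address());
      return 0;
    }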

37
deps/v8/src/ic.cc

@@ -42,7 +42,7 @@ static char TransitionMarkFromState(IC::State state) {
switch (state) {
case UNINITIALIZED: return '0';
case UNINITIALIZED_IN_LOOP: return 'L';
case PREMONOMORPHIC: return '0';
case PREMONOMORPHIC: return 'P';
case MONOMORPHIC: return '1';
case MONOMORPHIC_PROTOTYPE_FAILURE: return '^';
case MEGAMORPHIC: return 'N';
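The 'P' fix above matters only for the readability of IC tracing: PREMONOMORPHIC previously shared '0' with UNINITIALIZED, so the two states were indistinguishable in transition logs. A cut-down version of the table:

    #include <cstdio>

    enum State { UNINITIALIZED, PREMONOMORPHIC, MONOMORPHIC };

    static char TransitionMark(State state) {
      switch (state) {
        case UNINITIALIZED:  return '0';
        case PREMONOMORPHIC: return 'P';  // was '0': ambiguous in traces
        case MONOMORPHIC:    return '1';
      }
      return '?';
    }

    int main() {
      std::printf("%c vs %c\n", TransitionMark(UNINITIALIZED),
                  TransitionMark(PREMONOMORPHIC));
      return 0;
    }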
@@ -100,6 +100,7 @@ IC::IC(FrameDepth depth) {
}
#ifdef ENABLE_DEBUGGER_SUPPORT
Address IC::OriginalCodeAddress() {
HandleScope scope;
// Compute the JavaScript frame for the frame pointer of this IC
@@ -126,7 +127,7 @@ Address IC::OriginalCodeAddress() {
int delta = original_code->instruction_start() - code->instruction_start();
return addr + delta;
}
#endif
IC::State IC::StateFrom(Code* target, Object* receiver) {
IC::State state = target->ic_state();
@@ -236,13 +237,14 @@ void KeyedLoadIC::Clear(Address address, Code* target) {
// Make sure to also clear the map used in inline fast cases. If we
// do not clear these maps, cached code can keep objects alive
// through the embedded maps.
PatchInlinedMapCheck(address, Heap::null_value());
ClearInlinedVersion(address);
SetTargetAtAddress(address, initialize_stub());
}
void LoadIC::Clear(Address address, Code* target) {
if (target->ic_state() == UNINITIALIZED) return;
ClearInlinedVersion(address);
SetTargetAtAddress(address, initialize_stub());
}
@@ -356,6 +358,7 @@ Object* CallIC::LoadFunction(State state,
if (opt->IsJSFunction()) return opt;
}
#ifdef ENABLE_DEBUGGER_SUPPORT
// Handle stepping into a function if step-into is active.
if (Debug::StepInActive()) {
// Protect the result in a handle as the debugger can allocate and might
@@ -365,6 +368,7 @@ Object* CallIC::LoadFunction(State state,
Debug::HandleStepIn(function, fp(), false);
return *function;
}
#endif
return result;
}
@@ -520,6 +524,31 @@ Object* LoadIC::Load(State state, Handle<Object> object, Handle<String> name) {
LOG(SuspectReadEvent(*name, *object));
}
bool can_be_inlined =
FLAG_use_ic &&
state == PREMONOMORPHIC &&
lookup.IsValid() &&
lookup.IsLoaded() &&
lookup.IsCacheable() &&
lookup.holder() == *object &&
lookup.type() == FIELD &&
!object->IsAccessCheckNeeded();
if (can_be_inlined) {
Map* map = lookup.holder()->map();
// Property's index in the properties array. If negative, we have
// an inobject property.
int index = lookup.GetFieldIndex() - map->inobject_properties();
if (index < 0) {
// Index is an offset from the end of the object.
int offset = map->instance_size() + (index * kPointerSize);
if (PatchInlinedLoad(address(), map, offset)) {
set_target(megamorphic_stub());
return lookup.holder()->FastPropertyAt(lookup.GetFieldIndex());
}
}
}
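The index arithmetic in this new fast path is worth spelling out: GetFieldIndex() counts all fields, the first inobject_properties() of which live inside the object itself, so a negative adjusted index selects an inobject slot addressed backwards from instance_size. Worked through with made-up numbers (kPointerSize is 4 on ia32):

    #include <cassert>

    int main() {
      const int kPointerSize = 4;
      int instance_size       = 16;  // object occupies four words
      int inobject_properties = 2;   // its last two words hold properties
      int field_index         = 1;   // the property being loaded

      int index = field_index - inobject_properties;  // -1: inobject
      assert(index < 0);
      int offset = instance_size + index * kPointerSize;
      assert(offset == 12);  // last word of the object, as patched inline
      return 0;
    }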
// Update inline cache and stub cache.
if (FLAG_use_ic && lookup.IsLoaded()) {
UpdateCaches(&lookup, state, object, name);
@@ -731,7 +760,7 @@ Object* KeyedLoadIC::Load(State state,
!object->IsJSValue() &&
!JSObject::cast(*object)->HasIndexedInterceptor()) {
Map* map = JSObject::cast(*object)->map();
PatchInlinedMapCheck(address(), map);
PatchInlinedLoad(address(), map);
}
}

18
deps/v8/src/ic.h

@@ -107,9 +107,11 @@ class IC {
Address fp() const { return fp_; }
Address pc() const { return *pc_address_; }
#ifdef ENABLE_DEBUGGER_SUPPORT
// Computes the address in the original code when the running code
// contains break points (calls to DebugBreakXXX builtins).
Address OriginalCodeAddress();
#endif
// Set the call-site target.
void set_target(Code* code) { SetTargetAtAddress(address(), code); }
@@ -214,6 +216,11 @@ class LoadIC: public IC {
static void GenerateStringLength(MacroAssembler* masm);
static void GenerateFunctionPrototype(MacroAssembler* masm);
// The offset from the inlined patch site to the start of the
// inlined load instruction. It is 7 bytes (test eax, imm) plus
// 6 bytes (jne slow_label).
static const int kOffsetToLoadInstruction = 13;
private:
static void Generate(MacroAssembler* masm, const ExternalReference& f);
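The constant added in the hunk above is pure byte counting from its comment: the patch site is a 7-byte `test eax, imm` followed by a 6-byte `jne slow_label`, and the inlined load begins right after. As arithmetic (byte counts taken from the source comment, not re-derived here):

    #include <cassert>

    int main() {
      const int kTestEaxImm = 7;  // test eax, imm
      const int kJneSlow    = 6;  // jne slow_label
      assert(kTestEaxImm + kJneSlow == 13);  // kOffsetToLoadInstruction
      return 0;
    }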
@@ -236,6 +243,12 @@ class LoadIC: public IC {
}
static void Clear(Address address, Code* target);
// Clear the use of the inlined version.
static void ClearInlinedVersion(Address address);
static bool PatchInlinedLoad(Address address, Object* map, int index);
friend class IC;
};
@@ -252,6 +265,9 @@ class KeyedLoadIC: public IC {
static void GeneratePreMonomorphic(MacroAssembler* masm);
static void GenerateGeneric(MacroAssembler* masm);
// Clear the use of the inlined version.
static void ClearInlinedVersion(Address address);
private:
static void Generate(MacroAssembler* masm, const ExternalReference& f);
@@ -279,7 +295,7 @@ class KeyedLoadIC: public IC {
// Support for patching the map that is checked in an inlined
// version of keyed load.
static void PatchInlinedMapCheck(Address address, Object* map);
static bool PatchInlinedLoad(Address address, Object* map);
friend class IC;
};

6
deps/v8/src/interpreter-irregexp.cc

@@ -130,13 +130,13 @@ static void TraceInterpreter(const byte* code_base,
static int32_t Load32Aligned(const byte* pc) {
ASSERT((reinterpret_cast<int>(pc) & 3) == 0);
ASSERT((reinterpret_cast<intptr_t>(pc) & 3) == 0);
return *reinterpret_cast<const int32_t *>(pc);
}
static int32_t Load16Aligned(const byte* pc) {
ASSERT((reinterpret_cast<int>(pc) & 1) == 0);
ASSERT((reinterpret_cast<intptr_t>(pc) & 1) == 0);
return *reinterpret_cast<const uint16_t *>(pc);
}
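Both asserts above change the pointer cast from int to intptr_t: on an LP64 target a pointer does not fit in an int, so the old casts were rejected or lossy, while intptr_t round-trips a pointer by definition. The corrected idiom in isolation:

    #include <cassert>
    #include <cstdint>

    int main() {
      static int buffer[4] = {0};  // int array: at least 4-byte aligned here
      const unsigned char* pc = reinterpret_cast<const unsigned char*>(buffer);
      // The alignment check from the patch, with the portable cast:
      assert((reinterpret_cast<intptr_t>(pc) & 3) == 0);
      return 0;
    }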
@@ -574,7 +574,7 @@ bool IrregexpInterpreter::Match(Handle<ByteArray> code_array,
AssertNoAllocation a;
const byte* code_base = code_array->GetDataStartAddress();
uc16 previous_char = '\n';
if (StringShape(*subject).IsAsciiRepresentation()) {
if (subject->IsAsciiRepresentation()) {
Vector<const char> subject_vector = subject->ToAsciiVector();
if (start_position != 0) previous_char = subject_vector[start_position - 1];
return RawMatch(code_base,

Some files were not shown because too many files changed in this diff
