Browse Source

Upgrade v8 to 1.2.10 and libev to 3.6

v0.7.4-release
Ryan 16 years ago
parent
commit
e763efdadf
  1. 17
      deps/libev/Changes
  2. 619
      deps/libev/Makefile.in
  3. 3
      deps/libev/README.embed
  4. 62
      deps/libev/Symbols.ev
  5. 21
      deps/libev/Symbols.event
  6. 7526
      deps/libev/aclocal.m4
  7. 1526
      deps/libev/config.guess
  8. 115
      deps/libev/config.h.in
  9. 1658
      deps/libev/config.sub
  10. 22083
      deps/libev/configure
  11. 2
      deps/libev/configure.ac
  12. 448
      deps/libev/ev.3
  13. 474
      deps/libev/ev.c
  14. 98
      deps/libev/ev.h
  15. 458
      deps/libev/ev.pod
  16. 22
      deps/libev/ev_vars.h
  17. 2
      deps/libev/ev_win32.c
  18. 32
      deps/libev/ev_wrap.h
  19. 226
      deps/libev/event_compat.h
  20. 131
      deps/libev/import_libevent
  21. 294
      deps/libev/install-sh
  22. 6871
      deps/libev/ltmain.sh
  23. 336
      deps/libev/missing
  24. 111
      deps/libev/mkinstalldirs
  25. 19
      deps/libev/update_ev_wrap
  26. 7
      deps/libev/update_symbols
  27. 1
      deps/v8/AUTHORS
  28. 31
      deps/v8/ChangeLog
  29. 8
      deps/v8/SConstruct
  30. 2
      deps/v8/benchmarks/revisions.html
  31. 37
      deps/v8/benchmarks/run.html
  32. 38
      deps/v8/benchmarks/style.css
  33. 6
      deps/v8/include/v8.h
  34. 5
      deps/v8/src/accessors.cc
  35. 20
      deps/v8/src/api.cc
  36. 2
      deps/v8/src/arm/assembler-arm-inl.h
  37. 28
      deps/v8/src/arm/builtins-arm.cc
  38. 134
      deps/v8/src/arm/codegen-arm.cc
  39. 53
      deps/v8/src/arm/codegen-arm.h
  40. 4
      deps/v8/src/arm/virtual-frame-arm.h
  41. 10
      deps/v8/src/assembler.h
  42. 2
      deps/v8/src/ast.cc
  43. 16
      deps/v8/src/ast.h
  44. 5
      deps/v8/src/bootstrapper.cc
  45. 2
      deps/v8/src/builtins.h
  46. 125
      deps/v8/src/codegen.cc
  47. 6
      deps/v8/src/codegen.h
  48. 360
      deps/v8/src/compilation-cache.cc
  49. 17
      deps/v8/src/compilation-cache.h
  50. 16
      deps/v8/src/compiler.cc
  51. 2
      deps/v8/src/contexts.cc
  52. 2
      deps/v8/src/conversions.cc
  53. 5
      deps/v8/src/date-delay.js
  54. 16
      deps/v8/src/debug-delay.js
  55. 5
      deps/v8/src/dtoa-config.c
  56. 2
      deps/v8/src/factory.cc
  57. 49
      deps/v8/src/frame-element.h
  58. 2
      deps/v8/src/globals.h
  59. 22
      deps/v8/src/heap-inl.h
  60. 221
      deps/v8/src/heap.cc
  61. 117
      deps/v8/src/heap.h
  62. 2
      deps/v8/src/ia32/assembler-ia32-inl.h
  63. 15
      deps/v8/src/ia32/assembler-ia32.cc
  64. 16
      deps/v8/src/ia32/assembler-ia32.h
  65. 33
      deps/v8/src/ia32/builtins-ia32.cc
  66. 625
      deps/v8/src/ia32/codegen-ia32.cc
  67. 78
      deps/v8/src/ia32/codegen-ia32.h
  68. 3
      deps/v8/src/ia32/ic-ia32.cc
  69. 20
      deps/v8/src/ia32/virtual-frame-ia32.cc
  70. 70
      deps/v8/src/ia32/virtual-frame-ia32.h
  71. 22
      deps/v8/src/ic.cc
  72. 2
      deps/v8/src/ic.h
  73. 65
      deps/v8/src/jsregexp.cc
  74. 26
      deps/v8/src/jump-target.cc
  75. 126
      deps/v8/src/log-inl.h
  76. 18
      deps/v8/src/log-utils.cc
  77. 231
      deps/v8/src/log.cc
  78. 26
      deps/v8/src/log.h
  79. 17
      deps/v8/src/mark-compact.cc
  80. 28
      deps/v8/src/messages.js
  81. 26
      deps/v8/src/mirror-delay.js
  82. 35
      deps/v8/src/objects-inl.h
  83. 112
      deps/v8/src/objects.cc
  84. 39
      deps/v8/src/objects.h
  85. 4
      deps/v8/src/oprofile-agent.cc
  86. 16
      deps/v8/src/parser.cc
  87. 27
      deps/v8/src/platform-linux.cc
  88. 23
      deps/v8/src/platform-macos.cc
  89. 23
      deps/v8/src/platform.h
  90. 13
      deps/v8/src/register-allocator.cc
  91. 94
      deps/v8/src/register-allocator.h
  92. 5
      deps/v8/src/rewriter.cc
  93. 190
      deps/v8/src/runtime.cc
  94. 5
      deps/v8/src/runtime.js
  95. 86
      deps/v8/src/scopeinfo.cc
  96. 68
      deps/v8/src/scopeinfo.h
  97. 23
      deps/v8/src/scopes.cc
  98. 2
      deps/v8/src/scopes.h
  99. 18
      deps/v8/src/serialize.cc
  100. 1
      deps/v8/src/spaces.h

17
deps/libev/Changes

@ -1,5 +1,22 @@
Revision history for libev, a high-performance and full-featured event loop. Revision history for libev, a high-performance and full-featured event loop.
3.6 Tue Apr 28 02:49:30 CEST 2009
- multiple timers becoming ready within an event loop iteration
will be invoked in the "correct" order now.
- do not leave the event loop early just because we have no active
watchers, fixing a problem when embedding a kqueue loop
that has active kernel events but no registered watchers
(reported by blacksand blacksand).
- correctly zero the idx values for arrays, so destroying and
reinitialising the default loop actually works (patch by
Malek Hadj-Ali).
- implement ev_suspend and ev_resume.
- new EV_CUSTOM revents flag for use by applications.
- add documentation section about priorities.
- add a glossary to the documentation.
- extend the ev_fork description slightly.
- optimize a jump out of call_pending.
3.53 Sun Feb 15 02:38:20 CET 2009 3.53 Sun Feb 15 02:38:20 CET 2009
- fix a bug in event pipe creation on win32 that would cause a - fix a bug in event pipe creation on win32 that would cause a
failed assertion on event loop creation (patch by Malek Hadj-Ali). failed assertion on event loop creation (patch by Malek Hadj-Ali).

619
deps/libev/Makefile.in

@ -0,0 +1,619 @@
# Makefile.in generated by automake 1.7.9 from Makefile.am.
# @configure_input@
# Copyright 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003
# Free Software Foundation, Inc.
# This Makefile.in is free software; the Free Software Foundation
# gives unlimited permission to copy and/or distribute it,
# with or without modifications, as long as this notice is preserved.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY, to the extent permitted by law; without
# even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE.
@SET_MAKE@
srcdir = @srcdir@
top_srcdir = @top_srcdir@
VPATH = @srcdir@
pkgdatadir = $(datadir)/@PACKAGE@
pkglibdir = $(libdir)/@PACKAGE@
pkgincludedir = $(includedir)/@PACKAGE@
top_builddir = .
am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd
INSTALL = @INSTALL@
install_sh_DATA = $(install_sh) -c -m 644
install_sh_PROGRAM = $(install_sh) -c
install_sh_SCRIPT = $(install_sh) -c
INSTALL_HEADER = $(INSTALL_DATA)
transform = $(program_transform_name)
NORMAL_INSTALL = :
PRE_INSTALL = :
POST_INSTALL = :
NORMAL_UNINSTALL = :
PRE_UNINSTALL = :
POST_UNINSTALL = :
host_triplet = @host@
ACLOCAL = @ACLOCAL@
AMDEP_FALSE = @AMDEP_FALSE@
AMDEP_TRUE = @AMDEP_TRUE@
AMTAR = @AMTAR@
AR = @AR@
AUTOCONF = @AUTOCONF@
AUTOHEADER = @AUTOHEADER@
AUTOMAKE = @AUTOMAKE@
AWK = @AWK@
CC = @CC@
CCDEPMODE = @CCDEPMODE@
CFLAGS = @CFLAGS@
CPP = @CPP@
CPPFLAGS = @CPPFLAGS@
CXX = @CXX@
CXXCPP = @CXXCPP@
CXXDEPMODE = @CXXDEPMODE@
CXXFLAGS = @CXXFLAGS@
CYGPATH_W = @CYGPATH_W@
DEFS = @DEFS@
DEPDIR = @DEPDIR@
DSYMUTIL = @DSYMUTIL@
ECHO = @ECHO@
ECHO_C = @ECHO_C@
ECHO_N = @ECHO_N@
ECHO_T = @ECHO_T@
EGREP = @EGREP@
EXEEXT = @EXEEXT@
F77 = @F77@
FFLAGS = @FFLAGS@
GREP = @GREP@
INSTALL_DATA = @INSTALL_DATA@
INSTALL_PROGRAM = @INSTALL_PROGRAM@
INSTALL_SCRIPT = @INSTALL_SCRIPT@
INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@
LDFLAGS = @LDFLAGS@
LIBOBJS = @LIBOBJS@
LIBS = @LIBS@
LIBTOOL = @LIBTOOL@
LN_S = @LN_S@
LTLIBOBJS = @LTLIBOBJS@
MAINT = @MAINT@
MAINTAINER_MODE_FALSE = @MAINTAINER_MODE_FALSE@
MAINTAINER_MODE_TRUE = @MAINTAINER_MODE_TRUE@
MAKEINFO = @MAKEINFO@
NMEDIT = @NMEDIT@
OBJEXT = @OBJEXT@
PACKAGE = @PACKAGE@
PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@
PACKAGE_NAME = @PACKAGE_NAME@
PACKAGE_STRING = @PACKAGE_STRING@
PACKAGE_TARNAME = @PACKAGE_TARNAME@
PACKAGE_VERSION = @PACKAGE_VERSION@
PATH_SEPARATOR = @PATH_SEPARATOR@
RANLIB = @RANLIB@
SED = @SED@
SET_MAKE = @SET_MAKE@
SHELL = @SHELL@
STRIP = @STRIP@
VERSION = @VERSION@
ac_ct_CC = @ac_ct_CC@
ac_ct_CXX = @ac_ct_CXX@
ac_ct_F77 = @ac_ct_F77@
am__fastdepCC_FALSE = @am__fastdepCC_FALSE@
am__fastdepCC_TRUE = @am__fastdepCC_TRUE@
am__fastdepCXX_FALSE = @am__fastdepCXX_FALSE@
am__fastdepCXX_TRUE = @am__fastdepCXX_TRUE@
am__include = @am__include@
am__leading_dot = @am__leading_dot@
am__quote = @am__quote@
bindir = @bindir@
build = @build@
build_alias = @build_alias@
build_cpu = @build_cpu@
build_os = @build_os@
build_vendor = @build_vendor@
datadir = @datadir@
datarootdir = @datarootdir@
docdir = @docdir@
dvidir = @dvidir@
exec_prefix = @exec_prefix@
host = @host@
host_alias = @host_alias@
host_cpu = @host_cpu@
host_os = @host_os@
host_vendor = @host_vendor@
htmldir = @htmldir@
includedir = @includedir@
infodir = @infodir@
install_sh = @install_sh@
libdir = @libdir@
libexecdir = @libexecdir@
localedir = @localedir@
localstatedir = @localstatedir@
mandir = @mandir@
oldincludedir = @oldincludedir@
pdfdir = @pdfdir@
prefix = @prefix@
program_transform_name = @program_transform_name@
psdir = @psdir@
sbindir = @sbindir@
sharedstatedir = @sharedstatedir@
sysconfdir = @sysconfdir@
target_alias = @target_alias@
AUTOMAKE_OPTIONS = foreign no-dependencies
VERSION_INFO = 3:0
EXTRA_DIST = LICENSE Changes libev.m4 autogen.sh \
ev_vars.h ev_wrap.h \
ev_epoll.c ev_select.c ev_poll.c ev_kqueue.c ev_port.c ev_win32.c \
ev.3 ev.pod
man_MANS = ev.3
include_HEADERS = ev.h ev++.h event.h
lib_LTLIBRARIES = libev.la
libev_la_SOURCES = ev.c event.c
libev_la_LDFLAGS = -version-info $(VERSION_INFO)
subdir = .
ACLOCAL_M4 = $(top_srcdir)/aclocal.m4
mkinstalldirs = $(SHELL) $(top_srcdir)/mkinstalldirs
CONFIG_HEADER = config.h
CONFIG_CLEAN_FILES =
LTLIBRARIES = $(lib_LTLIBRARIES)
libev_la_LIBADD =
am_libev_la_OBJECTS = ev.lo event.lo
libev_la_OBJECTS = $(am_libev_la_OBJECTS)
DEFAULT_INCLUDES = -I. -I$(srcdir) -I.
depcomp =
am__depfiles_maybe =
COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \
$(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
LTCOMPILE = $(LIBTOOL) --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) \
$(INCLUDES) $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS)
CCLD = $(CC)
LINK = $(LIBTOOL) --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) \
$(AM_LDFLAGS) $(LDFLAGS) -o $@
DIST_SOURCES = $(libev_la_SOURCES)
NROFF = nroff
MANS = $(man_MANS)
HEADERS = $(include_HEADERS)
DIST_COMMON = README $(include_HEADERS) $(srcdir)/Makefile.in \
$(srcdir)/configure Makefile.am aclocal.m4 config.guess \
config.h.in config.sub configure configure.ac install-sh \
ltmain.sh missing mkinstalldirs
SOURCES = $(libev_la_SOURCES)
all: config.h
$(MAKE) $(AM_MAKEFLAGS) all-am
.SUFFIXES:
.SUFFIXES: .c .lo .o .obj
am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \
configure.lineno
$(srcdir)/Makefile.in: @MAINTAINER_MODE_TRUE@ Makefile.am $(top_srcdir)/configure.ac $(ACLOCAL_M4)
cd $(top_srcdir) && \
$(AUTOMAKE) --foreign Makefile
Makefile: @MAINTAINER_MODE_TRUE@ $(srcdir)/Makefile.in $(top_builddir)/config.status
cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)
$(top_builddir)/config.status: $(srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES)
$(SHELL) ./config.status --recheck
$(srcdir)/configure: @MAINTAINER_MODE_TRUE@ $(srcdir)/configure.ac $(ACLOCAL_M4) $(CONFIGURE_DEPENDENCIES)
cd $(srcdir) && $(AUTOCONF)
$(ACLOCAL_M4): @MAINTAINER_MODE_TRUE@ configure.ac
cd $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS)
config.h: stamp-h1
@if test ! -f $@; then \
rm -f stamp-h1; \
$(MAKE) stamp-h1; \
else :; fi
stamp-h1: $(srcdir)/config.h.in $(top_builddir)/config.status
@rm -f stamp-h1
cd $(top_builddir) && $(SHELL) ./config.status config.h
$(srcdir)/config.h.in: @MAINTAINER_MODE_TRUE@ $(top_srcdir)/configure.ac $(ACLOCAL_M4)
cd $(top_srcdir) && $(AUTOHEADER)
touch $(srcdir)/config.h.in
distclean-hdr:
-rm -f config.h stamp-h1
libLTLIBRARIES_INSTALL = $(INSTALL)
install-libLTLIBRARIES: $(lib_LTLIBRARIES)
@$(NORMAL_INSTALL)
$(mkinstalldirs) $(DESTDIR)$(libdir)
@list='$(lib_LTLIBRARIES)'; for p in $$list; do \
if test -f $$p; then \
f="`echo $$p | sed -e 's|^.*/||'`"; \
echo " $(LIBTOOL) --mode=install $(libLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) $$p $(DESTDIR)$(libdir)/$$f"; \
$(LIBTOOL) --mode=install $(libLTLIBRARIES_INSTALL) $(INSTALL_STRIP_FLAG) $$p $(DESTDIR)$(libdir)/$$f; \
else :; fi; \
done
uninstall-libLTLIBRARIES:
@$(NORMAL_UNINSTALL)
@list='$(lib_LTLIBRARIES)'; for p in $$list; do \
p="`echo $$p | sed -e 's|^.*/||'`"; \
echo " $(LIBTOOL) --mode=uninstall rm -f $(DESTDIR)$(libdir)/$$p"; \
$(LIBTOOL) --mode=uninstall rm -f $(DESTDIR)$(libdir)/$$p; \
done
clean-libLTLIBRARIES:
-test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES)
@list='$(lib_LTLIBRARIES)'; for p in $$list; do \
dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \
test "$$dir" = "$$p" && dir=.; \
echo "rm -f \"$${dir}/so_locations\""; \
rm -f "$${dir}/so_locations"; \
done
libev.la: $(libev_la_OBJECTS) $(libev_la_DEPENDENCIES)
$(LINK) -rpath $(libdir) $(libev_la_LDFLAGS) $(libev_la_OBJECTS) $(libev_la_LIBADD) $(LIBS)
mostlyclean-compile:
-rm -f *.$(OBJEXT) core *.core
distclean-compile:
-rm -f *.tab.c
.c.o:
$(COMPILE) -c `test -f '$<' || echo '$(srcdir)/'`$<
.c.obj:
$(COMPILE) -c `if test -f '$<'; then $(CYGPATH_W) '$<'; else $(CYGPATH_W) '$(srcdir)/$<'; fi`
.c.lo:
$(LTCOMPILE) -c -o $@ `test -f '$<' || echo '$(srcdir)/'`$<
mostlyclean-libtool:
-rm -f *.lo
clean-libtool:
-rm -rf .libs _libs
distclean-libtool:
-rm -f libtool
uninstall-info-am:
man3dir = $(mandir)/man3
install-man3: $(man3_MANS) $(man_MANS)
@$(NORMAL_INSTALL)
$(mkinstalldirs) $(DESTDIR)$(man3dir)
@list='$(man3_MANS) $(dist_man3_MANS) $(nodist_man3_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.3*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
if test -f $(srcdir)/$$i; then file=$(srcdir)/$$i; \
else file=$$i; fi; \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
3*) ;; \
*) ext='3' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " $(INSTALL_DATA) $$file $(DESTDIR)$(man3dir)/$$inst"; \
$(INSTALL_DATA) $$file $(DESTDIR)$(man3dir)/$$inst; \
done
uninstall-man3:
@$(NORMAL_UNINSTALL)
@list='$(man3_MANS) $(dist_man3_MANS) $(nodist_man3_MANS)'; \
l2='$(man_MANS) $(dist_man_MANS) $(nodist_man_MANS)'; \
for i in $$l2; do \
case "$$i" in \
*.3*) list="$$list $$i" ;; \
esac; \
done; \
for i in $$list; do \
ext=`echo $$i | sed -e 's/^.*\\.//'`; \
case "$$ext" in \
3*) ;; \
*) ext='3' ;; \
esac; \
inst=`echo $$i | sed -e 's/\\.[0-9a-z]*$$//'`; \
inst=`echo $$inst | sed -e 's/^.*\///'`; \
inst=`echo $$inst | sed '$(transform)'`.$$ext; \
echo " rm -f $(DESTDIR)$(man3dir)/$$inst"; \
rm -f $(DESTDIR)$(man3dir)/$$inst; \
done
includeHEADERS_INSTALL = $(INSTALL_HEADER)
install-includeHEADERS: $(include_HEADERS)
@$(NORMAL_INSTALL)
$(mkinstalldirs) $(DESTDIR)$(includedir)
@list='$(include_HEADERS)'; for p in $$list; do \
if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \
f="`echo $$p | sed -e 's|^.*/||'`"; \
echo " $(includeHEADERS_INSTALL) $$d$$p $(DESTDIR)$(includedir)/$$f"; \
$(includeHEADERS_INSTALL) $$d$$p $(DESTDIR)$(includedir)/$$f; \
done
uninstall-includeHEADERS:
@$(NORMAL_UNINSTALL)
@list='$(include_HEADERS)'; for p in $$list; do \
f="`echo $$p | sed -e 's|^.*/||'`"; \
echo " rm -f $(DESTDIR)$(includedir)/$$f"; \
rm -f $(DESTDIR)$(includedir)/$$f; \
done
ETAGS = etags
ETAGSFLAGS =
CTAGS = ctags
CTAGSFLAGS =
tags: TAGS
ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES)
list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
mkid -fID $$unique
TAGS: $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
$(TAGS_FILES) $(LISP)
tags=; \
here=`pwd`; \
list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
test -z "$(ETAGS_ARGS)$$tags$$unique" \
|| $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \
$$tags $$unique
ctags: CTAGS
CTAGS: $(HEADERS) $(SOURCES) config.h.in $(TAGS_DEPENDENCIES) \
$(TAGS_FILES) $(LISP)
tags=; \
here=`pwd`; \
list='$(SOURCES) $(HEADERS) config.h.in $(LISP) $(TAGS_FILES)'; \
unique=`for i in $$list; do \
if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \
done | \
$(AWK) ' { files[$$0] = 1; } \
END { for (i in files) print i; }'`; \
test -z "$(CTAGS_ARGS)$$tags$$unique" \
|| $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \
$$tags $$unique
GTAGS:
here=`$(am__cd) $(top_builddir) && pwd` \
&& cd $(top_srcdir) \
&& gtags -i $(GTAGS_ARGS) $$here
distclean-tags:
-rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags
DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST)
top_distdir = .
distdir = $(PACKAGE)-$(VERSION)
am__remove_distdir = \
{ test ! -d $(distdir) \
|| { find $(distdir) -type d ! -perm -200 -exec chmod u+w {} ';' \
&& rm -fr $(distdir); }; }
GZIP_ENV = --best
distuninstallcheck_listfiles = find . -type f -print
distcleancheck_listfiles = find . -type f -print
distdir: $(DISTFILES)
$(am__remove_distdir)
mkdir $(distdir)
@srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; \
topsrcdirstrip=`echo "$(top_srcdir)" | sed 's|.|.|g'`; \
list='$(DISTFILES)'; for file in $$list; do \
case $$file in \
$(srcdir)/*) file=`echo "$$file" | sed "s|^$$srcdirstrip/||"`;; \
$(top_srcdir)/*) file=`echo "$$file" | sed "s|^$$topsrcdirstrip/|$(top_builddir)/|"`;; \
esac; \
if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \
dir=`echo "$$file" | sed -e 's,/[^/]*$$,,'`; \
if test "$$dir" != "$$file" && test "$$dir" != "."; then \
dir="/$$dir"; \
$(mkinstalldirs) "$(distdir)$$dir"; \
else \
dir=''; \
fi; \
if test -d $$d/$$file; then \
if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \
cp -pR $(srcdir)/$$file $(distdir)$$dir || exit 1; \
fi; \
cp -pR $$d/$$file $(distdir)$$dir || exit 1; \
else \
test -f $(distdir)/$$file \
|| cp -p $$d/$$file $(distdir)/$$file \
|| exit 1; \
fi; \
done
-find $(distdir) -type d ! -perm -777 -exec chmod a+rwx {} \; -o \
! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \
! -type d ! -perm -400 -exec chmod a+r {} \; -o \
! -type d ! -perm -444 -exec $(SHELL) $(install_sh) -c -m a+r {} {} \; \
|| chmod -R a+r $(distdir)
dist-gzip: distdir
$(AMTAR) chof - $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
$(am__remove_distdir)
dist dist-all: distdir
$(AMTAR) chof - $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz
$(am__remove_distdir)
# This target untars the dist file and tries a VPATH configuration. Then
# it guarantees that the distribution is self-contained by making another
# tarfile.
distcheck: dist
$(am__remove_distdir)
GZIP=$(GZIP_ENV) gunzip -c $(distdir).tar.gz | $(AMTAR) xf -
chmod -R a-w $(distdir); chmod a+w $(distdir)
mkdir $(distdir)/_build
mkdir $(distdir)/_inst
chmod a-w $(distdir)
dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \
&& dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \
&& cd $(distdir)/_build \
&& ../configure --srcdir=.. --prefix="$$dc_install_base" \
$(DISTCHECK_CONFIGURE_FLAGS) \
&& $(MAKE) $(AM_MAKEFLAGS) \
&& $(MAKE) $(AM_MAKEFLAGS) dvi \
&& $(MAKE) $(AM_MAKEFLAGS) check \
&& $(MAKE) $(AM_MAKEFLAGS) install \
&& $(MAKE) $(AM_MAKEFLAGS) installcheck \
&& $(MAKE) $(AM_MAKEFLAGS) uninstall \
&& $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \
distuninstallcheck \
&& chmod -R a-w "$$dc_install_base" \
&& ({ \
(cd ../.. && $(mkinstalldirs) "$$dc_destdir") \
&& $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \
&& $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \
&& $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \
distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \
} || { rm -rf "$$dc_destdir"; exit 1; }) \
&& rm -rf "$$dc_destdir" \
&& $(MAKE) $(AM_MAKEFLAGS) dist-gzip \
&& rm -f $(distdir).tar.gz \
&& $(MAKE) $(AM_MAKEFLAGS) distcleancheck
$(am__remove_distdir)
@echo "$(distdir).tar.gz is ready for distribution" | \
sed 'h;s/./=/g;p;x;p;x'
distuninstallcheck:
@cd $(distuninstallcheck_dir) \
&& test `$(distuninstallcheck_listfiles) | wc -l` -le 1 \
|| { echo "ERROR: files left after uninstall:" ; \
if test -n "$(DESTDIR)"; then \
echo " (check DESTDIR support)"; \
fi ; \
$(distuninstallcheck_listfiles) ; \
exit 1; } >&2
distcleancheck: distclean
@if test '$(srcdir)' = . ; then \
echo "ERROR: distcleancheck can only run from a VPATH build" ; \
exit 1 ; \
fi
@test `$(distcleancheck_listfiles) | wc -l` -eq 0 \
|| { echo "ERROR: files left in build directory after distclean:" ; \
$(distcleancheck_listfiles) ; \
exit 1; } >&2
check-am: all-am
check: check-am
all-am: Makefile $(LTLIBRARIES) $(MANS) $(HEADERS) config.h
installdirs:
$(mkinstalldirs) $(DESTDIR)$(libdir) $(DESTDIR)$(man3dir) $(DESTDIR)$(includedir)
install: install-am
install-exec: install-exec-am
install-data: install-data-am
uninstall: uninstall-am
install-am: all-am
@$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am
installcheck: installcheck-am
install-strip:
$(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \
install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \
`test -z '$(STRIP)' || \
echo "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'"` install
mostlyclean-generic:
clean-generic:
distclean-generic:
-rm -f $(CONFIG_CLEAN_FILES)
maintainer-clean-generic:
@echo "This command is intended for maintainers to use"
@echo "it deletes files that may require special tools to rebuild."
clean: clean-am
clean-am: clean-generic clean-libLTLIBRARIES clean-libtool \
mostlyclean-am
distclean: distclean-am
-rm -f $(am__CONFIG_DISTCLEAN_FILES)
-rm -f Makefile
distclean-am: clean-am distclean-compile distclean-generic distclean-hdr \
distclean-libtool distclean-tags
dvi: dvi-am
dvi-am:
info: info-am
info-am:
install-data-am: install-includeHEADERS install-man
install-exec-am: install-libLTLIBRARIES
install-info: install-info-am
install-man: install-man3
installcheck-am:
maintainer-clean: maintainer-clean-am
-rm -f $(am__CONFIG_DISTCLEAN_FILES)
-rm -rf $(top_srcdir)/autom4te.cache
-rm -f Makefile
maintainer-clean-am: distclean-am maintainer-clean-generic
mostlyclean: mostlyclean-am
mostlyclean-am: mostlyclean-compile mostlyclean-generic \
mostlyclean-libtool
pdf: pdf-am
pdf-am:
ps: ps-am
ps-am:
uninstall-am: uninstall-includeHEADERS uninstall-info-am \
uninstall-libLTLIBRARIES uninstall-man
uninstall-man: uninstall-man3
.PHONY: CTAGS GTAGS all all-am check check-am clean clean-generic \
clean-libLTLIBRARIES clean-libtool ctags dist dist-all \
dist-gzip distcheck distclean distclean-compile \
distclean-generic distclean-hdr distclean-libtool \
distclean-tags distcleancheck distdir distuninstallcheck dvi \
dvi-am info info-am install install-am install-data \
install-data-am install-exec install-exec-am \
install-includeHEADERS install-info install-info-am \
install-libLTLIBRARIES install-man install-man3 install-strip \
installcheck installcheck-am installdirs maintainer-clean \
maintainer-clean-generic mostlyclean mostlyclean-compile \
mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \
tags uninstall uninstall-am uninstall-includeHEADERS \
uninstall-info-am uninstall-libLTLIBRARIES uninstall-man \
uninstall-man3
# Tell versions [3.59,3.63) of GNU make to not export all variables.
# Otherwise a system limit (for SysV at least) may be exceeded.
.NOEXPORT:

3
deps/libev/README.embed

@ -1,3 +0,0 @@
This file is now included in the main libev documentation, see
http://cvs.schmorp.de/libev/ev.html

62
deps/libev/Symbols.ev

@ -1,62 +0,0 @@
ev_async_send
ev_async_start
ev_async_stop
ev_backend
ev_check_start
ev_check_stop
ev_child_start
ev_child_stop
ev_clear_pending
ev_default_destroy
ev_default_fork
ev_default_loop_init
ev_default_loop_ptr
ev_embed_start
ev_embed_stop
ev_embed_sweep
ev_embeddable_backends
ev_feed_event
ev_feed_fd_event
ev_feed_signal_event
ev_fork_start
ev_fork_stop
ev_idle_start
ev_idle_stop
ev_invoke
ev_io_start
ev_io_stop
ev_loop
ev_loop_count
ev_loop_destroy
ev_loop_fork
ev_loop_new
ev_loop_verify
ev_now
ev_now_update
ev_once
ev_periodic_again
ev_periodic_start
ev_periodic_stop
ev_prepare_start
ev_prepare_stop
ev_recommended_backends
ev_ref
ev_set_allocator
ev_set_io_collect_interval
ev_set_syserr_cb
ev_set_timeout_collect_interval
ev_signal_start
ev_signal_stop
ev_sleep
ev_stat_start
ev_stat_stat
ev_stat_stop
ev_supported_backends
ev_time
ev_timer_again
ev_timer_start
ev_timer_stop
ev_unloop
ev_unref
ev_version_major
ev_version_minor

21
deps/libev/Symbols.event

@ -1,21 +0,0 @@
event_active
event_add
event_base_dispatch
event_base_free
event_base_loop
event_base_loopexit
event_base_once
event_base_priority_init
event_base_set
event_del
event_dispatch
event_get_method
event_get_version
event_init
event_loop
event_loopexit
event_once
event_pending
event_priority_init
event_priority_set
event_set

7526
deps/libev/aclocal.m4

File diff suppressed because it is too large

1526
deps/libev/config.guess

File diff suppressed because it is too large

115
deps/libev/config.h.in

@ -0,0 +1,115 @@
/* config.h.in. Generated from configure.ac by autoheader. */
/* Define to 1 if you have the `clock_gettime' function. */
#undef HAVE_CLOCK_GETTIME
/* "use syscall interface for clock_gettime" */
#undef HAVE_CLOCK_SYSCALL
/* Define to 1 if you have the <dlfcn.h> header file. */
#undef HAVE_DLFCN_H
/* Define to 1 if you have the `epoll_ctl' function. */
#undef HAVE_EPOLL_CTL
/* Define to 1 if you have the `eventfd' function. */
#undef HAVE_EVENTFD
/* Define to 1 if you have the `inotify_init' function. */
#undef HAVE_INOTIFY_INIT
/* Define to 1 if you have the <inttypes.h> header file. */
#undef HAVE_INTTYPES_H
/* Define to 1 if you have the `kqueue' function. */
#undef HAVE_KQUEUE
/* Define to 1 if you have the `m' library (-lm). */
#undef HAVE_LIBM
/* Define to 1 if you have the `rt' library (-lrt). */
#undef HAVE_LIBRT
/* Define to 1 if you have the <memory.h> header file. */
#undef HAVE_MEMORY_H
/* Define to 1 if you have the `nanosleep' function. */
#undef HAVE_NANOSLEEP
/* Define to 1 if you have the `poll' function. */
#undef HAVE_POLL
/* Define to 1 if you have the <poll.h> header file. */
#undef HAVE_POLL_H
/* Define to 1 if you have the `port_create' function. */
#undef HAVE_PORT_CREATE
/* Define to 1 if you have the <port.h> header file. */
#undef HAVE_PORT_H
/* Define to 1 if you have the `select' function. */
#undef HAVE_SELECT
/* Define to 1 if you have the <stdint.h> header file. */
#undef HAVE_STDINT_H
/* Define to 1 if you have the <stdlib.h> header file. */
#undef HAVE_STDLIB_H
/* Define to 1 if you have the <strings.h> header file. */
#undef HAVE_STRINGS_H
/* Define to 1 if you have the <string.h> header file. */
#undef HAVE_STRING_H
/* Define to 1 if you have the <sys/epoll.h> header file. */
#undef HAVE_SYS_EPOLL_H
/* Define to 1 if you have the <sys/eventfd.h> header file. */
#undef HAVE_SYS_EVENTFD_H
/* Define to 1 if you have the <sys/event.h> header file. */
#undef HAVE_SYS_EVENT_H
/* Define to 1 if you have the <sys/inotify.h> header file. */
#undef HAVE_SYS_INOTIFY_H
/* Define to 1 if you have the <sys/queue.h> header file. */
#undef HAVE_SYS_QUEUE_H
/* Define to 1 if you have the <sys/select.h> header file. */
#undef HAVE_SYS_SELECT_H
/* Define to 1 if you have the <sys/stat.h> header file. */
#undef HAVE_SYS_STAT_H
/* Define to 1 if you have the <sys/types.h> header file. */
#undef HAVE_SYS_TYPES_H
/* Define to 1 if you have the <unistd.h> header file. */
#undef HAVE_UNISTD_H
/* Name of package */
#undef PACKAGE
/* Define to the address where bug reports for this package should be sent. */
#undef PACKAGE_BUGREPORT
/* Define to the full name of this package. */
#undef PACKAGE_NAME
/* Define to the full name and version of this package. */
#undef PACKAGE_STRING
/* Define to the one symbol short name of this package. */
#undef PACKAGE_TARNAME
/* Define to the version of this package. */
#undef PACKAGE_VERSION
/* Define to 1 if you have the ANSI C header files. */
#undef STDC_HEADERS
/* Version number of package */
#undef VERSION

1658
deps/libev/config.sub

File diff suppressed because it is too large

22083
deps/libev/configure

File diff suppressed because it is too large

2
deps/libev/configure.ac

@ -1,7 +1,7 @@
AC_INIT AC_INIT
AC_CONFIG_SRCDIR([ev_epoll.c]) AC_CONFIG_SRCDIR([ev_epoll.c])
AM_INIT_AUTOMAKE(libev,3.53) AM_INIT_AUTOMAKE(libev,3.6)
AC_CONFIG_HEADERS([config.h]) AC_CONFIG_HEADERS([config.h])
AM_MAINTAINER_MODE AM_MAINTAINER_MODE

448
deps/libev/ev.3

@ -132,7 +132,7 @@
.\" ======================================================================== .\" ========================================================================
.\" .\"
.IX Title "LIBEV 3" .IX Title "LIBEV 3"
.TH LIBEV 3 "2009-02-06" "libev-3.53" "libev - high performance full featured event loop" .TH LIBEV 3 "2009-04-25" "libev-3.6" "libev - high performance full featured event loop"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes .\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents. .\" way too many mistakes in technical documents.
.if n .ad l .if n .ad l
@ -203,12 +203,23 @@ libev \- a high performance full\-featured event loop written in C
\& return 0; \& return 0;
\& } \& }
.Ve .Ve
.SH "DESCRIPTION" .SH "ABOUT THIS DOCUMENT"
.IX Header "DESCRIPTION" .IX Header "ABOUT THIS DOCUMENT"
This document documents the libev software package.
.PP
The newest version of this document is also available as an html-formatted The newest version of this document is also available as an html-formatted
web page you might find easier to navigate when reading it for the first web page you might find easier to navigate when reading it for the first
time: <http://pod.tst.eu/http://cvs.schmorp.de/libev/ev.pod>. time: <http://pod.tst.eu/http://cvs.schmorp.de/libev/ev.pod>.
.PP .PP
While this document tries to be as complete as possible in documenting
libev, its usage and the rationale behind its design, it is not a tutorial
on event-based programming, nor will it introduce event-based programming
with libev.
.PP
Familiarity with event-based programming techniques in general is assumed
throughout this document.
.SH "ABOUT LIBEV"
.IX Header "ABOUT LIBEV"
Libev is an event loop: you register interest in certain events (such as a Libev is an event loop: you register interest in certain events (such as a
file descriptor being readable or a timeout occurring), and it will manage file descriptor being readable or a timeout occurring), and it will manage
these event sources and provide your program with events. these event sources and provide your program with events.
@ -248,12 +259,12 @@ name \f(CW\*(C`loop\*(C'\fR (which is always of type \f(CW\*(C`ev_loop *\*(C'\fR
this argument. this argument.
.Sh "\s-1TIME\s0 \s-1REPRESENTATION\s0" .Sh "\s-1TIME\s0 \s-1REPRESENTATION\s0"
.IX Subsection "TIME REPRESENTATION" .IX Subsection "TIME REPRESENTATION"
Libev represents time as a single floating point number, representing the Libev represents time as a single floating point number, representing
(fractional) number of seconds since the (\s-1POSIX\s0) epoch (somewhere near the (fractional) number of seconds since the (\s-1POSIX\s0) epoch (somewhere
the beginning of 1970, details are complicated, don't ask). This type is near the beginning of 1970, details are complicated, don't ask). This
called \f(CW\*(C`ev_tstamp\*(C'\fR, which is what you should use too. It usually aliases type is called \f(CW\*(C`ev_tstamp\*(C'\fR, which is what you should use too. It usually
to the \f(CW\*(C`double\*(C'\fR type in C, and when you need to do any calculations on aliases to the \f(CW\*(C`double\*(C'\fR type in C. When you need to do any calculations
it, you should treat it as some floating point value. Unlike the name on it, you should treat it as some floating point value. Unlike the name
component \f(CW\*(C`stamp\*(C'\fR might indicate, it is also used for time differences component \f(CW\*(C`stamp\*(C'\fR might indicate, it is also used for time differences
throughout libev. throughout libev.
.SH "ERROR HANDLING" .SH "ERROR HANDLING"
@ -762,6 +773,33 @@ very long time without entering the event loop, updating libev's idea of
the current time is a good idea. the current time is a good idea.
.Sp .Sp
See also \*(L"The special problem of time updates\*(R" in the \f(CW\*(C`ev_timer\*(C'\fR section. See also \*(L"The special problem of time updates\*(R" in the \f(CW\*(C`ev_timer\*(C'\fR section.
.IP "ev_suspend (loop)" 4
.IX Item "ev_suspend (loop)"
.PD 0
.IP "ev_resume (loop)" 4
.IX Item "ev_resume (loop)"
.PD
These two functions suspend and resume a loop, for use when the loop is
not used for a while and timeouts should not be processed.
.Sp
A typical use case would be an interactive program such as a game: When
the user presses \f(CW\*(C`^Z\*(C'\fR to suspend the game and resumes it an hour later it
would be best to handle timeouts as if no time had actually passed while
the program was suspended. This can be achieved by calling \f(CW\*(C`ev_suspend\*(C'\fR
in your \f(CW\*(C`SIGTSTP\*(C'\fR handler, sending yourself a \f(CW\*(C`SIGSTOP\*(C'\fR and calling
\&\f(CW\*(C`ev_resume\*(C'\fR directly afterwards to resume timer processing.
.Sp
Effectively, all \f(CW\*(C`ev_timer\*(C'\fR watchers will be delayed by the time spent
between \f(CW\*(C`ev_suspend\*(C'\fR and \f(CW\*(C`ev_resume\*(C'\fR, and all \f(CW\*(C`ev_periodic\*(C'\fR watchers
will be rescheduled (that is, they will lose any events that would have
occurred while suspended).
.Sp
After calling \f(CW\*(C`ev_suspend\*(C'\fR you \fBmust not\fR call \fIany\fR function on the
given loop other than \f(CW\*(C`ev_resume\*(C'\fR, and you \fBmust not\fR call \f(CW\*(C`ev_resume\*(C'\fR
without a previous call to \f(CW\*(C`ev_suspend\*(C'\fR.
.Sp
Calling \f(CW\*(C`ev_suspend\*(C'\fR/\f(CW\*(C`ev_resume\*(C'\fR has the side effect of updating the
event loop time (see \f(CW\*(C`ev_now_update\*(C'\fR).
.IP "ev_loop (loop, int flags)" 4 .IP "ev_loop (loop, int flags)" 4
.IX Item "ev_loop (loop, int flags)" .IX Item "ev_loop (loop, int flags)"
Finally, this is it, the event handler. This function usually is called Finally, this is it, the event handler. This function usually is called
@ -858,13 +896,15 @@ If you have a watcher you never unregister that should not keep \f(CW\*(C`ev_loo
from returning, call \fIev_unref()\fR after starting, and \fIev_ref()\fR before from returning, call \fIev_unref()\fR after starting, and \fIev_ref()\fR before
stopping it. stopping it.
.Sp .Sp
As an example, libev itself uses this for its internal signal pipe: It is As an example, libev itself uses this for its internal signal pipe: It
not visible to the libev user and should not keep \f(CW\*(C`ev_loop\*(C'\fR from exiting is not visible to the libev user and should not keep \f(CW\*(C`ev_loop\*(C'\fR from
if no event watchers registered by it are active. It is also an excellent exiting if no event watchers registered by it are active. It is also an
way to do this for generic recurring timers or from within third-party excellent way to do this for generic recurring timers or from within
libraries. Just remember to \fIunref after start\fR and \fIref before stop\fR third-party libraries. Just remember to \fIunref after start\fR and \fIref
(but only if the watcher wasn't active before, or was active before, before stop\fR (but only if the watcher wasn't active before, or was active
respectively). before, respectively. Note also that libev might stop watchers itself
(e.g. non-repeating timers) in which case you have to \f(CW\*(C`ev_ref\*(C'\fR
in the callback).
.Sp .Sp
Example: Create a signal watcher, but keep it from keeping \f(CW\*(C`ev_loop\*(C'\fR Example: Create a signal watcher, but keep it from keeping \f(CW\*(C`ev_loop\*(C'\fR
running when nothing else is active. running when nothing else is active.
@ -1062,6 +1102,11 @@ The event loop has been resumed in the child process after fork (see
.el .IP "\f(CWEV_ASYNC\fR" 4 .el .IP "\f(CWEV_ASYNC\fR" 4
.IX Item "EV_ASYNC" .IX Item "EV_ASYNC"
The given async watcher has been asynchronously notified (see \f(CW\*(C`ev_async\*(C'\fR). The given async watcher has been asynchronously notified (see \f(CW\*(C`ev_async\*(C'\fR).
.ie n .IP """EV_CUSTOM""" 4
.el .IP "\f(CWEV_CUSTOM\fR" 4
.IX Item "EV_CUSTOM"
Not ever sent (or otherwise used) by libev itself, but can be freely used
by libev users to signal watchers (e.g. via \f(CW\*(C`ev_feed_event\*(C'\fR).
.ie n .IP """EV_ERROR""" 4 .ie n .IP """EV_ERROR""" 4
.el .IP "\f(CWEV_ERROR\fR" 4 .el .IP "\f(CWEV_ERROR\fR" 4
.IX Item "EV_ERROR" .IX Item "EV_ERROR"
@ -1186,23 +1231,21 @@ integer between \f(CW\*(C`EV_MAXPRI\*(C'\fR (default: \f(CW2\fR) and \f(CW\*(C`E
before watchers with lower priority, but priority will not keep watchers before watchers with lower priority, but priority will not keep watchers
from being executed (except for \f(CW\*(C`ev_idle\*(C'\fR watchers). from being executed (except for \f(CW\*(C`ev_idle\*(C'\fR watchers).
.Sp .Sp
This means that priorities are \fIonly\fR used for ordering callback
invocation after new events have been received. This is useful, for
example, to reduce latency after idling, or more often, to bind two
watchers on the same event and make sure one is called first.
.Sp
If you need to suppress invocation when higher priority events are pending If you need to suppress invocation when higher priority events are pending
you need to look at \f(CW\*(C`ev_idle\*(C'\fR watchers, which provide this functionality. you need to look at \f(CW\*(C`ev_idle\*(C'\fR watchers, which provide this functionality.
.Sp .Sp
You \fImust not\fR change the priority of a watcher as long as it is active or You \fImust not\fR change the priority of a watcher as long as it is active or
pending. pending.
.Sp .Sp
The default priority used by watchers when no priority has been set is
always \f(CW0\fR, which is supposed to not be too high and not be too low :).
.Sp
Setting a priority outside the range of \f(CW\*(C`EV_MINPRI\*(C'\fR to \f(CW\*(C`EV_MAXPRI\*(C'\fR is Setting a priority outside the range of \f(CW\*(C`EV_MINPRI\*(C'\fR to \f(CW\*(C`EV_MAXPRI\*(C'\fR is
fine, as long as you do not mind that the priority value you query might fine, as long as you do not mind that the priority value you query might
or might not have been clamped to the valid range. or might not have been clamped to the valid range.
.Sp
The default priority used by watchers when no priority has been set is
always \f(CW0\fR, which is supposed to not be too high and not be too low :).
.Sp
See \*(L"\s-1WATCHER\s0 \s-1PRIORITY\s0 \s-1MODELS\s0\*(R", below, for a more thorough treatment of
priorities.
.IP "ev_invoke (loop, ev_TYPE *watcher, int revents)" 4 .IP "ev_invoke (loop, ev_TYPE *watcher, int revents)" 4
.IX Item "ev_invoke (loop, ev_TYPE *watcher, int revents)" .IX Item "ev_invoke (loop, ev_TYPE *watcher, int revents)"
Invoke the \f(CW\*(C`watcher\*(C'\fR with the given \f(CW\*(C`loop\*(C'\fR and \f(CW\*(C`revents\*(C'\fR. Neither Invoke the \f(CW\*(C`watcher\*(C'\fR with the given \f(CW\*(C`loop\*(C'\fR and \f(CW\*(C`revents\*(C'\fR. Neither
@ -1289,6 +1332,110 @@ programmers):
\& (((char *)w) \- offsetof (struct my_biggy, t2)); \& (((char *)w) \- offsetof (struct my_biggy, t2));
\& } \& }
.Ve .Ve
.Sh "\s-1WATCHER\s0 \s-1PRIORITY\s0 \s-1MODELS\s0"
.IX Subsection "WATCHER PRIORITY MODELS"
Many event loops support \fIwatcher priorities\fR, which are usually small
integers that influence the ordering of event callback invocation
between watchers in some way, all else being equal.
.PP
In libev, watcher priorities can be set using \f(CW\*(C`ev_set_priority\*(C'\fR. See its
description for the more technical details such as the actual priority
range.
.PP
There are two common ways in which these priorities are interpreted
by event loops:
.PP
In the more common lock-out model, higher priorities \*(L"lock out\*(R" invocation
of lower priority watchers, which means as long as higher priority
watchers receive events, lower priority watchers are not being invoked.
.PP
The less common only-for-ordering model uses priorities solely to order
callback invocation within a single event loop iteration: Higher priority
watchers are invoked before lower priority ones, but they all get invoked
before polling for new events.
.PP
Libev uses the second (only-for-ordering) model for all its watchers
except for idle watchers (which use the lock-out model).
.PP
The rationale behind this is that implementing the lock-out model for
watchers is not well supported by most kernel interfaces, and most event
libraries will just poll for the same events again and again as long as
their callbacks have not been executed, which is very inefficient in the
common case of one high-priority watcher locking out a mass of lower
priority ones.
.PP
Static (ordering) priorities are most useful when you have two or more
watchers handling the same resource: a typical usage example is having an
\&\f(CW\*(C`ev_io\*(C'\fR watcher to receive data, and an associated \f(CW\*(C`ev_timer\*(C'\fR to handle
timeouts. Under load, data might be received while the program handles
other jobs, but since timers normally get invoked first, the timeout
handler will be executed before checking for data. In that case, giving
the timer a lower priority than the I/O watcher ensures that I/O will be
handled first even under adverse conditions (which is usually, but not
always, what you want).
.PP
Since idle watchers use the \*(L"lock-out\*(R" model, meaning that idle watchers
will only be executed when no same or higher priority watchers have
received events, they can be used to implement the \*(L"lock-out\*(R" model when
required.
.PP
For example, to emulate how many other event libraries handle priorities,
you can associate an \f(CW\*(C`ev_idle\*(C'\fR watcher to each such watcher, and in
the normal watcher callback, you just start the idle watcher. The real
processing is done in the idle watcher callback. This causes libev to
continuously poll and process kernel event data for the watcher, but when
the lock-out case is known to be rare (which in turn is rare :), this is
workable.
.PP
Usually, however, the lock-out model implemented that way will perform
miserably under the type of load it was designed to handle. In that case,
it might be preferable to stop the real watcher before starting the
idle watcher, so the kernel will not have to process the event in case
the actual processing will be delayed for considerable time.
.PP
Here is an example of an I/O watcher that should run at a strictly lower
priority than the default, and which should only process data when no
other events are pending:
.PP
.Vb 2
\& ev_idle idle; // actual processing watcher
\& ev_io io; // actual event watcher
\&
\& static void
\& io_cb (EV_P_ ev_io *w, int revents)
\& {
\& // stop the I/O watcher, we received the event, but
\& // are not yet ready to handle it.
\& ev_io_stop (EV_A_ w);
\&
\&   // start the idle watcher to handle the actual event.
\& // it will not be executed as long as other watchers
\& // with the default priority are receiving events.
\& ev_idle_start (EV_A_ &idle);
\& }
\&
\& static void
\& idle_cb (EV_P_ ev_idle *w, int revents)
\& {
\& // actual processing
\& read (STDIN_FILENO, ...);
\&
\& // have to start the I/O watcher again, as
\& // we have handled the event
\& ev_io_start (EV_P_ &io);
\& }
\&
\& // initialisation
\& ev_idle_init (&idle, idle_cb);
\& ev_io_init (&io, io_cb, STDIN_FILENO, EV_READ);
\& ev_io_start (EV_DEFAULT_ &io);
.Ve
.PP
In the \*(L"real\*(R" world, it might also be beneficial to start a timer, so that
low-priority connections can not be locked out forever under load. This
enables your program to keep a lower latency for important connections
during short periods of high load, while not completely locking out less
important ones.
.SH "WATCHER TYPES" .SH "WATCHER TYPES"
.IX Header "WATCHER TYPES" .IX Header "WATCHER TYPES"
This section describes each watcher in detail, but will not repeat This section describes each watcher in detail, but will not repeat
@ -1321,7 +1468,9 @@ required if you know what you are doing).
.PP .PP
If you cannot use non-blocking mode, then force the use of a If you cannot use non-blocking mode, then force the use of a
known-to-be-good backend (at the time of this writing, this includes only known-to-be-good backend (at the time of this writing, this includes only
\&\f(CW\*(C`EVBACKEND_SELECT\*(C'\fR and \f(CW\*(C`EVBACKEND_POLL\*(C'\fR). \&\f(CW\*(C`EVBACKEND_SELECT\*(C'\fR and \f(CW\*(C`EVBACKEND_POLL\*(C'\fR). The same applies to file
descriptors for which non-blocking operation makes no sense (such as
files) \- libev doesn't guarantee any specific behaviour in that case.
.PP .PP
Another thing you have to watch out for is that it is quite easy to Another thing you have to watch out for is that it is quite easy to
receive \*(L"spurious\*(R" readiness notifications, that is your callback might receive \*(L"spurious\*(R" readiness notifications, that is your callback might
@ -1453,8 +1602,11 @@ detecting time jumps is hard, and some inaccuracies are unavoidable (the
monotonic clock option helps a lot here). monotonic clock option helps a lot here).
.PP .PP
The callback is guaranteed to be invoked only \fIafter\fR its timeout has The callback is guaranteed to be invoked only \fIafter\fR its timeout has
passed, but if multiple timers become ready during the same loop iteration passed (not \fIat\fR, so on systems with very low-resolution clocks this
then order of execution is undefined. might introduce a small delay). If multiple timers become ready during the
same loop iteration then the ones with earlier time-out values are invoked
before ones with later time-out values (but this is no longer true when a
callback calls \f(CW\*(C`ev_loop\*(C'\fR recursively).
.PP .PP
\fIBe smart about timeouts\fR \fIBe smart about timeouts\fR
.IX Subsection "Be smart about timeouts" .IX Subsection "Be smart about timeouts"
@ -1745,51 +1897,62 @@ inactivity.
Periodic watchers are also timers of a kind, but they are very versatile Periodic watchers are also timers of a kind, but they are very versatile
(and unfortunately a bit complex). (and unfortunately a bit complex).
.PP .PP
Unlike \f(CW\*(C`ev_timer\*(C'\fR's, they are not based on real time (or relative time) Unlike \f(CW\*(C`ev_timer\*(C'\fR, periodic watchers are not based on real time (or
but on wall clock time (absolute time). You can tell a periodic watcher relative time, the physical time that passes) but on wall clock time
to trigger after some specific point in time. For example, if you tell a (absolute time, the thing you can read on your calendar or clock). The
periodic watcher to trigger in 10 seconds (by specifying e.g. \f(CW\*(C`ev_now () difference is that wall clock time can run faster or slower than real
+ 10.\*(C'\fR, that is, an absolute time not a delay) and then reset your system time, and time jumps are not uncommon (e.g. when you adjust your
clock to January of the previous year, then it will take more than year wrist-watch).
to trigger the event (unlike an \f(CW\*(C`ev_timer\*(C'\fR, which would still trigger .PP
roughly 10 seconds later as it uses a relative timeout). You can tell a periodic watcher to trigger after some specific point
.PP in time: for example, if you tell a periodic watcher to trigger \*(L"in 10
\&\f(CW\*(C`ev_periodic\*(C'\fRs can also be used to implement vastly more complex timers, seconds\*(R" (by specifying e.g. \f(CW\*(C`ev_now () + 10.\*(C'\fR, that is, an absolute time
such as triggering an event on each \*(L"midnight, local time\*(R", or other not a delay) and then reset your system clock to January of the previous
complicated rules. year, then it will take a year or more to trigger the event (unlike an
\&\f(CW\*(C`ev_timer\*(C'\fR, which would still trigger roughly 10 seconds after starting
it, as it uses a relative timeout).
.PP
\&\f(CW\*(C`ev_periodic\*(C'\fR watchers can also be used to implement vastly more complex
timers, such as triggering an event on each \*(L"midnight, local time\*(R", or
other complicated rules. This cannot be done with \f(CW\*(C`ev_timer\*(C'\fR watchers, as
those cannot react to time jumps.
.PP .PP
As with timers, the callback is guaranteed to be invoked only when the As with timers, the callback is guaranteed to be invoked only when the
time (\f(CW\*(C`at\*(C'\fR) has passed, but if multiple periodic timers become ready point in time where it is supposed to trigger has passed. If multiple
during the same loop iteration, then order of execution is undefined. timers become ready during the same loop iteration then the ones with
earlier time-out values are invoked before ones with later time-out values
(but this is no longer true when a callback calls \f(CW\*(C`ev_loop\*(C'\fR recursively).
.PP .PP
\fIWatcher-Specific Functions and Data Members\fR \fIWatcher-Specific Functions and Data Members\fR
.IX Subsection "Watcher-Specific Functions and Data Members" .IX Subsection "Watcher-Specific Functions and Data Members"
.IP "ev_periodic_init (ev_periodic *, callback, ev_tstamp at, ev_tstamp interval, reschedule_cb)" 4 .IP "ev_periodic_init (ev_periodic *, callback, ev_tstamp offset, ev_tstamp interval, reschedule_cb)" 4
.IX Item "ev_periodic_init (ev_periodic *, callback, ev_tstamp at, ev_tstamp interval, reschedule_cb)" .IX Item "ev_periodic_init (ev_periodic *, callback, ev_tstamp offset, ev_tstamp interval, reschedule_cb)"
.PD 0 .PD 0
.IP "ev_periodic_set (ev_periodic *, ev_tstamp after, ev_tstamp repeat, reschedule_cb)" 4 .IP "ev_periodic_set (ev_periodic *, ev_tstamp offset, ev_tstamp interval, reschedule_cb)" 4
.IX Item "ev_periodic_set (ev_periodic *, ev_tstamp after, ev_tstamp repeat, reschedule_cb)" .IX Item "ev_periodic_set (ev_periodic *, ev_tstamp offset, ev_tstamp interval, reschedule_cb)"
.PD .PD
Lots of arguments, lets sort it out... There are basically three modes of Lots of arguments, let's sort it out... There are basically three modes of
operation, and we will explain them from simplest to most complex: operation, and we will explain them from simplest to most complex:
.RS 4 .RS 4
.IP "\(bu" 4 .IP "\(bu" 4
absolute timer (at = time, interval = reschedule_cb = 0) absolute timer (offset = absolute time, interval = 0, reschedule_cb = 0)
.Sp .Sp
In this configuration the watcher triggers an event after the wall clock In this configuration the watcher triggers an event after the wall clock
time \f(CW\*(C`at\*(C'\fR has passed. It will not repeat and will not adjust when a time time \f(CW\*(C`offset\*(C'\fR has passed. It will not repeat and will not adjust when a
jump occurs, that is, if it is to be run at January 1st 2011 then it will time jump occurs, that is, if it is to be run at January 1st 2011 then it
only run when the system clock reaches or surpasses this time. will be stopped and invoked when the system clock reaches or surpasses
this point in time.
.IP "\(bu" 4 .IP "\(bu" 4
repeating interval timer (at = offset, interval > 0, reschedule_cb = 0) repeating interval timer (offset = offset within interval, interval > 0, reschedule_cb = 0)
.Sp .Sp
In this mode the watcher will always be scheduled to time out at the next In this mode the watcher will always be scheduled to time out at the next
\&\f(CW\*(C`at + N * interval\*(C'\fR time (for some integer N, which can also be negative) \&\f(CW\*(C`offset + N * interval\*(C'\fR time (for some integer N, which can also be
and then repeat, regardless of any time jumps. negative) and then repeat, regardless of any time jumps. The \f(CW\*(C`offset\*(C'\fR
argument is merely an offset into the \f(CW\*(C`interval\*(C'\fR periods.
.Sp .Sp
This can be used to create timers that do not drift with respect to the This can be used to create timers that do not drift with respect to the
system clock, for example, here is a \f(CW\*(C`ev_periodic\*(C'\fR that triggers each system clock, for example, here is an \f(CW\*(C`ev_periodic\*(C'\fR that triggers each
hour, on the hour: hour, on the hour (with respect to \s-1UTC\s0):
.Sp .Sp
.Vb 1 .Vb 1
\& ev_periodic_set (&periodic, 0., 3600., 0); \& ev_periodic_set (&periodic, 0., 3600., 0);
@ -1802,9 +1965,9 @@ by 3600.
.Sp .Sp
Another way to think about it (for the mathematically inclined) is that Another way to think about it (for the mathematically inclined) is that
\&\f(CW\*(C`ev_periodic\*(C'\fR will try to run the callback in this mode at the next possible \&\f(CW\*(C`ev_periodic\*(C'\fR will try to run the callback in this mode at the next possible
time where \f(CW\*(C`time = at (mod interval)\*(C'\fR, regardless of any time jumps. time where \f(CW\*(C`time = offset (mod interval)\*(C'\fR, regardless of any time jumps.
.Sp .Sp
For numerical stability it is preferable that the \f(CW\*(C`at\*(C'\fR value is near For numerical stability it is preferable that the \f(CW\*(C`offset\*(C'\fR value is near
\&\f(CW\*(C`ev_now ()\*(C'\fR (the current time), but there is no range requirement for \&\f(CW\*(C`ev_now ()\*(C'\fR (the current time), but there is no range requirement for
this value, and in fact is often specified as zero. this value, and in fact is often specified as zero.
.Sp .Sp
@ -1813,15 +1976,16 @@ speed for example), so if \f(CW\*(C`interval\*(C'\fR is very small then timing s
will of course deteriorate. Libev itself tries to be exact to be about one will of course deteriorate. Libev itself tries to be exact to be about one
millisecond (if the \s-1OS\s0 supports it and the machine is fast enough). millisecond (if the \s-1OS\s0 supports it and the machine is fast enough).
.IP "\(bu" 4 .IP "\(bu" 4
manual reschedule mode (at and interval ignored, reschedule_cb = callback) manual reschedule mode (offset ignored, interval ignored, reschedule_cb = callback)
.Sp .Sp
In this mode the values for \f(CW\*(C`interval\*(C'\fR and \f(CW\*(C`at\*(C'\fR are both being In this mode the values for \f(CW\*(C`interval\*(C'\fR and \f(CW\*(C`offset\*(C'\fR are both being
ignored. Instead, each time the periodic watcher gets scheduled, the ignored. Instead, each time the periodic watcher gets scheduled, the
reschedule callback will be called with the watcher as first, and the reschedule callback will be called with the watcher as first, and the
current time as second argument. current time as second argument.
.Sp .Sp
\&\s-1NOTE:\s0 \fIThis callback \s-1MUST\s0 \s-1NOT\s0 stop or destroy any periodic watcher, \&\s-1NOTE:\s0 \fIThis callback \s-1MUST\s0 \s-1NOT\s0 stop or destroy any periodic watcher, ever,
ever, or make \s-1ANY\s0 event loop modifications whatsoever\fR. or make \s-1ANY\s0 other event loop modifications whatsoever, unless explicitly
allowed by documentation here\fR.
.Sp .Sp
If you need to stop it, return \f(CW\*(C`now + 1e30\*(C'\fR (or so, fudge fudge) and stop If you need to stop it, return \f(CW\*(C`now + 1e30\*(C'\fR (or so, fudge fudge) and stop
it afterwards (e.g. by starting an \f(CW\*(C`ev_prepare\*(C'\fR watcher, which is the it afterwards (e.g. by starting an \f(CW\*(C`ev_prepare\*(C'\fR watcher, which is the
@ -1862,12 +2026,15 @@ a different time than the last time it was called (e.g. in a crond like
program when the crontabs have changed). program when the crontabs have changed).
.IP "ev_tstamp ev_periodic_at (ev_periodic *)" 4 .IP "ev_tstamp ev_periodic_at (ev_periodic *)" 4
.IX Item "ev_tstamp ev_periodic_at (ev_periodic *)" .IX Item "ev_tstamp ev_periodic_at (ev_periodic *)"
When active, returns the absolute time that the watcher is supposed to When active, returns the absolute time that the watcher is supposed
trigger next. to trigger next. This is not the same as the \f(CW\*(C`offset\*(C'\fR argument to
\&\f(CW\*(C`ev_periodic_set\*(C'\fR, but indeed works even in interval and manual
rescheduling modes.
.IP "ev_tstamp offset [read\-write]" 4 .IP "ev_tstamp offset [read\-write]" 4
.IX Item "ev_tstamp offset [read-write]" .IX Item "ev_tstamp offset [read-write]"
When repeating, this contains the offset value, otherwise this is the When repeating, this contains the offset value, otherwise this is the
absolute point in time (the \f(CW\*(C`at\*(C'\fR value passed to \f(CW\*(C`ev_periodic_set\*(C'\fR). absolute point in time (the \f(CW\*(C`offset\*(C'\fR value passed to \f(CW\*(C`ev_periodic_set\*(C'\fR,
although libev might modify this value for better numerical stability).
.Sp .Sp
Can be modified any time, but changes only take effect when the periodic Can be modified any time, but changes only take effect when the periodic
timer fires or \f(CW\*(C`ev_periodic_again\*(C'\fR is being called. timer fires or \f(CW\*(C`ev_periodic_again\*(C'\fR is being called.
@ -2329,8 +2496,8 @@ event loop has handled all outstanding events.
.PP .PP
\fIWatcher-Specific Functions and Data Members\fR \fIWatcher-Specific Functions and Data Members\fR
.IX Subsection "Watcher-Specific Functions and Data Members" .IX Subsection "Watcher-Specific Functions and Data Members"
.IP "ev_idle_init (ev_signal *, callback)" 4 .IP "ev_idle_init (ev_idle *, callback)" 4
.IX Item "ev_idle_init (ev_signal *, callback)" .IX Item "ev_idle_init (ev_idle *, callback)"
Initialises and configures the idle watcher \- it has no parameters of any Initialises and configures the idle watcher \- it has no parameters of any
kind. There is a \f(CW\*(C`ev_idle_set\*(C'\fR macro, but using it is utterly pointless, kind. There is a \f(CW\*(C`ev_idle_set\*(C'\fR macro, but using it is utterly pointless,
believe me. believe me.
@ -2700,6 +2867,40 @@ and only in the child after the fork. If whoever good citizen calling
\&\f(CW\*(C`ev_default_fork\*(C'\fR cheats and calls it in the wrong process, the fork \&\f(CW\*(C`ev_default_fork\*(C'\fR cheats and calls it in the wrong process, the fork
handlers will be invoked, too, of course. handlers will be invoked, too, of course.
.PP .PP
\fIThe special problem of life after fork \- how is it possible?\fR
.IX Subsection "The special problem of life after fork - how is it possible?"
.PP
Most uses of \f(CW\*(C`fork()\*(C'\fR consist of forking, then some simple calls to set
up/change the process environment, followed by a call to \f(CW\*(C`exec()\*(C'\fR. This
sequence should be handled by libev without any problems.
.PP
This changes when the application actually wants to do event handling
in the child, or both parent in child, in effect \*(L"continuing\*(R" after the
fork.
.PP
The default mode of operation (for libev, with application help to detect
forks) is to duplicate all the state in the child, as would be expected
when \fIeither\fR the parent \fIor\fR the child process continues.
.PP
When both processes want to continue using libev, then this is usually the
wrong result. In that case, usually one process (typically the parent) is
supposed to continue with all watchers in place as before, while the other
process typically wants to start fresh, i.e. without any active watchers.
.PP
The cleanest and most efficient way to achieve that with libev is to
simply create a new event loop, which of course will be \*(L"empty\*(R", and
use that for new watchers. This has the advantage of not touching more
memory than necessary, and thus avoiding the copy-on-write, and the
disadvantage of having to use multiple event loops (which do not support
signal watchers).
.PP
When this is not possible, or you want to use the default loop for
other reasons, then in the process that wants to start \*(L"fresh\*(R", call
\&\f(CW\*(C`ev_default_destroy ()\*(C'\fR followed by \f(CW\*(C`ev_default_loop (...)\*(C'\fR. Destroying
the default loop will \*(L"orphan\*(R" (not stop) all registered watchers, so you
have to be careful not to execute code that modifies those watchers. Note
also that in that case, you have to re-register any signal watchers.
.PP
\fIWatcher-Specific Functions and Data Members\fR \fIWatcher-Specific Functions and Data Members\fR
.IX Subsection "Watcher-Specific Functions and Data Members" .IX Subsection "Watcher-Specific Functions and Data Members"
.IP "ev_fork_init (ev_signal *, callback)" 4 .IP "ev_fork_init (ev_signal *, callback)" 4
@ -2827,9 +3028,14 @@ an \f(CW\*(C`EV_ASYNC\*(C'\fR event on the watcher into the event loop. Unlike
similar contexts (see the discussion of \f(CW\*(C`EV_ATOMIC_T\*(C'\fR in the embedding similar contexts (see the discussion of \f(CW\*(C`EV_ATOMIC_T\*(C'\fR in the embedding
section below on what exactly this means). section below on what exactly this means).
.Sp .Sp
This call incurs the overhead of a system call only once per loop iteration, Note that, as with other watchers in libev, multiple events might get
so while the overhead might be noticeable, it doesn't apply to repeated compressed into a single callback invocation (another way to look at this
calls to \f(CW\*(C`ev_async_send\*(C'\fR. is that \f(CW\*(C`ev_async\*(C'\fR watchers are level-triggered, set on \f(CW\*(C`ev_async_send\*(C'\fR,
reset when the event loop detects that).
.Sp
This call incurs the overhead of a system call only once per event loop
iteration, so while the overhead might be noticeable, it doesn't apply to
repeated calls to \f(CW\*(C`ev_async_send\*(C'\fR for the same event loop.
.IP "bool = ev_async_pending (ev_async *)" 4 .IP "bool = ev_async_pending (ev_async *)" 4
.IX Item "bool = ev_async_pending (ev_async *)" .IX Item "bool = ev_async_pending (ev_async *)"
Returns a non-zero value when \f(CW\*(C`ev_async_send\*(C'\fR has been called on the Returns a non-zero value when \f(CW\*(C`ev_async_send\*(C'\fR has been called on the
@ -2841,8 +3047,10 @@ the loop iterates next and checks for the watcher to have become active,
it will reset the flag again. \f(CW\*(C`ev_async_pending\*(C'\fR can be used to very it will reset the flag again. \f(CW\*(C`ev_async_pending\*(C'\fR can be used to very
quickly check whether invoking the loop might be a good idea. quickly check whether invoking the loop might be a good idea.
.Sp .Sp
Note that this does \fInot\fR check whether the watcher itself is pending, Note that this does \fInot\fR check whether the watcher itself is pending,
whether it has been requested to make this watcher pending. only whether it has been requested to make this watcher pending: there
is a time window between the event loop checking and resetting the async
notification, and the callback being invoked.
.SH "OTHER FUNCTIONS" .SH "OTHER FUNCTIONS"
.IX Header "OTHER FUNCTIONS" .IX Header "OTHER FUNCTIONS"
There are some other functions of possible interest. Described. Here. Now. There are some other functions of possible interest. Described. Here. Now.
@ -3133,11 +3341,7 @@ It can be found and installed via \s-1CPAN\s0, its homepage is at
.IP "Python" 4 .IP "Python" 4
.IX Item "Python" .IX Item "Python"
Python bindings can be found at <http://code.google.com/p/pyev/>. It Python bindings can be found at <http://code.google.com/p/pyev/>. It
seems to be quite complete and well-documented. Note, however, that the seems to be quite complete and well-documented.
patch they require for libev is outright dangerous as it breaks the \s-1ABI\s0
for everybody else, and therefore, should never be applied in an installed
libev (if python requires an incompatible \s-1ABI\s0 then it needs to embed
libev).
.IP "Ruby" 4 .IP "Ruby" 4
.IX Item "Ruby" .IX Item "Ruby"
Tony Arcieri has written a ruby extension that offers access to a subset Tony Arcieri has written a ruby extension that offers access to a subset
@ -3147,6 +3351,10 @@ more on top of it. It can be found via gem servers. Its homepage is at
.Sp .Sp
Roger Pack reports that using the link order \f(CW\*(C`\-lws2_32 \-lmsvcrt\-ruby\-190\*(C'\fR Roger Pack reports that using the link order \f(CW\*(C`\-lws2_32 \-lmsvcrt\-ruby\-190\*(C'\fR
makes rev work even on mingw. makes rev work even on mingw.
.IP "Haskell" 4
.IX Item "Haskell"
A haskell binding to libev is available at
<http://hackage.haskell.org/cgi\-bin/hackage\-scripts/package/hlibev>.
.IP "D" 4 .IP "D" 4
.IX Item "D" .IX Item "D"
Leandro Lucarella has written a D language binding (\fIev.d\fR) for libev, to Leandro Lucarella has written a D language binding (\fIev.d\fR) for libev, to
@ -3825,6 +4033,9 @@ way (note also that glib is the slowest event library known to man).
There is no supported compilation method available on windows except There is no supported compilation method available on windows except
embedding it into other applications. embedding it into other applications.
.PP .PP
Sensible signal handling is officially unsupported by Microsoft \- libev
tries its best, but under most conditions, signals will simply not work.
.PP
Not a libev limitation but worth mentioning: windows apparently doesn't Not a libev limitation but worth mentioning: windows apparently doesn't
accept large writes: instead of resulting in a partial write, windows will accept large writes: instead of resulting in a partial write, windows will
either accept everything or return \f(CW\*(C`ENOBUFS\*(C'\fR if the buffer is too large, either accept everything or return \f(CW\*(C`ENOBUFS\*(C'\fR if the buffer is too large,
@ -3838,7 +4049,7 @@ is not recommended (and not reasonable). If your program needs to use
more than a hundred or so sockets, then likely it needs to use a totally more than a hundred or so sockets, then likely it needs to use a totally
different implementation for windows, as libev offers the \s-1POSIX\s0 readiness different implementation for windows, as libev offers the \s-1POSIX\s0 readiness
notification model, which cannot be implemented efficiently on windows notification model, which cannot be implemented efficiently on windows
(Microsoft monopoly games). (due to Microsoft monopoly games).
.PP .PP
A typical way to use libev under windows is to embed it (see the embedding A typical way to use libev under windows is to embed it (see the embedding
section for details) and use the following \fIevwrap.h\fR header file instead section for details) and use the following \fIevwrap.h\fR header file instead
@ -3886,24 +4097,22 @@ Early versions of winsocket's select only supported waiting for a maximum
of \f(CW64\fR handles (probably owning to the fact that all windows kernels of \f(CW64\fR handles (probably owning to the fact that all windows kernels
can only wait for \f(CW64\fR things at the same time internally; Microsoft can only wait for \f(CW64\fR things at the same time internally; Microsoft
recommends spawning a chain of threads and wait for 63 handles and the recommends spawning a chain of threads and wait for 63 handles and the
previous thread in each. Great). previous thread in each. Sounds great!).
.Sp .Sp
Newer versions support more handles, but you need to define \f(CW\*(C`FD_SETSIZE\*(C'\fR Newer versions support more handles, but you need to define \f(CW\*(C`FD_SETSIZE\*(C'\fR
to some high number (e.g. \f(CW2048\fR) before compiling the winsocket select to some high number (e.g. \f(CW2048\fR) before compiling the winsocket select
call (which might be in libev or elsewhere, for example, perl does its own call (which might be in libev or elsewhere, for example, perl and many
select emulation on windows). other interpreters do their own select emulation on windows).
.Sp .Sp
Another limit is the number of file descriptors in the Microsoft runtime Another limit is the number of file descriptors in the Microsoft runtime
libraries, which by default is \f(CW64\fR (there must be a hidden \fI64\fR fetish libraries, which by default is \f(CW64\fR (there must be a hidden \fI64\fR
or something like this inside Microsoft). You can increase this by calling fetish or something like this inside Microsoft). You can increase this
\&\f(CW\*(C`_setmaxstdio\*(C'\fR, which can increase this limit to \f(CW2048\fR (another by calling \f(CW\*(C`_setmaxstdio\*(C'\fR, which can increase this limit to \f(CW2048\fR
arbitrary limit), but is broken in many versions of the Microsoft runtime (another arbitrary limit), but is broken in many versions of the Microsoft
libraries. runtime libraries. This might get you to about \f(CW512\fR or \f(CW2048\fR sockets
.Sp (depending on windows version and/or the phase of the moon). To get more,
This might get you to about \f(CW512\fR or \f(CW2048\fR sockets (depending on you need to wrap all I/O functions and provide your own fd management, but
windows version and/or the phase of the moon). To get more, you need to the cost of calling select (O(nA\*^X)) will likely make this unworkable.
wrap all I/O functions and provide your own fd management, but the cost of
calling select (O(nA\*^X)) will likely make this unworkable.
.Sh "\s-1PORTABILITY\s0 \s-1REQUIREMENTS\s0" .Sh "\s-1PORTABILITY\s0 \s-1REQUIREMENTS\s0"
.IX Subsection "PORTABILITY REQUIREMENTS" .IX Subsection "PORTABILITY REQUIREMENTS"
In addition to a working ISO-C implementation and of course the In addition to a working ISO-C implementation and of course the
@ -4016,6 +4225,65 @@ watchers becomes O(1) with respect to priority handling.
Sending involves a system call \fIiff\fR there were no other \f(CW\*(C`ev_async_send\*(C'\fR Sending involves a system call \fIiff\fR there were no other \f(CW\*(C`ev_async_send\*(C'\fR
calls in the current loop iteration. Checking for async and signal events calls in the current loop iteration. Checking for async and signal events
involves iterating over all running async watchers or all signal numbers. involves iterating over all running async watchers or all signal numbers.
.SH "GLOSSARY"
.IX Header "GLOSSARY"
.IP "active" 4
.IX Item "active"
A watcher is active as long as it has been started (has been attached to
an event loop) but not yet stopped (disassociated from the event loop).
.IP "application" 4
.IX Item "application"
In this document, an application is whatever is using libev.
.IP "callback" 4
.IX Item "callback"
The address of a function that is called when some event has been
detected. Callbacks are being passed the event loop, the watcher that
received the event, and the actual event bitset.
.IP "callback invocation" 4
.IX Item "callback invocation"
The act of calling the callback associated with a watcher.
.IP "event" 4
.IX Item "event"
A change of state of some external event, such as data now being available
for reading on a file descriptor, time having passed or simply not having
any other events happening anymore.
.Sp
In libev, events are represented as single bits (such as \f(CW\*(C`EV_READ\*(C'\fR or
\&\f(CW\*(C`EV_TIMEOUT\*(C'\fR).
.IP "event library" 4
.IX Item "event library"
A software package implementing an event model and loop.
.IP "event loop" 4
.IX Item "event loop"
An entity that handles and processes external events and converts them
into callback invocations.
.IP "event model" 4
.IX Item "event model"
The model used to describe how an event loop handles and processes
watchers and events.
.IP "pending" 4
.IX Item "pending"
A watcher is pending as soon as the corresponding event has been detected,
and stops being pending as soon as the watcher will be invoked or its
pending status is explicitly cleared by the application.
.Sp
A watcher can be pending, but not active. Stopping a watcher also clears
its pending status.
.IP "real time" 4
.IX Item "real time"
The physical time that is observed. It is apparently strictly monotonic :)
.IP "wall-clock time" 4
.IX Item "wall-clock time"
The time and date as shown on clocks. Unlike real time, it can actually
be wrong and jump forwards and backwards, e.g. when the you adjust your
clock.
.IP "watcher" 4
.IX Item "watcher"
A data structure that describes interest in certain events. Watchers need
to be started (attached to an event loop) before they can receive events.
.IP "watcher invocation" 4
.IX Item "watcher invocation"
The act of calling the callback associated with a watcher.
.SH "AUTHOR" .SH "AUTHOR"
.IX Header "AUTHOR" .IX Header "AUTHOR"
Marc Lehmann <libev@schmorp.de>, with repeated corrections by Mikael Magnusson. Marc Lehmann <libev@schmorp.de>, with repeated corrections by Mikael Magnusson.

474
deps/libev/ev.c

@ -480,25 +480,27 @@ ev_realloc (void *ptr, long size)
/*****************************************************************************/ /*****************************************************************************/
/* file descriptor info structure */
typedef struct typedef struct
{ {
WL head; WL head;
unsigned char events; unsigned char events; /* the events watched for */
unsigned char reify; unsigned char reify; /* flag set when this ANFD needs reification */
unsigned char emask; /* the epoll backend stores the actual kernel mask in here */ unsigned char emask; /* the epoll backend stores the actual kernel mask in here */
unsigned char unused; unsigned char unused;
#if EV_USE_EPOLL #if EV_USE_EPOLL
unsigned int egen; /* generation counter to counter epoll bugs */ unsigned int egen; /* generation counter to counter epoll bugs */
#endif #endif
#if EV_SELECT_IS_WINSOCKET #if EV_SELECT_IS_WINSOCKET
SOCKET handle; SOCKET handle;
#endif #endif
} ANFD; } ANFD;
/* stores the pending event set for a given watcher */
typedef struct typedef struct
{ {
W w; W w;
int events; int events; /* the pending event set for the given watcher */
} ANPENDING; } ANPENDING;
#if EV_USE_INOTIFY #if EV_USE_INOTIFY
@ -511,6 +513,7 @@ typedef struct
/* Heap Entry */ /* Heap Entry */
#if EV_HEAP_CACHE_AT #if EV_HEAP_CACHE_AT
/* a heap element */
typedef struct { typedef struct {
ev_tstamp at; ev_tstamp at;
WT w; WT w;
@ -520,6 +523,7 @@ typedef struct
#define ANHE_at(he) (he).at /* access cached at, read-only */ #define ANHE_at(he) (he).at /* access cached at, read-only */
#define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */ #define ANHE_at_cache(he) (he).at = (he).w->at /* update at from watcher */
#else #else
/* a heap element */
typedef WT ANHE; typedef WT ANHE;
#define ANHE_w(he) (he) #define ANHE_w(he) (he)
@ -572,7 +576,7 @@ ev_time (void)
return tv.tv_sec + tv.tv_usec * 1e-6; return tv.tv_sec + tv.tv_usec * 1e-6;
} }
ev_tstamp inline_size inline_size ev_tstamp
get_clock (void) get_clock (void)
{ {
#if EV_USE_MONOTONIC #if EV_USE_MONOTONIC
@ -627,7 +631,9 @@ ev_sleep (ev_tstamp delay)
#define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */ #define MALLOC_ROUND 4096 /* prefer to allocate in chunks of this size, must be 2**n and >> 4 longs */
int inline_size /* find a suitable new size for the given array, */
/* hopefully by rounding to a ncie-to-malloc size */
inline_size int
array_nextsize (int elem, int cur, int cnt) array_nextsize (int elem, int cur, int cnt)
{ {
int ncur = cur + 1; int ncur = cur + 1;
@ -678,10 +684,16 @@ array_realloc (int elem, void *base, int *cur, int cnt)
#endif #endif
#define array_free(stem, idx) \ #define array_free(stem, idx) \
ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; ev_free (stem ## s idx); stem ## cnt idx = stem ## max idx = 0; stem ## s idx = 0
/*****************************************************************************/ /*****************************************************************************/
/* dummy callback for pending events */
static void noinline
pendingcb (EV_P_ ev_prepare *w, int revents)
{
}
void noinline void noinline
ev_feed_event (EV_P_ void *w, int revents) ev_feed_event (EV_P_ void *w, int revents)
{ {
@ -699,7 +711,22 @@ ev_feed_event (EV_P_ void *w, int revents)
} }
} }
void inline_speed inline_speed void
feed_reverse (EV_P_ W w)
{
array_needsize (W, rfeeds, rfeedmax, rfeedcnt + 1, EMPTY2);
rfeeds [rfeedcnt++] = w;
}
inline_size void
feed_reverse_done (EV_P_ int revents)
{
do
ev_feed_event (EV_A_ rfeeds [--rfeedcnt], revents);
while (rfeedcnt);
}
inline_speed void
queue_events (EV_P_ W *events, int eventcnt, int type) queue_events (EV_P_ W *events, int eventcnt, int type)
{ {
int i; int i;
@ -710,7 +737,7 @@ queue_events (EV_P_ W *events, int eventcnt, int type)
/*****************************************************************************/ /*****************************************************************************/
void inline_speed inline_speed void
fd_event (EV_P_ int fd, int revents) fd_event (EV_P_ int fd, int revents)
{ {
ANFD *anfd = anfds + fd; ANFD *anfd = anfds + fd;
@ -732,7 +759,9 @@ ev_feed_fd_event (EV_P_ int fd, int revents)
fd_event (EV_A_ fd, revents); fd_event (EV_A_ fd, revents);
} }
void inline_size /* make sure the external fd watch events are in-sync */
/* with the kernel/libev internal state */
inline_size void
fd_reify (EV_P) fd_reify (EV_P)
{ {
int i; int i;
@ -768,7 +797,7 @@ fd_reify (EV_P)
anfd->reify = 0; anfd->reify = 0;
anfd->events = events; anfd->events = events;
if (o_events != events || o_reify & EV_IOFDSET) if (o_events != events || o_reify & EV__IOFDSET)
backend_modify (EV_A_ fd, o_events, events); backend_modify (EV_A_ fd, o_events, events);
} }
} }
@ -776,7 +805,8 @@ fd_reify (EV_P)
fdchangecnt = 0; fdchangecnt = 0;
} }
void inline_size /* something about the given fd changed */
inline_size void
fd_change (EV_P_ int fd, int flags) fd_change (EV_P_ int fd, int flags)
{ {
unsigned char reify = anfds [fd].reify; unsigned char reify = anfds [fd].reify;
@ -790,7 +820,8 @@ fd_change (EV_P_ int fd, int flags)
} }
} }
void inline_speed /* the given fd is invalid/unusable, so make sure it doesn't hurt us anymore */
inline_speed void
fd_kill (EV_P_ int fd) fd_kill (EV_P_ int fd)
{ {
ev_io *w; ev_io *w;
@ -802,7 +833,8 @@ fd_kill (EV_P_ int fd)
} }
} }
int inline_size /* check whether the given fd is atcually valid, for error recovery */
inline_size int
fd_valid (int fd) fd_valid (int fd)
{ {
#ifdef _WIN32 #ifdef _WIN32
@ -849,7 +881,7 @@ fd_rearm_all (EV_P)
{ {
anfds [fd].events = 0; anfds [fd].events = 0;
anfds [fd].emask = 0; anfds [fd].emask = 0;
fd_change (EV_A_ fd, EV_IOFDSET | 1); fd_change (EV_A_ fd, EV__IOFDSET | 1);
} }
} }
@ -875,7 +907,7 @@ fd_rearm_all (EV_P)
#define UPHEAP_DONE(p,k) ((p) == (k)) #define UPHEAP_DONE(p,k) ((p) == (k))
/* away from the root */ /* away from the root */
void inline_speed inline_speed void
downheap (ANHE *heap, int N, int k) downheap (ANHE *heap, int N, int k)
{ {
ANHE he = heap [k]; ANHE he = heap [k];
@ -925,7 +957,7 @@ downheap (ANHE *heap, int N, int k)
#define UPHEAP_DONE(p,k) (!(p)) #define UPHEAP_DONE(p,k) (!(p))
/* away from the root */ /* away from the root */
void inline_speed inline_speed void
downheap (ANHE *heap, int N, int k) downheap (ANHE *heap, int N, int k)
{ {
ANHE he = heap [k]; ANHE he = heap [k];
@ -955,7 +987,7 @@ downheap (ANHE *heap, int N, int k)
#endif #endif
/* towards the root */ /* towards the root */
void inline_speed inline_speed void
upheap (ANHE *heap, int k) upheap (ANHE *heap, int k)
{ {
ANHE he = heap [k]; ANHE he = heap [k];
@ -976,7 +1008,8 @@ upheap (ANHE *heap, int k)
ev_active (ANHE_w (he)) = k; ev_active (ANHE_w (he)) = k;
} }
void inline_size /* move an element suitably so it is in a correct place */
inline_size void
adjustheap (ANHE *heap, int N, int k) adjustheap (ANHE *heap, int N, int k)
{ {
if (k > HEAP0 && ANHE_at (heap [HPARENT (k)]) >= ANHE_at (heap [k])) if (k > HEAP0 && ANHE_at (heap [HPARENT (k)]) >= ANHE_at (heap [k]))
@ -986,7 +1019,7 @@ adjustheap (ANHE *heap, int N, int k)
} }
/* rebuild the heap: this function is used only once and executed rarely */ /* rebuild the heap: this function is used only once and executed rarely */
void inline_size inline_size void
reheap (ANHE *heap, int N) reheap (ANHE *heap, int N)
{ {
int i; int i;
@ -999,6 +1032,7 @@ reheap (ANHE *heap, int N)
/*****************************************************************************/ /*****************************************************************************/
/* associate signal watchers to a signal signal */
typedef struct typedef struct
{ {
WL head; WL head;
@ -1012,7 +1046,9 @@ static EV_ATOMIC_T gotsig;
/*****************************************************************************/ /*****************************************************************************/
void inline_speed /* used to prepare libev internal fd's */
/* this is not fork-safe */
inline_speed void
fd_intern (int fd) fd_intern (int fd)
{ {
#ifdef _WIN32 #ifdef _WIN32
@ -1027,14 +1063,14 @@ fd_intern (int fd)
static void noinline static void noinline
evpipe_init (EV_P) evpipe_init (EV_P)
{ {
if (!ev_is_active (&pipeev)) if (!ev_is_active (&pipe_w))
{ {
#if EV_USE_EVENTFD #if EV_USE_EVENTFD
if ((evfd = eventfd (0, 0)) >= 0) if ((evfd = eventfd (0, 0)) >= 0)
{ {
evpipe [0] = -1; evpipe [0] = -1;
fd_intern (evfd); fd_intern (evfd);
ev_io_set (&pipeev, evfd, EV_READ); ev_io_set (&pipe_w, evfd, EV_READ);
} }
else else
#endif #endif
@ -1044,15 +1080,15 @@ evpipe_init (EV_P)
fd_intern (evpipe [0]); fd_intern (evpipe [0]);
fd_intern (evpipe [1]); fd_intern (evpipe [1]);
ev_io_set (&pipeev, evpipe [0], EV_READ); ev_io_set (&pipe_w, evpipe [0], EV_READ);
} }
ev_io_start (EV_A_ &pipeev); ev_io_start (EV_A_ &pipe_w);
ev_unref (EV_A); /* watcher should not keep loop alive */ ev_unref (EV_A); /* watcher should not keep loop alive */
} }
} }
void inline_size inline_size void
evpipe_write (EV_P_ EV_ATOMIC_T *flag) evpipe_write (EV_P_ EV_ATOMIC_T *flag)
{ {
if (!*flag) if (!*flag)
@ -1075,6 +1111,8 @@ evpipe_write (EV_P_ EV_ATOMIC_T *flag)
} }
} }
/* called whenever the libev signal pipe */
/* got some events (signal, async) */
static void static void
pipecb (EV_P_ ev_io *iow, int revents) pipecb (EV_P_ ev_io *iow, int revents)
{ {
@ -1166,7 +1204,8 @@ static ev_signal childev;
# define WIFCONTINUED(status) 0 # define WIFCONTINUED(status) 0
#endif #endif
void inline_speed /* handle a single child status event */
inline_speed void
child_reap (EV_P_ int chain, int pid, int status) child_reap (EV_P_ int chain, int pid, int status)
{ {
ev_child *w; ev_child *w;
@ -1189,6 +1228,7 @@ child_reap (EV_P_ int chain, int pid, int status)
# define WCONTINUED 0 # define WCONTINUED 0
#endif #endif
/* called on sigchld etc., calls waitpid */
static void static void
childcb (EV_P_ ev_signal *sw, int revents) childcb (EV_P_ ev_signal *sw, int revents)
{ {
@ -1323,6 +1363,7 @@ ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval)
timeout_blocktime = interval; timeout_blocktime = interval;
} }
/* initialise a loop structure, must be zero-initialised */
static void noinline static void noinline
loop_init (EV_P_ unsigned int flags) loop_init (EV_P_ unsigned int flags)
{ {
@ -1392,20 +1433,23 @@ loop_init (EV_P_ unsigned int flags)
if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags); if (!backend && (flags & EVBACKEND_SELECT)) backend = select_init (EV_A_ flags);
#endif #endif
ev_init (&pipeev, pipecb); ev_prepare_init (&pending_w, pendingcb);
ev_set_priority (&pipeev, EV_MAXPRI);
ev_init (&pipe_w, pipecb);
ev_set_priority (&pipe_w, EV_MAXPRI);
} }
} }
/* free up a loop structure */
static void noinline static void noinline
loop_destroy (EV_P) loop_destroy (EV_P)
{ {
int i; int i;
if (ev_is_active (&pipeev)) if (ev_is_active (&pipe_w))
{ {
ev_ref (EV_A); /* signal watcher */ ev_ref (EV_A); /* signal watcher */
ev_io_stop (EV_A_ &pipeev); ev_io_stop (EV_A_ &pipe_w);
#if EV_USE_EVENTFD #if EV_USE_EVENTFD
if (evfd >= 0) if (evfd >= 0)
@ -1454,6 +1498,7 @@ loop_destroy (EV_P)
ev_free (anfds); anfdmax = 0; ev_free (anfds); anfdmax = 0;
/* have to use the microsoft-never-gets-it-right macro */ /* have to use the microsoft-never-gets-it-right macro */
array_free (rfeed, EMPTY);
array_free (fdchange, EMPTY); array_free (fdchange, EMPTY);
array_free (timer, EMPTY); array_free (timer, EMPTY);
#if EV_PERIODIC_ENABLE #if EV_PERIODIC_ENABLE
@ -1472,10 +1517,10 @@ loop_destroy (EV_P)
} }
#if EV_USE_INOTIFY #if EV_USE_INOTIFY
void inline_size infy_fork (EV_P); inline_size void infy_fork (EV_P);
#endif #endif
void inline_size inline_size void
loop_fork (EV_P) loop_fork (EV_P)
{ {
#if EV_USE_PORT #if EV_USE_PORT
@ -1491,7 +1536,7 @@ loop_fork (EV_P)
infy_fork (EV_A); infy_fork (EV_A);
#endif #endif
if (ev_is_active (&pipeev)) if (ev_is_active (&pipe_w))
{ {
/* this "locks" the handlers against writing to the pipe */ /* this "locks" the handlers against writing to the pipe */
/* while we modify the fd vars */ /* while we modify the fd vars */
@ -1501,7 +1546,7 @@ loop_fork (EV_P)
#endif #endif
ev_ref (EV_A); ev_ref (EV_A);
ev_io_stop (EV_A_ &pipeev); ev_io_stop (EV_A_ &pipe_w);
#if EV_USE_EVENTFD #if EV_USE_EVENTFD
if (evfd >= 0) if (evfd >= 0)
@ -1516,7 +1561,7 @@ loop_fork (EV_P)
evpipe_init (EV_A); evpipe_init (EV_A);
/* now iterate over everything, in case we missed something */ /* now iterate over everything, in case we missed something */
pipecb (EV_A_ &pipeev, EV_READ); pipecb (EV_A_ &pipe_w, EV_READ);
} }
postfork = 0; postfork = 0;
@ -1722,7 +1767,7 @@ ev_invoke (EV_P_ void *w, int revents)
EV_CB_INVOKE ((W)w, revents); EV_CB_INVOKE ((W)w, revents);
} }
void inline_speed inline_speed void
call_pending (EV_P) call_pending (EV_P)
{ {
int pri; int pri;
@ -1732,19 +1777,19 @@ call_pending (EV_P)
{ {
ANPENDING *p = pendings [pri] + --pendingcnt [pri]; ANPENDING *p = pendings [pri] + --pendingcnt [pri];
if (expect_true (p->w)) /*assert (("libev: non-pending watcher on pending list", p->w->pending));*/
{ /* ^ this is no longer true, as pending_w could be here */
/*assert (("libev: non-pending watcher on pending list", p->w->pending));*/
p->w->pending = 0; p->w->pending = 0;
EV_CB_INVOKE (p->w, p->events); EV_CB_INVOKE (p->w, p->events);
EV_FREQUENT_CHECK; EV_FREQUENT_CHECK;
}
} }
} }
#if EV_IDLE_ENABLE #if EV_IDLE_ENABLE
void inline_size /* make idle watchers pending. this handles the "call-idle */
/* only when higher priorities are idle" logic */
inline_size void
idle_reify (EV_P) idle_reify (EV_P)
{ {
if (expect_false (idleall)) if (expect_false (idleall))
@ -1766,86 +1811,104 @@ idle_reify (EV_P)
} }
#endif #endif
void inline_size /* make timers pending */
inline_size void
timers_reify (EV_P) timers_reify (EV_P)
{ {
EV_FREQUENT_CHECK; EV_FREQUENT_CHECK;
while (timercnt && ANHE_at (timers [HEAP0]) < mn_now) if (timercnt && ANHE_at (timers [HEAP0]) < mn_now)
{ {
ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]); do
{
ev_timer *w = (ev_timer *)ANHE_w (timers [HEAP0]);
/*assert (("libev: inactive timer on timer heap detected", ev_is_active (w)));*/ /*assert (("libev: inactive timer on timer heap detected", ev_is_active (w)));*/
/* first reschedule or stop timer */ /* first reschedule or stop timer */
if (w->repeat) if (w->repeat)
{ {
ev_at (w) += w->repeat; ev_at (w) += w->repeat;
if (ev_at (w) < mn_now) if (ev_at (w) < mn_now)
ev_at (w) = mn_now; ev_at (w) = mn_now;
assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.)); assert (("libev: negative ev_timer repeat value found while processing timers", w->repeat > 0.));
ANHE_at_cache (timers [HEAP0]); ANHE_at_cache (timers [HEAP0]);
downheap (timers, timercnt, HEAP0); downheap (timers, timercnt, HEAP0);
}
else
ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
EV_FREQUENT_CHECK;
feed_reverse (EV_A_ (W)w);
} }
else while (timercnt && ANHE_at (timers [HEAP0]) < mn_now);
ev_timer_stop (EV_A_ w); /* nonrepeating: stop timer */
EV_FREQUENT_CHECK; feed_reverse_done (EV_A_ EV_TIMEOUT);
ev_feed_event (EV_A_ (W)w, EV_TIMEOUT);
} }
} }
#if EV_PERIODIC_ENABLE #if EV_PERIODIC_ENABLE
void inline_size /* make periodics pending */
inline_size void
periodics_reify (EV_P) periodics_reify (EV_P)
{ {
EV_FREQUENT_CHECK; EV_FREQUENT_CHECK;
while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now) while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now)
{ {
ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]); int feed_count = 0;
/*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/ do
/* first reschedule or stop timer */
if (w->reschedule_cb)
{ {
ev_at (w) = w->reschedule_cb (w, ev_rt_now); ev_periodic *w = (ev_periodic *)ANHE_w (periodics [HEAP0]);
assert (("libev: ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now)); /*assert (("libev: inactive timer on periodic heap detected", ev_is_active (w)));*/
ANHE_at_cache (periodics [HEAP0]); /* first reschedule or stop timer */
downheap (periodics, periodiccnt, HEAP0); if (w->reschedule_cb)
}
else if (w->interval)
{
ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
/* if next trigger time is not sufficiently in the future, put it there */
/* this might happen because of floating point inexactness */
if (ev_at (w) - ev_rt_now < TIME_EPSILON)
{ {
ev_at (w) += w->interval; ev_at (w) = w->reschedule_cb (w, ev_rt_now);
/* if interval is unreasonably low we might still have a time in the past */ assert (("libev: ev_periodic reschedule callback returned time in the past", ev_at (w) >= ev_rt_now));
/* so correct this. this will make the periodic very inexact, but the user */
/* has effectively asked to get triggered more often than possible */ ANHE_at_cache (periodics [HEAP0]);
if (ev_at (w) < ev_rt_now) downheap (periodics, periodiccnt, HEAP0);
ev_at (w) = ev_rt_now;
} }
else if (w->interval)
{
ev_at (w) = w->offset + ceil ((ev_rt_now - w->offset) / w->interval) * w->interval;
/* if next trigger time is not sufficiently in the future, put it there */
/* this might happen because of floating point inexactness */
if (ev_at (w) - ev_rt_now < TIME_EPSILON)
{
ev_at (w) += w->interval;
/* if interval is unreasonably low we might still have a time in the past */
/* so correct this. this will make the periodic very inexact, but the user */
/* has effectively asked to get triggered more often than possible */
if (ev_at (w) < ev_rt_now)
ev_at (w) = ev_rt_now;
}
ANHE_at_cache (periodics [HEAP0]); ANHE_at_cache (periodics [HEAP0]);
downheap (periodics, periodiccnt, HEAP0); downheap (periodics, periodiccnt, HEAP0);
}
else
ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
EV_FREQUENT_CHECK;
feed_reverse (EV_A_ (W)w);
} }
else while (periodiccnt && ANHE_at (periodics [HEAP0]) < ev_rt_now);
ev_periodic_stop (EV_A_ w); /* nonrepeating: stop timer */
EV_FREQUENT_CHECK; feed_reverse_done (EV_A_ EV_PERIODIC);
ev_feed_event (EV_A_ (W)w, EV_PERIODIC);
} }
} }
/* simply recalculate all periodics */
/* TODO: maybe ensure that at leats one event happens when jumping forward? */
static void noinline static void noinline
periodics_reschedule (EV_P) periodics_reschedule (EV_P)
{ {
@ -1868,7 +1931,23 @@ periodics_reschedule (EV_P)
} }
#endif #endif
void inline_speed /* adjust all timers by a given offset */
static void noinline
timers_reschedule (EV_P_ ev_tstamp adjust)
{
int i;
for (i = 0; i < timercnt; ++i)
{
ANHE *he = timers + i + HEAP0;
ANHE_w (*he)->at += adjust;
ANHE_at_cache (*he);
}
}
/* fetch new monotonic and realtime times from the kernel */
/* also detetc if there was a timejump, and act accordingly */
inline_speed void
time_update (EV_P_ ev_tstamp max_block) time_update (EV_P_ ev_tstamp max_block)
{ {
int i; int i;
@ -1911,11 +1990,11 @@ time_update (EV_P_ ev_tstamp max_block)
now_floor = mn_now; now_floor = mn_now;
} }
/* no timer adjustment, as the monotonic clock doesn't jump */
/* timers_reschedule (EV_A_ rtmn_diff - odiff) */
# if EV_PERIODIC_ENABLE # if EV_PERIODIC_ENABLE
periodics_reschedule (EV_A); periodics_reschedule (EV_A);
# endif # endif
/* no timer adjustment, as the monotonic clock doesn't jump */
/* timers_reschedule (EV_A_ rtmn_diff - odiff) */
} }
else else
#endif #endif
@ -1924,40 +2003,17 @@ time_update (EV_P_ ev_tstamp max_block)
if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP)) if (expect_false (mn_now > ev_rt_now || ev_rt_now > mn_now + max_block + MIN_TIMEJUMP))
{ {
/* adjust timers. this is easy, as the offset is the same for all of them */
timers_reschedule (EV_A_ ev_rt_now - mn_now);
#if EV_PERIODIC_ENABLE #if EV_PERIODIC_ENABLE
periodics_reschedule (EV_A); periodics_reschedule (EV_A);
#endif #endif
/* adjust timers. this is easy, as the offset is the same for all of them */
for (i = 0; i < timercnt; ++i)
{
ANHE *he = timers + i + HEAP0;
ANHE_w (*he)->at += ev_rt_now - mn_now;
ANHE_at_cache (*he);
}
} }
mn_now = ev_rt_now; mn_now = ev_rt_now;
} }
} }
void
ev_ref (EV_P)
{
++activecnt;
}
void
ev_unref (EV_P)
{
--activecnt;
}
void
ev_now_update (EV_P)
{
time_update (EV_A_ 1e100);
}
static int loop_done; static int loop_done;
void void
@ -1999,9 +2055,6 @@ ev_loop (EV_P_ int flags)
call_pending (EV_A); call_pending (EV_A);
} }
if (expect_false (!activecnt))
break;
/* we might have forked, so reify kernel state if necessary */ /* we might have forked, so reify kernel state if necessary */
if (expect_false (postfork)) if (expect_false (postfork))
loop_fork (EV_A); loop_fork (EV_A);
@ -2090,16 +2143,54 @@ ev_unloop (EV_P_ int how)
loop_done = how; loop_done = how;
} }
void
ev_ref (EV_P)
{
++activecnt;
}
void
ev_unref (EV_P)
{
--activecnt;
}
void
ev_now_update (EV_P)
{
time_update (EV_A_ 1e100);
}
void
ev_suspend (EV_P)
{
ev_now_update (EV_A);
}
void
ev_resume (EV_P)
{
ev_tstamp mn_prev = mn_now;
ev_now_update (EV_A);
timers_reschedule (EV_A_ mn_now - mn_prev);
#if EV_PERIODIC_ENABLE
/* TODO: really do this? */
periodics_reschedule (EV_A);
#endif
}
/*****************************************************************************/ /*****************************************************************************/
/* singly-linked list management, used when the expected list length is short */
void inline_size inline_size void
wlist_add (WL *head, WL elem) wlist_add (WL *head, WL elem)
{ {
elem->next = *head; elem->next = *head;
*head = elem; *head = elem;
} }
void inline_size inline_size void
wlist_del (WL *head, WL elem) wlist_del (WL *head, WL elem)
{ {
while (*head) while (*head)
@ -2114,12 +2205,13 @@ wlist_del (WL *head, WL elem)
} }
} }
void inline_speed /* internal, faster, version of ev_clear_pending */
inline_speed void
clear_pending (EV_P_ W w) clear_pending (EV_P_ W w)
{ {
if (w->pending) if (w->pending)
{ {
pendings [ABSPRI (w)][w->pending - 1].w = 0; pendings [ABSPRI (w)][w->pending - 1].w = (W)&pending_w;
w->pending = 0; w->pending = 0;
} }
} }
@ -2133,15 +2225,15 @@ ev_clear_pending (EV_P_ void *w)
if (expect_true (pending)) if (expect_true (pending))
{ {
ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1; ANPENDING *p = pendings [ABSPRI (w_)] + pending - 1;
p->w = (W)&pending_w;
w_->pending = 0; w_->pending = 0;
p->w = 0;
return p->events; return p->events;
} }
else else
return 0; return 0;
} }
void inline_size inline_size void
pri_adjust (EV_P_ W w) pri_adjust (EV_P_ W w)
{ {
int pri = w->priority; int pri = w->priority;
@ -2150,7 +2242,7 @@ pri_adjust (EV_P_ W w)
w->priority = pri; w->priority = pri;
} }
void inline_speed inline_speed void
ev_start (EV_P_ W w, int active) ev_start (EV_P_ W w, int active)
{ {
pri_adjust (EV_A_ w); pri_adjust (EV_A_ w);
@ -2158,7 +2250,7 @@ ev_start (EV_P_ W w, int active)
ev_ref (EV_A); ev_ref (EV_A);
} }
void inline_size inline_size void
ev_stop (EV_P_ W w) ev_stop (EV_P_ W w)
{ {
ev_unref (EV_A); ev_unref (EV_A);
@ -2176,7 +2268,7 @@ ev_io_start (EV_P_ ev_io *w)
return; return;
assert (("libev: ev_io_start called with negative fd", fd >= 0)); assert (("libev: ev_io_start called with negative fd", fd >= 0));
assert (("libev: ev_io start called with illegal event mask", !(w->events & ~(EV_IOFDSET | EV_READ | EV_WRITE)))); assert (("libev: ev_io start called with illegal event mask", !(w->events & ~(EV__IOFDSET | EV_READ | EV_WRITE))));
EV_FREQUENT_CHECK; EV_FREQUENT_CHECK;
@ -2184,8 +2276,8 @@ ev_io_start (EV_P_ ev_io *w)
array_needsize (ANFD, anfds, anfdmax, fd + 1, array_init_zero); array_needsize (ANFD, anfds, anfdmax, fd + 1, array_init_zero);
wlist_add (&anfds[fd].head, (WL)w); wlist_add (&anfds[fd].head, (WL)w);
fd_change (EV_A_ fd, w->events & EV_IOFDSET | 1); fd_change (EV_A_ fd, w->events & EV__IOFDSET | 1);
w->events &= ~EV_IOFDSET; w->events &= ~EV__IOFDSET;
EV_FREQUENT_CHECK; EV_FREQUENT_CHECK;
} }
@ -2593,7 +2685,7 @@ infy_cb (EV_P_ ev_io *w, int revents)
infy_wd (EV_A_ ev->wd, ev->wd, ev); infy_wd (EV_A_ ev->wd, ev->wd, ev);
} }
void inline_size inline_size void
check_2625 (EV_P) check_2625 (EV_P)
{ {
/* kernels < 2.6.25 are borked /* kernels < 2.6.25 are borked
@ -2616,7 +2708,7 @@ check_2625 (EV_P)
fs_2625 = 1; fs_2625 = 1;
} }
void inline_size inline_size void
infy_init (EV_P) infy_init (EV_P)
{ {
if (fs_fd != -2) if (fs_fd != -2)
@ -2636,7 +2728,7 @@ infy_init (EV_P)
} }
} }
void inline_size inline_size void
infy_fork (EV_P) infy_fork (EV_P)
{ {
int slot; int slot;
@ -3148,6 +3240,114 @@ ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, vo
} }
} }
/*****************************************************************************/
#if EV_WALK_ENABLE
void
ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w))
{
int i, j;
ev_watcher_list *wl, *wn;
if (types & (EV_IO | EV_EMBED))
for (i = 0; i < anfdmax; ++i)
for (wl = anfds [i].head; wl; )
{
wn = wl->next;
#if EV_EMBED_ENABLE
if (ev_cb ((ev_io *)wl) == embed_io_cb)
{
if (types & EV_EMBED)
cb (EV_A_ EV_EMBED, ((char *)wl) - offsetof (struct ev_embed, io));
}
else
#endif
#if EV_USE_INOTIFY
if (ev_cb ((ev_io *)wl) == infy_cb)
;
else
#endif
if ((ev_io *)wl != &pipe_w)
if (types & EV_IO)
cb (EV_A_ EV_IO, wl);
wl = wn;
}
if (types & (EV_TIMER | EV_STAT))
for (i = timercnt + HEAP0; i-- > HEAP0; )
#if EV_STAT_ENABLE
/*TODO: timer is not always active*/
if (ev_cb ((ev_timer *)ANHE_w (timers [i])) == stat_timer_cb)
{
if (types & EV_STAT)
cb (EV_A_ EV_STAT, ((char *)ANHE_w (timers [i])) - offsetof (struct ev_stat, timer));
}
else
#endif
if (types & EV_TIMER)
cb (EV_A_ EV_TIMER, ANHE_w (timers [i]));
#if EV_PERIODIC_ENABLE
if (types & EV_PERIODIC)
for (i = periodiccnt + HEAP0; i-- > HEAP0; )
cb (EV_A_ EV_PERIODIC, ANHE_w (periodics [i]));
#endif
#if EV_IDLE_ENABLE
if (types & EV_IDLE)
for (j = NUMPRI; i--; )
for (i = idlecnt [j]; i--; )
cb (EV_A_ EV_IDLE, idles [j][i]);
#endif
#if EV_FORK_ENABLE
if (types & EV_FORK)
for (i = forkcnt; i--; )
if (ev_cb (forks [i]) != embed_fork_cb)
cb (EV_A_ EV_FORK, forks [i]);
#endif
#if EV_ASYNC_ENABLE
if (types & EV_ASYNC)
for (i = asynccnt; i--; )
cb (EV_A_ EV_ASYNC, asyncs [i]);
#endif
if (types & EV_PREPARE)
for (i = preparecnt; i--; )
#if EV_EMBED_ENABLE
if (ev_cb (prepares [i]) != embed_prepare_cb)
#endif
cb (EV_A_ EV_PREPARE, prepares [i]);
if (types & EV_CHECK)
for (i = checkcnt; i--; )
cb (EV_A_ EV_CHECK, checks [i]);
if (types & EV_SIGNAL)
for (i = 0; i < signalmax; ++i)
for (wl = signals [i].head; wl; )
{
wn = wl->next;
cb (EV_A_ EV_SIGNAL, wl);
wl = wn;
}
if (types & EV_CHILD)
for (i = EV_PID_HASHSIZE; i--; )
for (wl = childs [i]; wl; )
{
wn = wl->next;
cb (EV_A_ EV_CHILD, wl);
wl = wn;
}
/* EV_STAT 0x00001000 /* stat data changed */
/* EV_EMBED 0x00010000 /* embedded event loop needs sweep */
}
#endif
#if EV_MULTIPLICITY #if EV_MULTIPLICITY
#include "ev_wrap.h" #include "ev_wrap.h"
#endif #endif

98
deps/libev/ev.h

@ -82,6 +82,10 @@ typedef double ev_tstamp;
# define EV_ASYNC_ENABLE 1 # define EV_ASYNC_ENABLE 1
#endif #endif
#ifndef EV_WALK_ENABLE
# define EV_WALK_ENABLE 0 /* not yet */
#endif
#ifndef EV_ATOMIC_T #ifndef EV_ATOMIC_T
# include <signal.h> # include <signal.h>
# define EV_ATOMIC_T sig_atomic_t volatile # define EV_ATOMIC_T sig_atomic_t volatile
@ -133,8 +137,10 @@ struct ev_loop;
#define EV_NONE 0x00 /* no events */ #define EV_NONE 0x00 /* no events */
#define EV_READ 0x01 /* ev_io detected read will not block */ #define EV_READ 0x01 /* ev_io detected read will not block */
#define EV_WRITE 0x02 /* ev_io detected write will not block */ #define EV_WRITE 0x02 /* ev_io detected write will not block */
#define EV_IOFDSET 0x80 /* internal use only */ #define EV__IOFDSET 0x80 /* internal use only */
#define EV_IO EV_READ /* alias for type-detection */
#define EV_TIMEOUT 0x00000100 /* timer timed out */ #define EV_TIMEOUT 0x00000100 /* timer timed out */
#define EV_TIMER EV_TIMEOUT /* alias for type-detection */
#define EV_PERIODIC 0x00000200 /* periodic timer timed out */ #define EV_PERIODIC 0x00000200 /* periodic timer timed out */
#define EV_SIGNAL 0x00000400 /* signal was received */ #define EV_SIGNAL 0x00000400 /* signal was received */
#define EV_CHILD 0x00000800 /* child/pid had status change */ #define EV_CHILD 0x00000800 /* child/pid had status change */
@ -145,6 +151,7 @@ struct ev_loop;
#define EV_EMBED 0x00010000 /* embedded event loop needs sweep */ #define EV_EMBED 0x00010000 /* embedded event loop needs sweep */
#define EV_FORK 0x00020000 /* event loop resumed in child */ #define EV_FORK 0x00020000 /* event loop resumed in child */
#define EV_ASYNC 0x00040000 /* async intra-loop signal */ #define EV_ASYNC 0x00040000 /* async intra-loop signal */
#define EV_CUSTOM 0x01000000 /* for use by user code */
#define EV_ERROR 0x80000000 /* sent when an error occurs */ #define EV_ERROR 0x80000000 /* sent when an error occurs */
/* can be used to add custom fields to all watchers, while losing binary compatibility */ /* can be used to add custom fields to all watchers, while losing binary compatibility */
@ -491,6 +498,13 @@ void ev_default_destroy (void); /* destroy the default loop */
/* you can actually call it at any time, anywhere :) */ /* you can actually call it at any time, anywhere :) */
void ev_default_fork (void); void ev_default_fork (void);
#if EV_WALK_ENABLE
/* walk (almost) all watchers in the loop of a given type, invoking the */
/* callback on every such watcher. The callback might stop the watcher, */
/* but do nothing else with the loop */
void ev_walk (EV_P_ int types, void (*cb)(EV_P_ int type, void *w));
#endif
unsigned int ev_backend (EV_P); /* backend in use by loop */ unsigned int ev_backend (EV_P); /* backend in use by loop */
unsigned int ev_loop_count (EV_P); /* number of loop iterations */ unsigned int ev_loop_count (EV_P); /* number of loop iterations */
#endif /* prototypes */ #endif /* prototypes */
@ -516,8 +530,16 @@ void ev_set_timeout_collect_interval (EV_P_ ev_tstamp interval); /* sleep at lea
void ev_ref (EV_P); void ev_ref (EV_P);
void ev_unref (EV_P); void ev_unref (EV_P);
/* convenience function, wait for a single event, without registering an event watcher */ /*
/* if timeout is < 0, do wait indefinitely */ * stop/start the timer handling.
*/
void ev_suspend (EV_P);
void ev_resume (EV_P);
/*
* convenience function, wait for a single event, without registering an event watcher
* if timeout is < 0, do wait indefinitely
*/
void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg); void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revents, void *arg), void *arg);
#endif #endif
@ -530,43 +552,43 @@ void ev_once (EV_P_ int fd, int events, ev_tstamp timeout, void (*cb)(int revent
ev_set_cb ((ev), cb_); \ ev_set_cb ((ev), cb_); \
} while (0) } while (0)
#define ev_io_set(ev,fd_,events_) do { (ev)->fd = (fd_); (ev)->events = (events_) | EV_IOFDSET; } while (0) #define ev_io_set(ev,fd_,events_) do { (ev)->fd = (fd_); (ev)->events = (events_) | EV__IOFDSET; } while (0)
#define ev_timer_set(ev,after_,repeat_) do { ((ev_watcher_time *)(ev))->at = (after_); (ev)->repeat = (repeat_); } while (0) #define ev_timer_set(ev,after_,repeat_) do { ((ev_watcher_time *)(ev))->at = (after_); (ev)->repeat = (repeat_); } while (0)
#define ev_periodic_set(ev,ofs_,ival_,res_) do { (ev)->offset = (ofs_); (ev)->interval = (ival_); (ev)->reschedule_cb= (res_); } while (0) #define ev_periodic_set(ev,ofs_,ival_,rcb_) do { (ev)->offset = (ofs_); (ev)->interval = (ival_); (ev)->reschedule_cb = (rcb_); } while (0)
#define ev_signal_set(ev,signum_) do { (ev)->signum = (signum_); } while (0) #define ev_signal_set(ev,signum_) do { (ev)->signum = (signum_); } while (0)
#define ev_child_set(ev,pid_,trace_) do { (ev)->pid = (pid_); (ev)->flags = !!(trace_); } while (0) #define ev_child_set(ev,pid_,trace_) do { (ev)->pid = (pid_); (ev)->flags = !!(trace_); } while (0)
#define ev_stat_set(ev,path_,interval_) do { (ev)->path = (path_); (ev)->interval = (interval_); (ev)->wd = -2; } while (0) #define ev_stat_set(ev,path_,interval_) do { (ev)->path = (path_); (ev)->interval = (interval_); (ev)->wd = -2; } while (0)
#define ev_idle_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_idle_set(ev) /* nop, yes, this is a serious in-joke */
#define ev_prepare_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_prepare_set(ev) /* nop, yes, this is a serious in-joke */
#define ev_check_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_check_set(ev) /* nop, yes, this is a serious in-joke */
#define ev_embed_set(ev,other_) do { (ev)->other = (other_); } while (0) #define ev_embed_set(ev,other_) do { (ev)->other = (other_); } while (0)
#define ev_fork_set(ev) /* nop, yes, this is a serious in-joke */ #define ev_fork_set(ev) /* nop, yes, this is a serious in-joke */
#define ev_async_set(ev) do { (ev)->sent = 0; } while (0) #define ev_async_set(ev) do { (ev)->sent = 0; } while (0)
#define ev_io_init(ev,cb,fd,events) do { ev_init ((ev), (cb)); ev_io_set ((ev),(fd),(events)); } while (0) #define ev_io_init(ev,cb,fd,events) do { ev_init ((ev), (cb)); ev_io_set ((ev),(fd),(events)); } while (0)
#define ev_timer_init(ev,cb,after,repeat) do { ev_init ((ev), (cb)); ev_timer_set ((ev),(after),(repeat)); } while (0) #define ev_timer_init(ev,cb,after,repeat) do { ev_init ((ev), (cb)); ev_timer_set ((ev),(after),(repeat)); } while (0)
#define ev_periodic_init(ev,cb,at,ival,res) do { ev_init ((ev), (cb)); ev_periodic_set ((ev),(at),(ival),(res)); } while (0) #define ev_periodic_init(ev,cb,ofs,ival,rcb) do { ev_init ((ev), (cb)); ev_periodic_set ((ev),(ofs),(ival),(rcb)); } while (0)
#define ev_signal_init(ev,cb,signum) do { ev_init ((ev), (cb)); ev_signal_set ((ev), (signum)); } while (0) #define ev_signal_init(ev,cb,signum) do { ev_init ((ev), (cb)); ev_signal_set ((ev), (signum)); } while (0)
#define ev_child_init(ev,cb,pid,trace) do { ev_init ((ev), (cb)); ev_child_set ((ev),(pid),(trace)); } while (0) #define ev_child_init(ev,cb,pid,trace) do { ev_init ((ev), (cb)); ev_child_set ((ev),(pid),(trace)); } while (0)
#define ev_stat_init(ev,cb,path,interval) do { ev_init ((ev), (cb)); ev_stat_set ((ev),(path),(interval)); } while (0) #define ev_stat_init(ev,cb,path,interval) do { ev_init ((ev), (cb)); ev_stat_set ((ev),(path),(interval)); } while (0)
#define ev_idle_init(ev,cb) do { ev_init ((ev), (cb)); ev_idle_set ((ev)); } while (0) #define ev_idle_init(ev,cb) do { ev_init ((ev), (cb)); ev_idle_set ((ev)); } while (0)
#define ev_prepare_init(ev,cb) do { ev_init ((ev), (cb)); ev_prepare_set ((ev)); } while (0) #define ev_prepare_init(ev,cb) do { ev_init ((ev), (cb)); ev_prepare_set ((ev)); } while (0)
#define ev_check_init(ev,cb) do { ev_init ((ev), (cb)); ev_check_set ((ev)); } while (0) #define ev_check_init(ev,cb) do { ev_init ((ev), (cb)); ev_check_set ((ev)); } while (0)
#define ev_embed_init(ev,cb,other) do { ev_init ((ev), (cb)); ev_embed_set ((ev),(other)); } while (0) #define ev_embed_init(ev,cb,other) do { ev_init ((ev), (cb)); ev_embed_set ((ev),(other)); } while (0)
#define ev_fork_init(ev,cb) do { ev_init ((ev), (cb)); ev_fork_set ((ev)); } while (0) #define ev_fork_init(ev,cb) do { ev_init ((ev), (cb)); ev_fork_set ((ev)); } while (0)
#define ev_async_init(ev,cb) do { ev_init ((ev), (cb)); ev_async_set ((ev)); } while (0) #define ev_async_init(ev,cb) do { ev_init ((ev), (cb)); ev_async_set ((ev)); } while (0)
#define ev_is_pending(ev) (0 + ((ev_watcher *)(void *)(ev))->pending) /* ro, true when watcher is waiting for callback invocation */ #define ev_is_pending(ev) (0 + ((ev_watcher *)(void *)(ev))->pending) /* ro, true when watcher is waiting for callback invocation */
#define ev_is_active(ev) (0 + ((ev_watcher *)(void *)(ev))->active) /* ro, true when the watcher has been started */ #define ev_is_active(ev) (0 + ((ev_watcher *)(void *)(ev))->active) /* ro, true when the watcher has been started */
#define ev_priority(ev) ((((ev_watcher *)(void *)(ev))->priority) + 0) #define ev_priority(ev) ((((ev_watcher *)(void *)(ev))->priority) + 0)
#define ev_cb(ev) (ev)->cb /* rw */ #define ev_cb(ev) (ev)->cb /* rw */
#define ev_set_priority(ev,pri) ((ev_watcher *)(void *)(ev))->priority = (pri) #define ev_set_priority(ev,pri) ((ev_watcher *)(void *)(ev))->priority = (pri)
#define ev_periodic_at(ev) (((ev_watcher_time *)(ev))->at + 0.) #define ev_periodic_at(ev) (((ev_watcher_time *)(ev))->at + 0.)
#ifndef ev_set_cb #ifndef ev_set_cb
# define ev_set_cb(ev,cb_) ev_cb (ev) = (cb_) # define ev_set_cb(ev,cb_) ev_cb (ev) = (cb_)
#endif #endif
/* stopping (enabling, adding) a watcher does nothing if it is already running */ /* stopping (enabling, adding) a watcher does nothing if it is already running */

458
deps/libev/ev.pod

@ -64,12 +64,24 @@ libev - a high performance full-featured event loop written in C
return 0; return 0;
} }
=head1 DESCRIPTION =head1 ABOUT THIS DOCUMENT
This document documents the libev software package.
The newest version of this document is also available as an html-formatted The newest version of this document is also available as an html-formatted
web page you might find easier to navigate when reading it for the first web page you might find easier to navigate when reading it for the first
time: L<http://pod.tst.eu/http://cvs.schmorp.de/libev/ev.pod>. time: L<http://pod.tst.eu/http://cvs.schmorp.de/libev/ev.pod>.
While this document tries to be as complete as possible in documenting
libev, its usage and the rationale behind its design, it is not a tutorial
on event-based programming, nor will it introduce event-based programming
with libev.
Familarity with event based programming techniques in general is assumed
throughout this document.
=head1 ABOUT LIBEV
Libev is an event loop: you register interest in certain events (such as a Libev is an event loop: you register interest in certain events (such as a
file descriptor being readable or a timeout occurring), and it will manage file descriptor being readable or a timeout occurring), and it will manage
these event sources and provide your program with events. these event sources and provide your program with events.
@ -112,12 +124,12 @@ this argument.
=head2 TIME REPRESENTATION =head2 TIME REPRESENTATION
Libev represents time as a single floating point number, representing the Libev represents time as a single floating point number, representing
(fractional) number of seconds since the (POSIX) epoch (somewhere near the (fractional) number of seconds since the (POSIX) epoch (somewhere
the beginning of 1970, details are complicated, don't ask). This type is near the beginning of 1970, details are complicated, don't ask). This
called C<ev_tstamp>, which is what you should use too. It usually aliases type is called C<ev_tstamp>, which is what you should use too. It usually
to the C<double> type in C, and when you need to do any calculations on aliases to the C<double> type in C. When you need to do any calculations
it, you should treat it as some floating point value. Unlike the name on it, you should treat it as some floating point value. Unlike the name
component C<stamp> might indicate, it is also used for time differences component C<stamp> might indicate, it is also used for time differences
throughout libev. throughout libev.
@ -634,7 +646,33 @@ This function is rarely useful, but when some event callback runs for a
very long time without entering the event loop, updating libev's idea of very long time without entering the event loop, updating libev's idea of
the current time is a good idea. the current time is a good idea.
See also "The special problem of time updates" in the C<ev_timer> section. See also L<The special problem of time updates> in the C<ev_timer> section.
=item ev_suspend (loop)
=item ev_resume (loop)
These two functions suspend and resume a loop, for use when the loop is
not used for a while and timeouts should not be processed.
A typical use case would be an interactive program such as a game: When
the user presses C<^Z> to suspend the game and resumes it an hour later it
would be best to handle timeouts as if no time had actually passed while
the program was suspended. This can be achieved by calling C<ev_suspend>
in your C<SIGTSTP> handler, sending yourself a C<SIGSTOP> and calling
C<ev_resume> directly afterwards to resume timer processing.
Effectively, all C<ev_timer> watchers will be delayed by the time spend
between C<ev_suspend> and C<ev_resume>, and all C<ev_periodic> watchers
will be rescheduled (that is, they will lose any events that would have
occured while suspended).
After calling C<ev_suspend> you B<must not> call I<any> function on the
given loop other than C<ev_resume>, and you B<must not> call C<ev_resume>
without a previous call to C<ev_suspend>.
Calling C<ev_suspend>/C<ev_resume> has the side effect of updating the
event loop time (see C<ev_now_update>).
=item ev_loop (loop, int flags) =item ev_loop (loop, int flags)
@ -728,13 +766,15 @@ If you have a watcher you never unregister that should not keep C<ev_loop>
from returning, call ev_unref() after starting, and ev_ref() before from returning, call ev_unref() after starting, and ev_ref() before
stopping it. stopping it.
As an example, libev itself uses this for its internal signal pipe: It is As an example, libev itself uses this for its internal signal pipe: It
not visible to the libev user and should not keep C<ev_loop> from exiting is not visible to the libev user and should not keep C<ev_loop> from
if no event watchers registered by it are active. It is also an excellent exiting if no event watchers registered by it are active. It is also an
way to do this for generic recurring timers or from within third-party excellent way to do this for generic recurring timers or from within
libraries. Just remember to I<unref after start> and I<ref before stop> third-party libraries. Just remember to I<unref after start> and I<ref
(but only if the watcher wasn't active before, or was active before, before stop> (but only if the watcher wasn't active before, or was active
respectively). before, respectively. Note also that libev might stop watchers itself
(e.g. non-repeating timers) in which case you have to C<ev_ref>
in the callback).
Example: Create a signal watcher, but keep it from keeping C<ev_loop> Example: Create a signal watcher, but keep it from keeping C<ev_loop>
running when nothing else is active. running when nothing else is active.
@ -928,6 +968,11 @@ C<ev_fork>).
The given async watcher has been asynchronously notified (see C<ev_async>). The given async watcher has been asynchronously notified (see C<ev_async>).
=item C<EV_CUSTOM>
Not ever sent (or otherwise used) by libev itself, but can be freely used
by libev users to signal watchers (e.g. via C<ev_feed_event>).
=item C<EV_ERROR> =item C<EV_ERROR>
An unspecified error has occurred, the watcher has been stopped. This might An unspecified error has occurred, the watcher has been stopped. This might
@ -1052,24 +1097,22 @@ integer between C<EV_MAXPRI> (default: C<2>) and C<EV_MINPRI>
before watchers with lower priority, but priority will not keep watchers before watchers with lower priority, but priority will not keep watchers
from being executed (except for C<ev_idle> watchers). from being executed (except for C<ev_idle> watchers).
This means that priorities are I<only> used for ordering callback
invocation after new events have been received. This is useful, for
example, to reduce latency after idling, or more often, to bind two
watchers on the same event and make sure one is called first.
If you need to suppress invocation when higher priority events are pending If you need to suppress invocation when higher priority events are pending
you need to look at C<ev_idle> watchers, which provide this functionality. you need to look at C<ev_idle> watchers, which provide this functionality.
You I<must not> change the priority of a watcher as long as it is active or You I<must not> change the priority of a watcher as long as it is active or
pending. pending.
The default priority used by watchers when no priority has been set is
always C<0>, which is supposed to not be too high and not be too low :).
Setting a priority outside the range of C<EV_MINPRI> to C<EV_MAXPRI> is Setting a priority outside the range of C<EV_MINPRI> to C<EV_MAXPRI> is
fine, as long as you do not mind that the priority value you query might fine, as long as you do not mind that the priority value you query might
or might not have been clamped to the valid range. or might not have been clamped to the valid range.
The default priority used by watchers when no priority has been set is
always C<0>, which is supposed to not be too high and not be too low :).
See L<WATCHER PRIORITY MODELS>, below, for a more thorough treatment of
priorities.
=item ev_invoke (loop, ev_TYPE *watcher, int revents) =item ev_invoke (loop, ev_TYPE *watcher, int revents)
Invoke the C<watcher> with the given C<loop> and C<revents>. Neither Invoke the C<watcher> with the given C<loop> and C<revents>. Neither
@ -1154,6 +1197,109 @@ programmers):
(((char *)w) - offsetof (struct my_biggy, t2)); (((char *)w) - offsetof (struct my_biggy, t2));
} }
=head2 WATCHER PRIORITY MODELS
Many event loops support I<watcher priorities>, which are usually small
integers that influence the ordering of event callback invocation
between watchers in some way, all else being equal.
In libev, Watcher priorities can be set using C<ev_set_priority>. See its
description for the more technical details such as the actual priority
range.
There are two common ways how these these priorities are being interpreted
by event loops:
In the more common lock-out model, higher priorities "lock out" invocation
of lower priority watchers, which means as long as higher priority
watchers receive events, lower priority watchers are not being invoked.
The less common only-for-ordering model uses priorities solely to order
callback invocation within a single event loop iteration: Higher priority
watchers are invoked before lower priority ones, but they all get invoked
before polling for new events.
Libev uses the second (only-for-ordering) model for all its watchers
except for idle watchers (which use the lock-out model).
The rationale behind this is that implementing the lock-out model for
watchers is not well supported by most kernel interfaces, and most event
libraries will just poll for the same events again and again as long as
their callbacks have not been executed, which is very inefficient in the
common case of one high-priority watcher locking out a mass of lower
priority ones.
Static (ordering) priorities are most useful when you have two or more
watchers handling the same resource: a typical usage example is having an
C<ev_io> watcher to receive data, and an associated C<ev_timer> to handle
timeouts. Under load, data might be received while the program handles
other jobs, but since timers normally get invoked first, the timeout
handler will be executed before checking for data. In that case, giving
the timer a lower priority than the I/O watcher ensures that I/O will be
handled first even under adverse conditions (which is usually, but not
always, what you want).
Since idle watchers use the "lock-out" model, meaning that idle watchers
will only be executed when no same or higher priority watchers have
received events, they can be used to implement the "lock-out" model when
required.
For example, to emulate how many other event libraries handle priorities,
you can associate an C<ev_idle> watcher to each such watcher, and in
the normal watcher callback, you just start the idle watcher. The real
processing is done in the idle watcher callback. This causes libev to
continously poll and process kernel event data for the watcher, but when
the lock-out case is known to be rare (which in turn is rare :), this is
workable.
Usually, however, the lock-out model implemented that way will perform
miserably under the type of load it was designed to handle. In that case,
it might be preferable to stop the real watcher before starting the
idle watcher, so the kernel will not have to process the event in case
the actual processing will be delayed for considerable time.
Here is an example of an I/O watcher that should run at a strictly lower
priority than the default, and which should only process data when no
other events are pending:
ev_idle idle; // actual processing watcher
ev_io io; // actual event watcher
static void
io_cb (EV_P_ ev_io *w, int revents)
{
// stop the I/O watcher, we received the event, but
// are not yet ready to handle it.
ev_io_stop (EV_A_ w);
// start the idle watcher to ahndle the actual event.
// it will not be executed as long as other watchers
// with the default priority are receiving events.
ev_idle_start (EV_A_ &idle);
}
static void
idle-cb (EV_P_ ev_idle *w, int revents)
{
// actual processing
read (STDIN_FILENO, ...);
// have to start the I/O watcher again, as
// we have handled the event
ev_io_start (EV_P_ &io);
}
// initialisation
ev_idle_init (&idle, idle_cb);
ev_io_init (&io, io_cb, STDIN_FILENO, EV_READ);
ev_io_start (EV_DEFAULT_ &io);
In the "real" world, it might also be beneficial to start a timer, so that
low-priority connections can not be locked out forever under load. This
enables your program to keep a lower latency for important connections
during short periods of high load, while not completely locking out less
important ones.
=head1 WATCHER TYPES =head1 WATCHER TYPES
@ -1188,7 +1334,9 @@ required if you know what you are doing).
If you cannot use non-blocking mode, then force the use of a If you cannot use non-blocking mode, then force the use of a
known-to-be-good backend (at the time of this writing, this includes only known-to-be-good backend (at the time of this writing, this includes only
C<EVBACKEND_SELECT> and C<EVBACKEND_POLL>). C<EVBACKEND_SELECT> and C<EVBACKEND_POLL>). The same applies to file
descriptors for which non-blocking operation makes no sense (such as
files) - libev doesn't guarentee any specific behaviour in that case.
Another thing you have to watch out for is that it is quite easy to Another thing you have to watch out for is that it is quite easy to
receive "spurious" readiness notifications, that is your callback might receive "spurious" readiness notifications, that is your callback might
@ -1319,8 +1467,11 @@ detecting time jumps is hard, and some inaccuracies are unavoidable (the
monotonic clock option helps a lot here). monotonic clock option helps a lot here).
The callback is guaranteed to be invoked only I<after> its timeout has The callback is guaranteed to be invoked only I<after> its timeout has
passed, but if multiple timers become ready during the same loop iteration passed (not I<at>, so on systems with very low-resolution clocks this
then order of execution is undefined. might introduce a small delay). If multiple timers become ready during the
same loop iteration then the ones with earlier time-out values are invoked
before ones with later time-out values (but this is no longer true when a
callback calls C<ev_loop> recursively).
=head3 Be smart about timeouts =head3 Be smart about timeouts
@ -1549,7 +1700,7 @@ If the timer is started but non-repeating, stop it (as if it timed out).
If the timer is repeating, either start it if necessary (with the If the timer is repeating, either start it if necessary (with the
C<repeat> value), or reset the running timer to the C<repeat> value. C<repeat> value), or reset the running timer to the C<repeat> value.
This sounds a bit complicated, see "Be smart about timeouts", above, for a This sounds a bit complicated, see L<Be smart about timeouts>, above, for a
usage example. usage example.
=item ev_tstamp repeat [read-write] =item ev_tstamp repeat [read-write]
@ -1598,52 +1749,63 @@ inactivity.
Periodic watchers are also timers of a kind, but they are very versatile Periodic watchers are also timers of a kind, but they are very versatile
(and unfortunately a bit complex). (and unfortunately a bit complex).
Unlike C<ev_timer>'s, they are not based on real time (or relative time) Unlike C<ev_timer>, periodic watchers are not based on real time (or
but on wall clock time (absolute time). You can tell a periodic watcher relative time, the physical time that passes) but on wall clock time
to trigger after some specific point in time. For example, if you tell a (absolute time, the thing you can read on your calender or clock). The
periodic watcher to trigger in 10 seconds (by specifying e.g. C<ev_now () difference is that wall clock time can run faster or slower than real
+ 10.>, that is, an absolute time not a delay) and then reset your system time, and time jumps are not uncommon (e.g. when you adjust your
clock to January of the previous year, then it will take more than year wrist-watch).
to trigger the event (unlike an C<ev_timer>, which would still trigger
roughly 10 seconds later as it uses a relative timeout). You can tell a periodic watcher to trigger after some specific point
in time: for example, if you tell a periodic watcher to trigger "in 10
C<ev_periodic>s can also be used to implement vastly more complex timers, seconds" (by specifying e.g. C<ev_now () + 10.>, that is, an absolute time
such as triggering an event on each "midnight, local time", or other not a delay) and then reset your system clock to January of the previous
complicated rules. year, then it will take a year or more to trigger the event (unlike an
C<ev_timer>, which would still trigger roughly 10 seconds after starting
it, as it uses a relative timeout).
C<ev_periodic> watchers can also be used to implement vastly more complex
timers, such as triggering an event on each "midnight, local time", or
other complicated rules. This cannot be done with C<ev_timer> watchers, as
those cannot react to time jumps.
As with timers, the callback is guaranteed to be invoked only when the As with timers, the callback is guaranteed to be invoked only when the
time (C<at>) has passed, but if multiple periodic timers become ready point in time where it is supposed to trigger has passed. If multiple
during the same loop iteration, then order of execution is undefined. timers become ready during the same loop iteration then the ones with
earlier time-out values are invoked before ones with later time-out values
(but this is no longer true when a callback calls C<ev_loop> recursively).
=head3 Watcher-Specific Functions and Data Members =head3 Watcher-Specific Functions and Data Members
=over 4 =over 4
=item ev_periodic_init (ev_periodic *, callback, ev_tstamp at, ev_tstamp interval, reschedule_cb) =item ev_periodic_init (ev_periodic *, callback, ev_tstamp offset, ev_tstamp interval, reschedule_cb)
=item ev_periodic_set (ev_periodic *, ev_tstamp after, ev_tstamp repeat, reschedule_cb) =item ev_periodic_set (ev_periodic *, ev_tstamp offset, ev_tstamp interval, reschedule_cb)
Lots of arguments, lets sort it out... There are basically three modes of Lots of arguments, let's sort it out... There are basically three modes of
operation, and we will explain them from simplest to most complex: operation, and we will explain them from simplest to most complex:
=over 4 =over 4
=item * absolute timer (at = time, interval = reschedule_cb = 0) =item * absolute timer (offset = absolute time, interval = 0, reschedule_cb = 0)
In this configuration the watcher triggers an event after the wall clock In this configuration the watcher triggers an event after the wall clock
time C<at> has passed. It will not repeat and will not adjust when a time time C<offset> has passed. It will not repeat and will not adjust when a
jump occurs, that is, if it is to be run at January 1st 2011 then it will time jump occurs, that is, if it is to be run at January 1st 2011 then it
only run when the system clock reaches or surpasses this time. will be stopped and invoked when the system clock reaches or surpasses
this point in time.
=item * repeating interval timer (at = offset, interval > 0, reschedule_cb = 0) =item * repeating interval timer (offset = offset within interval, interval > 0, reschedule_cb = 0)
In this mode the watcher will always be scheduled to time out at the next In this mode the watcher will always be scheduled to time out at the next
C<at + N * interval> time (for some integer N, which can also be negative) C<offset + N * interval> time (for some integer N, which can also be
and then repeat, regardless of any time jumps. negative) and then repeat, regardless of any time jumps. The C<offset>
argument is merely an offset into the C<interval> periods.
This can be used to create timers that do not drift with respect to the This can be used to create timers that do not drift with respect to the
system clock, for example, here is a C<ev_periodic> that triggers each system clock, for example, here is an C<ev_periodic> that triggers each
hour, on the hour: hour, on the hour (with respect to UTC):
ev_periodic_set (&periodic, 0., 3600., 0); ev_periodic_set (&periodic, 0., 3600., 0);
@ -1654,9 +1816,9 @@ by 3600.
Another way to think about it (for the mathematically inclined) is that Another way to think about it (for the mathematically inclined) is that
C<ev_periodic> will try to run the callback in this mode at the next possible C<ev_periodic> will try to run the callback in this mode at the next possible
time where C<time = at (mod interval)>, regardless of any time jumps. time where C<time = offset (mod interval)>, regardless of any time jumps.
For numerical stability it is preferable that the C<at> value is near For numerical stability it is preferable that the C<offset> value is near
C<ev_now ()> (the current time), but there is no range requirement for C<ev_now ()> (the current time), but there is no range requirement for
this value, and in fact is often specified as zero. this value, and in fact is often specified as zero.
@ -1665,15 +1827,16 @@ speed for example), so if C<interval> is very small then timing stability
will of course deteriorate. Libev itself tries to be exact to be about one will of course deteriorate. Libev itself tries to be exact to be about one
millisecond (if the OS supports it and the machine is fast enough). millisecond (if the OS supports it and the machine is fast enough).
=item * manual reschedule mode (at and interval ignored, reschedule_cb = callback) =item * manual reschedule mode (offset ignored, interval ignored, reschedule_cb = callback)
In this mode the values for C<interval> and C<at> are both being In this mode the values for C<interval> and C<offset> are both being
ignored. Instead, each time the periodic watcher gets scheduled, the ignored. Instead, each time the periodic watcher gets scheduled, the
reschedule callback will be called with the watcher as first, and the reschedule callback will be called with the watcher as first, and the
current time as second argument. current time as second argument.
NOTE: I<This callback MUST NOT stop or destroy any periodic watcher, NOTE: I<This callback MUST NOT stop or destroy any periodic watcher, ever,
ever, or make ANY event loop modifications whatsoever>. or make ANY other event loop modifications whatsoever, unless explicitly
allowed by documentation here>.
If you need to stop it, return C<now + 1e30> (or so, fudge fudge) and stop If you need to stop it, return C<now + 1e30> (or so, fudge fudge) and stop
it afterwards (e.g. by starting an C<ev_prepare> watcher, which is the it afterwards (e.g. by starting an C<ev_prepare> watcher, which is the
@ -1713,13 +1876,16 @@ program when the crontabs have changed).
=item ev_tstamp ev_periodic_at (ev_periodic *) =item ev_tstamp ev_periodic_at (ev_periodic *)
When active, returns the absolute time that the watcher is supposed to When active, returns the absolute time that the watcher is supposed
trigger next. to trigger next. This is not the same as the C<offset> argument to
C<ev_periodic_set>, but indeed works even in interval and manual
rescheduling modes.
=item ev_tstamp offset [read-write] =item ev_tstamp offset [read-write]
When repeating, this contains the offset value, otherwise this is the When repeating, this contains the offset value, otherwise this is the
absolute point in time (the C<at> value passed to C<ev_periodic_set>). absolute point in time (the C<offset> value passed to C<ev_periodic_set>,
although libev might modify this value for better numerical stability).
Can be modified any time, but changes only take effect when the periodic Can be modified any time, but changes only take effect when the periodic
timer fires or C<ev_periodic_again> is being called. timer fires or C<ev_periodic_again> is being called.
@ -2181,7 +2347,7 @@ event loop has handled all outstanding events.
=over 4 =over 4
=item ev_idle_init (ev_signal *, callback) =item ev_idle_init (ev_idle *, callback)
Initialises and configures the idle watcher - it has no parameters of any Initialises and configures the idle watcher - it has no parameters of any
kind. There is a C<ev_idle_set> macro, but using it is utterly pointless, kind. There is a C<ev_idle_set> macro, but using it is utterly pointless,
@ -2547,6 +2713,39 @@ and only in the child after the fork. If whoever good citizen calling
C<ev_default_fork> cheats and calls it in the wrong process, the fork C<ev_default_fork> cheats and calls it in the wrong process, the fork
handlers will be invoked, too, of course. handlers will be invoked, too, of course.
=head3 The special problem of life after fork - how is it possible?
Most uses of C<fork()> consist of forking, then some simple calls to set
up/change the process environment, followed by a call to C<exec()>. This
sequence should be handled by libev without any problems.
This changes when the application actually wants to do event handling
in the child, or in both parent and child, in effect "continuing" after the
fork.
The default mode of operation (for libev, with application help to detect
forks) is to duplicate all the state in the child, as would be expected
when I<either> the parent I<or> the child process continues.
When both processes want to continue using libev, then this is usually the
wrong result. In that case, usually one process (typically the parent) is
supposed to continue with all watchers in place as before, while the other
process typically wants to start fresh, i.e. without any active watchers.
The cleanest and most efficient way to achieve that with libev is to
simply create a new event loop, which of course will be "empty", and
use that for new watchers. This has the advantage of not touching more
memory than necessary, and thus avoiding the copy-on-write, and the
disadvantage of having to use multiple event loops (which do not support
signal watchers).
When this is not possible, or you want to use the default loop for
other reasons, then in the process that wants to start "fresh", call
C<ev_default_destroy ()> followed by C<ev_default_loop (...)>. Destroying
the default loop will "orphan" (not stop) all registered watchers, so you
have to be careful not to execute code that modifies those watchers. Note
also that in that case, you have to re-register any signal watchers.
=head3 Watcher-Specific Functions and Data Members =head3 Watcher-Specific Functions and Data Members
=over 4 =over 4
@ -2684,9 +2883,14 @@ C<ev_feed_event>, this call is safe to do from other threads, signal or
similar contexts (see the discussion of C<EV_ATOMIC_T> in the embedding similar contexts (see the discussion of C<EV_ATOMIC_T> in the embedding
section below on what exactly this means). section below on what exactly this means).
This call incurs the overhead of a system call only once per loop iteration, Note that, as with other watchers in libev, multiple events might get
so while the overhead might be noticeable, it doesn't apply to repeated compressed into a single callback invocation (another way to look at this
calls to C<ev_async_send>. is that C<ev_async> watchers are level-triggered, set on C<ev_async_send>,
reset when the event loop detects that).
This call incurs the overhead of a system call only once per event loop
iteration, so while the overhead might be noticeable, it doesn't apply to
repeated calls to C<ev_async_send> for the same event loop.
=item bool = ev_async_pending (ev_async *) =item bool = ev_async_pending (ev_async *)
@ -2699,8 +2903,10 @@ the loop iterates next and checks for the watcher to have become active,
it will reset the flag again. C<ev_async_pending> can be used to very it will reset the flag again. C<ev_async_pending> can be used to very
quickly check whether invoking the loop might be a good idea. quickly check whether invoking the loop might be a good idea.
Not that this does I<not> check whether the watcher itself is pending, only Not that this does I<not> check whether the watcher itself is pending,
whether it has been requested to make this watcher pending. only whether it has been requested to make this watcher pending: there
is a time window between the event loop checking and resetting the async
notification, and the callback being invoked.
=back =back
@ -3014,11 +3220,7 @@ L<http://software.schmorp.de/pkg/EV>.
=item Python =item Python
Python bindings can be found at L<http://code.google.com/p/pyev/>. It Python bindings can be found at L<http://code.google.com/p/pyev/>. It
seems to be quite complete and well-documented. Note, however, that the seems to be quite complete and well-documented.
patch they require for libev is outright dangerous as it breaks the ABI
for everybody else, and therefore, should never be applied in an installed
libev (if python requires an incompatible ABI then it needs to embed
libev).
=item Ruby =item Ruby
@ -3030,6 +3232,11 @@ L<http://rev.rubyforge.org/>.
Roger Pack reports that using the link order C<-lws2_32 -lmsvcrt-ruby-190> Roger Pack reports that using the link order C<-lws2_32 -lmsvcrt-ruby-190>
makes rev work even on mingw. makes rev work even on mingw.
=item Haskell
A haskell binding to libev is available at
L<http://hackage.haskell.org/cgi-bin/hackage-scripts/package/hlibev>.
=item D =item D
Leandro Lucarella has written a D language binding (F<ev.d>) for libev, to Leandro Lucarella has written a D language binding (F<ev.d>) for libev, to
@ -3730,6 +3937,9 @@ way (note also that glib is the slowest event library known to man).
There is no supported compilation method available on windows except There is no supported compilation method available on windows except
embedding it into other applications. embedding it into other applications.
Sensible signal handling is officially unsupported by Microsoft - libev
tries its best, but under most conditions, signals will simply not work.
Not a libev limitation but worth mentioning: windows apparently doesn't Not a libev limitation but worth mentioning: windows apparently doesn't
accept large writes: instead of resulting in a partial write, windows will accept large writes: instead of resulting in a partial write, windows will
either accept everything or return C<ENOBUFS> if the buffer is too large, either accept everything or return C<ENOBUFS> if the buffer is too large,
@ -3743,7 +3953,7 @@ is not recommended (and not reasonable). If your program needs to use
more than a hundred or so sockets, then likely it needs to use a totally more than a hundred or so sockets, then likely it needs to use a totally
different implementation for windows, as libev offers the POSIX readiness different implementation for windows, as libev offers the POSIX readiness
notification model, which cannot be implemented efficiently on windows notification model, which cannot be implemented efficiently on windows
(Microsoft monopoly games). (due to Microsoft monopoly games).
A typical way to use libev under windows is to embed it (see the embedding A typical way to use libev under windows is to embed it (see the embedding
section for details) and use the following F<evwrap.h> header file instead section for details) and use the following F<evwrap.h> header file instead
@ -3789,24 +3999,22 @@ Early versions of winsocket's select only supported waiting for a maximum
of C<64> handles (probably owing to the fact that all windows kernels
can only wait for C<64> things at the same time internally; Microsoft can only wait for C<64> things at the same time internally; Microsoft
recommends spawning a chain of threads and wait for 63 handles and the recommends spawning a chain of threads and wait for 63 handles and the
previous thread in each. Great). previous thread in each. Sounds great!).
Newer versions support more handles, but you need to define C<FD_SETSIZE> Newer versions support more handles, but you need to define C<FD_SETSIZE>
to some high number (e.g. C<2048>) before compiling the winsocket select to some high number (e.g. C<2048>) before compiling the winsocket select
call (which might be in libev or elsewhere, for example, perl does its own call (which might be in libev or elsewhere, for example, perl and many
select emulation on windows). other interpreters do their own select emulation on windows).
Another limit is the number of file descriptors in the Microsoft runtime Another limit is the number of file descriptors in the Microsoft runtime
libraries, which by default is C<64> (there must be a hidden I<64> fetish libraries, which by default is C<64> (there must be a hidden I<64>
or something like this inside Microsoft). You can increase this by calling fetish or something like this inside Microsoft). You can increase this
C<_setmaxstdio>, which can increase this limit to C<2048> (another by calling C<_setmaxstdio>, which can increase this limit to C<2048>
arbitrary limit), but is broken in many versions of the Microsoft runtime (another arbitrary limit), but is broken in many versions of the Microsoft
libraries. runtime libraries. This might get you to about C<512> or C<2048> sockets
(depending on windows version and/or the phase of the moon). To get more,
This might get you to about C<512> or C<2048> sockets (depending on you need to wrap all I/O functions and provide your own fd management, but
windows version and/or the phase of the moon). To get more, you need to the cost of calling select (O(n²)) will likely make this unworkable.
wrap all I/O functions and provide your own fd management, but the cost of
calling select (O(n²)) will likely make this unworkable.
=back =back
@ -3937,6 +4145,82 @@ involves iterating over all running async watchers or all signal numbers.
=back =back
=head1 GLOSSARY
=over 4
=item active
A watcher is active as long as it has been started (has been attached to
an event loop) but not yet stopped (disassociated from the event loop).
=item application
In this document, an application is whatever is using libev.
=item callback
The address of a function that is called when some event has been
detected. Callbacks are being passed the event loop, the watcher that
received the event, and the actual event bitset.
=item callback invocation
The act of calling the callback associated with a watcher.
=item event
A change of state of some external event, such as data now being available
for reading on a file descriptor, time having passed or simply not having
any other events happening anymore.
In libev, events are represented as single bits (such as C<EV_READ> or
C<EV_TIMEOUT>).
=item event library
A software package implementing an event model and loop.
=item event loop
An entity that handles and processes external events and converts them
into callback invocations.
=item event model
The model used to describe how an event loop handles and processes
watchers and events.
=item pending
A watcher is pending as soon as the corresponding event has been detected,
and stops being pending as soon as the watcher will be invoked or its
pending status is explicitly cleared by the application.
A watcher can be pending, but not active. Stopping a watcher also clears
its pending status.
=item real time
The physical time that is observed. It is apparently strictly monotonic :)
=item wall-clock time
The time and date as shown on clocks. Unlike real time, it can actually
be wrong and jump forwards and backwards, e.g. when you adjust your
clock.
=item watcher
A data structure that describes interest in certain events. Watchers need
to be started (attached to an event loop) before they can receive events.
=item watcher invocation
The act of calling the callback associated with a watcher.
=back
=head1 AUTHOR =head1 AUTHOR
Marc Lehmann <libev@schmorp.de>, with repeated corrections by Mikael Magnusson. Marc Lehmann <libev@schmorp.de>, with repeated corrections by Mikael Magnusson.

22
deps/libev/ev_vars.h

@ -55,11 +55,24 @@ VARx(ev_tstamp, backend_fudge) /* assumed typical timer resolution */
VAR (backend_modify, void (*backend_modify)(EV_P_ int fd, int oev, int nev)) VAR (backend_modify, void (*backend_modify)(EV_P_ int fd, int oev, int nev))
VAR (backend_poll , void (*backend_poll)(EV_P_ ev_tstamp timeout)) VAR (backend_poll , void (*backend_poll)(EV_P_ ev_tstamp timeout))
VARx(ANFD *, anfds)
VARx(int, anfdmax)
VAR (pendings, ANPENDING *pendings [NUMPRI])
VAR (pendingmax, int pendingmax [NUMPRI])
VAR (pendingcnt, int pendingcnt [NUMPRI])
VARx(ev_prepare, pending_w) /* dummy pending watcher */
/* for reverse feeding of events */
VARx(W *, rfeeds)
VARx(int, rfeedmax)
VARx(int, rfeedcnt)
#if EV_USE_EVENTFD || EV_GENWRAP #if EV_USE_EVENTFD || EV_GENWRAP
VARx(int, evfd) VARx(int, evfd)
#endif #endif
VAR (evpipe, int evpipe [2]) VAR (evpipe, int evpipe [2])
VARx(ev_io, pipeev) VARx(ev_io, pipe_w)
#if !defined(_WIN32) || EV_GENWRAP #if !defined(_WIN32) || EV_GENWRAP
VARx(pid_t, curpid) VARx(pid_t, curpid)
@ -104,13 +117,6 @@ VARx(struct port_event *, port_events)
VARx(int, port_eventmax) VARx(int, port_eventmax)
#endif #endif
VARx(ANFD *, anfds)
VARx(int, anfdmax)
VAR (pendings, ANPENDING *pendings [NUMPRI])
VAR (pendingmax, int pendingmax [NUMPRI])
VAR (pendingcnt, int pendingcnt [NUMPRI])
VARx(int *, fdchanges) VARx(int *, fdchanges)
VARx(int, fdchangemax) VARx(int, fdchangemax)
VARx(int, fdchangecnt) VARx(int, fdchangecnt)

2
deps/libev/ev_win32.c

@ -1,7 +1,7 @@
/* /*
* libev win32 compatibility cruft (_not_ a backend) * libev win32 compatibility cruft (_not_ a backend)
* *
* Copyright (c) 2007,2008 Marc Alexander Lehmann <libev@schmorp.de> * Copyright (c) 2007,2008,2009 Marc Alexander Lehmann <libev@schmorp.de>
* All rights reserved. * All rights reserved.
* *
* Redistribution and use in source and binary forms, with or without modifica- * Redistribution and use in source and binary forms, with or without modifica-

32
deps/libev/ev_wrap.h

@ -13,9 +13,18 @@
#define backend_fudge ((loop)->backend_fudge) #define backend_fudge ((loop)->backend_fudge)
#define backend_modify ((loop)->backend_modify) #define backend_modify ((loop)->backend_modify)
#define backend_poll ((loop)->backend_poll) #define backend_poll ((loop)->backend_poll)
#define anfds ((loop)->anfds)
#define anfdmax ((loop)->anfdmax)
#define pendings ((loop)->pendings)
#define pendingmax ((loop)->pendingmax)
#define pendingcnt ((loop)->pendingcnt)
#define pending_w ((loop)->pending_w)
#define rfeeds ((loop)->rfeeds)
#define rfeedmax ((loop)->rfeedmax)
#define rfeedcnt ((loop)->rfeedcnt)
#define evfd ((loop)->evfd) #define evfd ((loop)->evfd)
#define evpipe ((loop)->evpipe) #define evpipe ((loop)->evpipe)
#define pipeev ((loop)->pipeev) #define pipe_w ((loop)->pipe_w)
#define curpid ((loop)->curpid) #define curpid ((loop)->curpid)
#define postfork ((loop)->postfork) #define postfork ((loop)->postfork)
#define vec_ri ((loop)->vec_ri) #define vec_ri ((loop)->vec_ri)
@ -38,11 +47,6 @@
#define kqueue_eventmax ((loop)->kqueue_eventmax) #define kqueue_eventmax ((loop)->kqueue_eventmax)
#define port_events ((loop)->port_events) #define port_events ((loop)->port_events)
#define port_eventmax ((loop)->port_eventmax) #define port_eventmax ((loop)->port_eventmax)
#define anfds ((loop)->anfds)
#define anfdmax ((loop)->anfdmax)
#define pendings ((loop)->pendings)
#define pendingmax ((loop)->pendingmax)
#define pendingcnt ((loop)->pendingcnt)
#define fdchanges ((loop)->fdchanges) #define fdchanges ((loop)->fdchanges)
#define fdchangemax ((loop)->fdchangemax) #define fdchangemax ((loop)->fdchangemax)
#define fdchangecnt ((loop)->fdchangecnt) #define fdchangecnt ((loop)->fdchangecnt)
@ -87,9 +91,18 @@
#undef backend_fudge #undef backend_fudge
#undef backend_modify #undef backend_modify
#undef backend_poll #undef backend_poll
#undef anfds
#undef anfdmax
#undef pendings
#undef pendingmax
#undef pendingcnt
#undef pending_w
#undef rfeeds
#undef rfeedmax
#undef rfeedcnt
#undef evfd #undef evfd
#undef evpipe #undef evpipe
#undef pipeev #undef pipe_w
#undef curpid #undef curpid
#undef postfork #undef postfork
#undef vec_ri #undef vec_ri
@ -112,11 +125,6 @@
#undef kqueue_eventmax #undef kqueue_eventmax
#undef port_events #undef port_events
#undef port_eventmax #undef port_eventmax
#undef anfds
#undef anfdmax
#undef pendings
#undef pendingmax
#undef pendingcnt
#undef fdchanges #undef fdchanges
#undef fdchangemax #undef fdchangemax
#undef fdchangecnt #undef fdchangecnt

226
deps/libev/event_compat.h

@ -1,226 +0,0 @@
/*
* Copyright (c) 2000-2004 Niels Provos <provos@citi.umich.edu>
* Copyright (c) 2008 Marc Alexander Lehmann <libev@schmorp.de>
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifdef __cplusplus
extern "C" {
#endif
#ifdef _WIN32
# define WIN32_LEAN_AND_MEAN
# include <windows.h>
# undef WIN32_LEAN_AND_MEAN
typedef unsigned char u_char;
typedef unsigned short u_short;
#else
# include <sys/types.h>
# include <sys/time.h>
# include <inttypes.h>
#endif
#include <stdarg.h>
/* Fix so that ppl dont have to run with <sys/queue.h> */
#ifndef TAILQ_ENTRY
#define _EVENT_DEFINED_TQENTRY
#define TAILQ_ENTRY(type) \
struct { \
struct type *tqe_next; /* next element */ \
struct type **tqe_prev; /* address of previous next element */ \
}
#endif /* !TAILQ_ENTRY */
#ifndef RB_ENTRY
#define _EVENT_DEFINED_RBENTRY
#define RB_ENTRY(type) \
struct { \
struct type *rbe_left; /* left element */ \
struct type *rbe_right; /* right element */ \
struct type *rbe_parent; /* parent element */ \
int rbe_color; /* node color */ \
}
#endif /* !RB_ENTRY */
/*
* Key-Value pairs. Can be used for HTTP headers but also for
* query argument parsing.
*/
struct evkeyval {
TAILQ_ENTRY(evkeyval) next;
char *key;
char *value;
};
#ifdef _EVENT_DEFINED_TQENTRY
#undef TAILQ_ENTRY
struct event_list;
struct evkeyvalq;
#undef _EVENT_DEFINED_TQENTRY
#else
TAILQ_HEAD (event_list, event);
TAILQ_HEAD (evkeyvalq, evkeyval);
#endif /* _EVENT_DEFINED_TQENTRY */
#ifdef _EVENT_DEFINED_RBENTRY
#undef RB_ENTRY
#undef _EVENT_DEFINED_RBENTRY
#endif /* _EVENT_DEFINED_RBENTRY */
struct eventop {
char *name;
void *(*init)(struct event_base *);
int (*add)(void *, struct event *);
int (*del)(void *, struct event *);
int (*recalc)(struct event_base *, void *, int);
int (*dispatch)(struct event_base *, void *, struct timeval *);
void (*dealloc)(struct event_base *, void *);
};
/* These functions deal with buffering input and output */
struct evbuffer {
u_char *buffer;
u_char *orig_buffer;
size_t misalign;
size_t totallen;
size_t off;
void (*cb)(struct evbuffer *, size_t, size_t, void *);
void *cbarg;
};
/* Just for error reporting - use other constants otherwise */
#define EVBUFFER_READ 0x01
#define EVBUFFER_WRITE 0x02
#define EVBUFFER_EOF 0x10
#define EVBUFFER_ERROR 0x20
#define EVBUFFER_TIMEOUT 0x40
struct bufferevent;
typedef void (*evbuffercb)(struct bufferevent *, void *);
typedef void (*everrorcb)(struct bufferevent *, short what, void *);
struct event_watermark {
size_t low;
size_t high;
};
struct bufferevent {
struct event ev_read;
struct event ev_write;
struct evbuffer *input;
struct evbuffer *output;
struct event_watermark wm_read;
struct event_watermark wm_write;
evbuffercb readcb;
evbuffercb writecb;
everrorcb errorcb;
void *cbarg;
int timeout_read; /* in seconds */
int timeout_write; /* in seconds */
short enabled; /* events that are currently enabled */
};
struct bufferevent *bufferevent_new(int fd,
evbuffercb readcb, evbuffercb writecb, everrorcb errorcb, void *cbarg);
int bufferevent_base_set(struct event_base *base, struct bufferevent *bufev);
int bufferevent_priority_set(struct bufferevent *bufev, int pri);
void bufferevent_free(struct bufferevent *bufev);
int bufferevent_write(struct bufferevent *bufev, const void *data, size_t size);
int bufferevent_write_buffer(struct bufferevent *bufev, struct evbuffer *buf);
size_t bufferevent_read(struct bufferevent *bufev, void *data, size_t size);
int bufferevent_enable(struct bufferevent *bufev, short event);
int bufferevent_disable(struct bufferevent *bufev, short event);
void bufferevent_settimeout(struct bufferevent *bufev,
int timeout_read, int timeout_write);
#define EVBUFFER_LENGTH(x) (x)->off
#define EVBUFFER_DATA(x) (x)->buffer
#define EVBUFFER_INPUT(x) (x)->input
#define EVBUFFER_OUTPUT(x) (x)->output
struct evbuffer *evbuffer_new(void);
void evbuffer_free(struct evbuffer *);
int evbuffer_expand(struct evbuffer *, size_t);
int evbuffer_add(struct evbuffer *, const void *, size_t);
int evbuffer_remove(struct evbuffer *, void *, size_t);
char *evbuffer_readline(struct evbuffer *);
int evbuffer_add_buffer(struct evbuffer *, struct evbuffer *);
int evbuffer_add_printf(struct evbuffer *, const char *fmt, ...);
int evbuffer_add_vprintf(struct evbuffer *, const char *fmt, va_list ap);
void evbuffer_drain(struct evbuffer *, size_t);
int evbuffer_write(struct evbuffer *, int);
int evbuffer_read(struct evbuffer *, int, int);
u_char *evbuffer_find(struct evbuffer *, const u_char *, size_t);
void evbuffer_setcb(struct evbuffer *, void (*)(struct evbuffer *, size_t, size_t, void *), void *);
/*
* Marshaling tagged data - We assume that all tags are inserted in their
* numeric order - so that unknown tags will always be higher than the
* known ones - and we can just ignore the end of an event buffer.
*/
void evtag_init(void);
void evtag_marshal(struct evbuffer *evbuf, uint32_t tag, const void *data,
uint32_t len);
void encode_int(struct evbuffer *evbuf, uint32_t number);
void evtag_marshal_int(struct evbuffer *evbuf, uint32_t tag, uint32_t integer);
void evtag_marshal_string(struct evbuffer *buf, uint32_t tag,
const char *string);
void evtag_marshal_timeval(struct evbuffer *evbuf, uint32_t tag,
struct timeval *tv);
int evtag_unmarshal(struct evbuffer *src, uint32_t *ptag, struct evbuffer *dst);
int evtag_peek(struct evbuffer *evbuf, uint32_t *ptag);
int evtag_peek_length(struct evbuffer *evbuf, uint32_t *plength);
int evtag_payload_length(struct evbuffer *evbuf, uint32_t *plength);
int evtag_consume(struct evbuffer *evbuf);
int evtag_unmarshal_int(struct evbuffer *evbuf, uint32_t need_tag,
uint32_t *pinteger);
int evtag_unmarshal_fixed(struct evbuffer *src, uint32_t need_tag, void *data,
size_t len);
int evtag_unmarshal_string(struct evbuffer *evbuf, uint32_t need_tag,
char **pstring);
int evtag_unmarshal_timeval(struct evbuffer *evbuf, uint32_t need_tag,
struct timeval *ptv);
#ifdef __cplusplus
}
#endif

131
deps/libev/import_libevent

@ -1,131 +0,0 @@
#!/bin/sh
LE=../libevent-1.4.3-stable
if ! [ -e evbuffer.c ]; then
echo do not run this programm unless you know what you are doing
exit 1
fi
# this program combines libev and libevent into a single package
cvs update -AdP libev
rsync -avP libev/. . --exclude CVS
rm -f configure.ac
cp $LE/evdns.h .
perl -i -pe 's%^/.libevent-include./%#include "event_compat.h"%' event.h
perl -ne '
s/\s+char buf\[64\];/\tchar buf[96];/;
if (/#include "event.h"/) {
print "#ifndef EV_STANDALONE\n$_#endif\n";
next;
}
if (/#include "misc.h"/) {
print "#ifndef EV_STANDALONE\n$_#endif\n";
next;
}
if (/#include "(unistd.h|sys\/time.h)"/) {
print "#ifndef WIN32\n$_#endif\n";
next;
}
next if /#include "log.h"/;
print;
' <$LE/evdns.c >evdns.c
cp $LE/autogen.sh .
cp $LE/epoll_sub.c .
cp $LE/evbuffer.c .
cp $LE/buffer.c .
cp $LE/evhttp.h .
cp $LE/evutil.h .
cp $LE/evutil.c .
cp $LE/event-config.h .
cp $LE/event-internal.h .
cp $LE/evrpc.h .
cp $LE/evrpc.c .
cp $LE/evrpc-internal.h .
cp $LE/http.c .
cp $LE/event_tagging.c .
cp $LE/http-internal.h .
cp $LE/strlcpy-internal.h .
cp $LE/log.c .
cp $LE/log.h .
cp $LE/strlcpy.c .
rsync -a $LE/WIN32* $LE/sample $LE/test $LE/compat . --del
#rename 's/libevent/libev/' WIN32-Prj/lib*
cp $LE/aclocal.m4 .
#cp $LE/acconfig.h .
cp $LE/config.h.in .
cp $LE/event_rpcgen.py .
cp $LE/*.3 .
#perl -i -pe 's/libevent/libev/g' sample/Makefile.am
#perl -i -pe 's/libevent/libev/g' test/Makefile.am
perl -i -pe 's/#include <event.h>$/#include "event.h"/' test/*.c
perl -i -ne '
next if /"event-internal.h"/;
s/base\d?->sig.ev_signal_added/0/;
s/base\d?->sig.ev_signal_pair\[0\]/-1/;
s/base->sig.evsignal_caught/0/;
next if /^\ttest_signal_(dealloc|pipeloss|switchbase|assert|restore)\(\)/;
next if /^\ttest_simplesignal\(\)/; # non-default-loop
next if /^\ttest_immediatesignal\(\)/; # non-default-loop
next if /test_priorities\(\d\)/;
print;
' test/regress.c
perl -ne '
s/\bmin_heap.h\b//g;
s/\bsignal.c\b//g;
s/\bevport.c\b//g;
s/\bkqueue.c\b//g;
s/\bdevpoll.c\b//g;
s/\brtsig.c\b//g;
s/\bselect.c\b//g;
s/\bpoll.c\b//g;
s/\bepoll.c\b//g;
s/\bepoll_sub.c\b//g;
s/\bevent-internal.h\b//g;
s/\bevsignal.h\b//g;
s/^(man_MANS\s*=)/$1 ev.3 /;
s/^(EXTRA_DIST\s*=)/$1 libev.m4 ev.h ev_vars.h ev_wrap.h event_compat.h ev++.h ev_epoll.c ev_select.c ev_poll.c ev_kqueue.c ev_port.c ev_win32.c ev.3 ev.pod /;
s/^(include_HEADERS\s*=)/$1 ev.h event_compat.h ev++.h /;
s/^(CORE_SRC\s*=)/$1 ev.c /;
s/^(SYS_LIBS\s*=)/$1 -lm /;
#s/libevent/libev/g;
print;
' <$LE/Makefile.am >Makefile.am
perl -ne '
#s/-Wall/-Wall -Wno-comment -Wunused-function -Wno-unused-value/;
s/-Wall//g;
#s/libevent/libev/g;
#VERSION
s/AM_INIT_AUTOMAKE\s*\(.*,(.*)\)/AM_INIT_AUTOMAKE(libevent-$1+libev,3.1)/;
s/AC_LIBOBJ\(select\)/: ;/g;
s/AC_LIBOBJ\(poll\)/: ;/g;
s/AC_LIBOBJ\(kqueue\)/: ;/g;
s/AC_LIBOBJ\(epoll\)/: ;/g;
s/AC_LIBOBJ\(devpoll\)/: ;/g;
s/AC_LIBOBJ\(evport\)/: ;/g;
s/AC_LIBOBJ\(signal\)/: ;/g;
s/AC_LIBOBJ\(rtsig\)/: ;/g;
print "m4_include([libev.m4])\n" if /^AC_OUTPUT/;
print;
' <$LE/configure.in >configure.in
aclocal-1.7
automake-1.7 --add-missing
autoconf
autoheader
libtoolize
CC="ccache gcc" ./configure --prefix=/opt/libev --disable-shared "$@"

294
deps/libev/install-sh

@ -0,0 +1,294 @@
#!/bin/sh
#
# install - install a program, script, or datafile
#
# This originates from X11R5 (mit/util/scripts/install.sh), which was
# later released in X11R6 (xc/config/util/install.sh) with the
# following copyright and license.
#
# Copyright (C) 1994 X Consortium
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# Except as contained in this notice, the name of the X Consortium shall not
# be used in advertising or otherwise to promote the sale, use or other deal-
# ings in this Software without prior written authorization from the X Consor-
# tium.
#
#
# FSF changes to this file are in the public domain.
#
# Calling this script install-sh is preferred over install.sh, to prevent
# `make' implicit rules from creating a file called install from it
# when there is no Makefile.
#
# This script is compatible with the BSD install script, but was written
# from scratch. It can only install one file at a time, a restriction
# shared with many OS's install programs.
# set DOITPROG to echo to test this script
# Don't use :- since 4.3BSD and earlier shells don't like it.
doit="${DOITPROG-}"
# put in absolute paths if you don't have them in your path; or use env. vars.
mvprog="${MVPROG-mv}"
cpprog="${CPPROG-cp}"
chmodprog="${CHMODPROG-chmod}"
chownprog="${CHOWNPROG-chown}"
chgrpprog="${CHGRPPROG-chgrp}"
stripprog="${STRIPPROG-strip}"
rmprog="${RMPROG-rm}"
mkdirprog="${MKDIRPROG-mkdir}"
transformbasename=""
transform_arg=""
instcmd="$mvprog"
chmodcmd="$chmodprog 0755"
chowncmd=""
chgrpcmd=""
stripcmd=""
rmcmd="$rmprog -f"
mvcmd="$mvprog"
src=""
dst=""
dir_arg=""
while [ x"$1" != x ]; do
case $1 in
-c) instcmd=$cpprog
shift
continue;;
-d) dir_arg=true
shift
continue;;
-m) chmodcmd="$chmodprog $2"
shift
shift
continue;;
-o) chowncmd="$chownprog $2"
shift
shift
continue;;
-g) chgrpcmd="$chgrpprog $2"
shift
shift
continue;;
-s) stripcmd=$stripprog
shift
continue;;
-t=*) transformarg=`echo $1 | sed 's/-t=//'`
shift
continue;;
-b=*) transformbasename=`echo $1 | sed 's/-b=//'`
shift
continue;;
*) if [ x"$src" = x ]
then
src=$1
else
# this colon is to work around a 386BSD /bin/sh bug
:
dst=$1
fi
shift
continue;;
esac
done
if [ x"$src" = x ]
then
echo "$0: no input file specified" >&2
exit 1
else
:
fi
if [ x"$dir_arg" != x ]; then
dst=$src
src=""
if [ -d "$dst" ]; then
instcmd=:
chmodcmd=""
else
instcmd=$mkdirprog
fi
else
# Waiting for this to be detected by the "$instcmd $src $dsttmp" command
# might cause directories to be created, which would be especially bad
# if $src (and thus $dsttmp) contains '*'.
if [ -f "$src" ] || [ -d "$src" ]
then
:
else
echo "$0: $src does not exist" >&2
exit 1
fi
if [ x"$dst" = x ]
then
echo "$0: no destination specified" >&2
exit 1
else
:
fi
# If destination is a directory, append the input filename; if your system
# does not like double slashes in filenames, you may need to add some logic
if [ -d "$dst" ]
then
dst=$dst/`basename "$src"`
else
:
fi
fi
## this sed command emulates the dirname command
dstdir=`echo "$dst" | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'`
# Make sure that the destination directory exists.
# this part is taken from Noah Friedman's mkinstalldirs script
# Skip lots of stat calls in the usual case.
if [ ! -d "$dstdir" ]; then
defaultIFS='
'
IFS="${IFS-$defaultIFS}"
oIFS=$IFS
# Some sh's can't handle IFS=/ for some reason.
IFS='%'
set - `echo "$dstdir" | sed -e 's@/@%@g' -e 's@^%@/@'`
IFS=$oIFS
pathcomp=''
while [ $# -ne 0 ] ; do
pathcomp=$pathcomp$1
shift
if [ ! -d "$pathcomp" ] ;
then
$mkdirprog "$pathcomp"
else
:
fi
pathcomp=$pathcomp/
done
fi
if [ x"$dir_arg" != x ]
then
$doit $instcmd "$dst" &&
if [ x"$chowncmd" != x ]; then $doit $chowncmd "$dst"; else : ; fi &&
if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd "$dst"; else : ; fi &&
if [ x"$stripcmd" != x ]; then $doit $stripcmd "$dst"; else : ; fi &&
if [ x"$chmodcmd" != x ]; then $doit $chmodcmd "$dst"; else : ; fi
else
# If we're going to rename the final executable, determine the name now.
if [ x"$transformarg" = x ]
then
dstfile=`basename "$dst"`
else
dstfile=`basename "$dst" $transformbasename |
sed $transformarg`$transformbasename
fi
# don't allow the sed command to completely eliminate the filename
if [ x"$dstfile" = x ]
then
dstfile=`basename "$dst"`
else
:
fi
# Make a couple of temp file names in the proper directory.
dsttmp=$dstdir/_inst.$$_
rmtmp=$dstdir/_rm.$$_
# Trap to clean up temp files at exit.
trap 'status=$?; rm -f "$dsttmp" "$rmtmp" && exit $status' 0
trap '(exit $?); exit' 1 2 13 15
# Move or copy the file name to the temp name
$doit $instcmd "$src" "$dsttmp" &&
# and set any options; do chmod last to preserve setuid bits
# If any of these fail, we abort the whole thing. If we want to
# ignore errors from any of these, just make sure not to ignore
# errors from the above "$doit $instcmd $src $dsttmp" command.
if [ x"$chowncmd" != x ]; then $doit $chowncmd "$dsttmp"; else :;fi &&
if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd "$dsttmp"; else :;fi &&
if [ x"$stripcmd" != x ]; then $doit $stripcmd "$dsttmp"; else :;fi &&
if [ x"$chmodcmd" != x ]; then $doit $chmodcmd "$dsttmp"; else :;fi &&
# Now remove or move aside any old file at destination location. We try this
# two ways since rm can't unlink itself on some systems and the destination
# file might be busy for other reasons. In this case, the final cleanup
# might fail but the new file should still install successfully.
{
if [ -f "$dstdir/$dstfile" ]
then
$doit $rmcmd -f "$dstdir/$dstfile" 2>/dev/null ||
$doit $mvcmd -f "$dstdir/$dstfile" "$rmtmp" 2>/dev/null ||
{
echo "$0: cannot unlink or rename $dstdir/$dstfile" >&2
(exit 1); exit
}
else
:
fi
} &&
# Now rename the file to the real destination.
$doit $mvcmd "$dsttmp" "$dstdir/$dstfile"
fi &&
# The final little trick to "correctly" pass the exit status to the exit trap.
{
(exit 0); exit
}

6871
deps/libev/ltmain.sh

File diff suppressed because it is too large

336
deps/libev/missing

@ -0,0 +1,336 @@
#! /bin/sh
# Common stub for a few missing GNU programs while installing.
# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003 Free Software Foundation, Inc.
# Originally by Fran,cois Pinard <pinard@iro.umontreal.ca>, 1996.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
# 02111-1307, USA.
# As a special exception to the GNU General Public License, if you
# distribute this file as part of a program that contains a
# configuration script generated by Autoconf, you may include it under
# the same distribution terms that you use for the rest of that program.
if test $# -eq 0; then
echo 1>&2 "Try \`$0 --help' for more information"
exit 1
fi
run=:
# In the cases where this matters, `missing' is being run in the
# srcdir already.
if test -f configure.ac; then
configure_ac=configure.ac
else
configure_ac=configure.in
fi
case "$1" in
--run)
# Try to run requested program, and just exit if it succeeds.
run=
shift
"$@" && exit 0
;;
esac
# If it does not exist, or fails to run (possibly an outdated version),
# try to emulate it.
case "$1" in
-h|--h|--he|--hel|--help)
echo "\
$0 [OPTION]... PROGRAM [ARGUMENT]...
Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an
error status if there is no known handling for PROGRAM.
Options:
-h, --help display this help and exit
-v, --version output version information and exit
--run try to run the given command, and emulate it if it fails
Supported PROGRAM values:
aclocal touch file \`aclocal.m4'
autoconf touch file \`configure'
autoheader touch file \`config.h.in'
automake touch all \`Makefile.in' files
bison create \`y.tab.[ch]', if possible, from existing .[ch]
flex create \`lex.yy.c', if possible, from existing .c
help2man touch the output file
lex create \`lex.yy.c', if possible, from existing .c
makeinfo touch the output file
tar try tar, gnutar, gtar, then tar without non-portable flags
yacc create \`y.tab.[ch]', if possible, from existing .[ch]"
;;
-v|--v|--ve|--ver|--vers|--versi|--versio|--version)
echo "missing 0.4 - GNU automake"
;;
-*)
echo 1>&2 "$0: Unknown \`$1' option"
echo 1>&2 "Try \`$0 --help' for more information"
exit 1
;;
aclocal*)
if test -z "$run" && ($1 --version) > /dev/null 2>&1; then
# We have it, but it failed.
exit 1
fi
echo 1>&2 "\
WARNING: \`$1' is missing on your system. You should only need it if
you modified \`acinclude.m4' or \`${configure_ac}'. You might want
to install the \`Automake' and \`Perl' packages. Grab them from
any GNU archive site."
touch aclocal.m4
;;
autoconf)
if test -z "$run" && ($1 --version) > /dev/null 2>&1; then
# We have it, but it failed.
exit 1
fi
echo 1>&2 "\
WARNING: \`$1' is missing on your system. You should only need it if
you modified \`${configure_ac}'. You might want to install the
\`Autoconf' and \`GNU m4' packages. Grab them from any GNU
archive site."
touch configure
;;
autoheader)
if test -z "$run" && ($1 --version) > /dev/null 2>&1; then
# We have it, but it failed.
exit 1
fi
echo 1>&2 "\
WARNING: \`$1' is missing on your system. You should only need it if
you modified \`acconfig.h' or \`${configure_ac}'. You might want
to install the \`Autoconf' and \`GNU m4' packages. Grab them
from any GNU archive site."
files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}`
test -z "$files" && files="config.h"
touch_files=
for f in $files; do
case "$f" in
*:*) touch_files="$touch_files "`echo "$f" |
sed -e 's/^[^:]*://' -e 's/:.*//'`;;
*) touch_files="$touch_files $f.in";;
esac
done
touch $touch_files
;;
automake*)
if test -z "$run" && ($1 --version) > /dev/null 2>&1; then
# We have it, but it failed.
exit 1
fi
echo 1>&2 "\
WARNING: \`$1' is missing on your system. You should only need it if
you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'.
You might want to install the \`Automake' and \`Perl' packages.
Grab them from any GNU archive site."
find . -type f -name Makefile.am -print |
sed 's/\.am$/.in/' |
while read f; do touch "$f"; done
;;
autom4te)
if test -z "$run" && ($1 --version) > /dev/null 2>&1; then
# We have it, but it failed.
exit 1
fi
echo 1>&2 "\
WARNING: \`$1' is needed, and you do not seem to have it handy on your
system. You might have modified some files without having the
proper tools for further handling them.
You can get \`$1' as part of \`Autoconf' from any GNU
archive site."
file=`echo "$*" | sed -n 's/.*--output[ =]*\([^ ]*\).*/\1/p'`
test -z "$file" && file=`echo "$*" | sed -n 's/.*-o[ ]*\([^ ]*\).*/\1/p'`
if test -f "$file"; then
touch $file
else
test -z "$file" || exec >$file
echo "#! /bin/sh"
echo "# Created by GNU Automake missing as a replacement of"
echo "# $ $@"
echo "exit 0"
chmod +x $file
exit 1
fi
;;
bison|yacc)
echo 1>&2 "\
WARNING: \`$1' is missing on your system. You should only need it if
you modified a \`.y' file. You may need the \`Bison' package
in order for those modifications to take effect. You can get
\`Bison' from any GNU archive site."
rm -f y.tab.c y.tab.h
if [ $# -ne 1 ]; then
eval LASTARG="\${$#}"
case "$LASTARG" in
*.y)
SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'`
if [ -f "$SRCFILE" ]; then
cp "$SRCFILE" y.tab.c
fi
SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'`
if [ -f "$SRCFILE" ]; then
cp "$SRCFILE" y.tab.h
fi
;;
esac
fi
if [ ! -f y.tab.h ]; then
echo >y.tab.h
fi
if [ ! -f y.tab.c ]; then
echo 'main() { return 0; }' >y.tab.c
fi
;;
lex|flex)
echo 1>&2 "\
WARNING: \`$1' is missing on your system. You should only need it if
you modified a \`.l' file. You may need the \`Flex' package
in order for those modifications to take effect. You can get
\`Flex' from any GNU archive site."
rm -f lex.yy.c
if [ $# -ne 1 ]; then
eval LASTARG="\${$#}"
case "$LASTARG" in
*.l)
SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'`
if [ -f "$SRCFILE" ]; then
cp "$SRCFILE" lex.yy.c
fi
;;
esac
fi
if [ ! -f lex.yy.c ]; then
echo 'main() { return 0; }' >lex.yy.c
fi
;;
help2man)
if test -z "$run" && ($1 --version) > /dev/null 2>&1; then
# We have it, but it failed.
exit 1
fi
echo 1>&2 "\
WARNING: \`$1' is missing on your system. You should only need it if
you modified a dependency of a manual page. You may need the
\`Help2man' package in order for those modifications to take
effect. You can get \`Help2man' from any GNU archive site."
file=`echo "$*" | sed -n 's/.*-o \([^ ]*\).*/\1/p'`
if test -z "$file"; then
file=`echo "$*" | sed -n 's/.*--output=\([^ ]*\).*/\1/p'`
fi
if [ -f "$file" ]; then
touch $file
else
test -z "$file" || exec >$file
echo ".ab help2man is required to generate this page"
exit 1
fi
;;
makeinfo)
if test -z "$run" && (makeinfo --version) > /dev/null 2>&1; then
# We have makeinfo, but it failed.
exit 1
fi
echo 1>&2 "\
WARNING: \`$1' is missing on your system. You should only need it if
you modified a \`.texi' or \`.texinfo' file, or any other file
indirectly affecting the aspect of the manual. The spurious
call might also be the consequence of using a buggy \`make' (AIX,
DU, IRIX). You might want to install the \`Texinfo' package or
the \`GNU make' package. Grab either from any GNU archive site."
file=`echo "$*" | sed -n 's/.*-o \([^ ]*\).*/\1/p'`
if test -z "$file"; then
file=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'`
file=`sed -n '/^@setfilename/ { s/.* \([^ ]*\) *$/\1/; p; q; }' $file`
fi
touch $file
;;
tar)
shift
if test -n "$run"; then
echo 1>&2 "ERROR: \`tar' requires --run"
exit 1
fi
# We have already tried tar in the generic part.
# Look for gnutar/gtar before invocation to avoid ugly error
# messages.
if (gnutar --version > /dev/null 2>&1); then
gnutar "$@" && exit 0
fi
if (gtar --version > /dev/null 2>&1); then
gtar "$@" && exit 0
fi
firstarg="$1"
if shift; then
case "$firstarg" in
*o*)
firstarg=`echo "$firstarg" | sed s/o//`
tar "$firstarg" "$@" && exit 0
;;
esac
case "$firstarg" in
*h*)
firstarg=`echo "$firstarg" | sed s/h//`
tar "$firstarg" "$@" && exit 0
;;
esac
fi
echo 1>&2 "\
WARNING: I can't seem to be able to run \`tar' with the given arguments.
You may want to install GNU tar or Free paxutils, or check the
command line arguments."
exit 1
;;
*)
echo 1>&2 "\
WARNING: \`$1' is needed, and you do not seem to have it handy on your
system. You might have modified some files without having the
proper tools for further handling them. Check the \`README' file,
it often tells you about the needed prerequisites for installing
this package. You may also peek at any GNU archive site, in case
some other package would contain this missing \`$1' program."
exit 1
;;
esac
exit 0

111
deps/libev/mkinstalldirs

@ -0,0 +1,111 @@
#! /bin/sh
# mkinstalldirs --- make directory hierarchy
# Author: Noah Friedman <friedman@prep.ai.mit.edu>
# Created: 1993-05-16
# Public domain
errstatus=0
dirmode=""
usage="\
Usage: mkinstalldirs [-h] [--help] [-m mode] dir ..."
# process command line arguments
while test $# -gt 0 ; do
case $1 in
-h | --help | --h*) # -h for help
echo "$usage" 1>&2
exit 0
;;
-m) # -m PERM arg
shift
test $# -eq 0 && { echo "$usage" 1>&2; exit 1; }
dirmode=$1
shift
;;
--) # stop option processing
shift
break
;;
-*) # unknown option
echo "$usage" 1>&2
exit 1
;;
*) # first non-opt arg
break
;;
esac
done
for file
do
if test -d "$file"; then
shift
else
break
fi
done
case $# in
0) exit 0 ;;
esac
case $dirmode in
'')
if mkdir -p -- . 2>/dev/null; then
echo "mkdir -p -- $*"
exec mkdir -p -- "$@"
fi
;;
*)
if mkdir -m "$dirmode" -p -- . 2>/dev/null; then
echo "mkdir -m $dirmode -p -- $*"
exec mkdir -m "$dirmode" -p -- "$@"
fi
;;
esac
for file
do
set fnord `echo ":$file" | sed -ne 's/^:\//#/;s/^://;s/\// /g;s/^#/\//;p'`
shift
pathcomp=
for d
do
pathcomp="$pathcomp$d"
case $pathcomp in
-*) pathcomp=./$pathcomp ;;
esac
if test ! -d "$pathcomp"; then
echo "mkdir $pathcomp"
mkdir "$pathcomp" || lasterr=$?
if test ! -d "$pathcomp"; then
errstatus=$lasterr
else
if test ! -z "$dirmode"; then
echo "chmod $dirmode $pathcomp"
lasterr=""
chmod "$dirmode" "$pathcomp" || lasterr=$?
if test ! -z "$lasterr"; then
errstatus=$lasterr
fi
fi
fi
fi
pathcomp="$pathcomp/"
done
done
exit $errstatus
# Local Variables:
# mode: shell-script
# sh-indentation: 2
# End:
# mkinstalldirs ends here

19
deps/libev/update_ev_wrap

@ -1,19 +0,0 @@
#!/bin/sh
(
echo '#define VAR(name,decl) name'
echo '#define EV_GENWRAP 1'
cat ev_vars.h
) | cc -E -o - - | perl -ne '
while (<>) {
push @syms, $1 if /(^\w+)/;
}
print "/* DO NOT EDIT, automatically generated by update_ev_wrap */\n",
"#ifndef EV_WRAP_H\n",
"#define EV_WRAP_H\n",
(map "#define $_ ((loop)->$_)\n", @syms),
"#else\n",
"#undef EV_WRAP_H\n",
(map "#undef $_\n", @syms),
"#endif\n";
' >ev_wrap.h

7
deps/libev/update_symbols

@ -1,7 +0,0 @@
#!/bin/sh
make ev.o event.o || exit
nm ev.o | perl -ne 'print "$1\n" if /\S+ [A-Z] (\S+)/' > Symbols.ev
nm event.o | perl -ne 'print "$1\n" if /\S+ [A-Z] (\S+)/' > Symbols.event

1
deps/v8/AUTHORS

@ -6,6 +6,7 @@
Google Inc. Google Inc.
Alexander Botero-Lowry <alexbl@FreeBSD.org> Alexander Botero-Lowry <alexbl@FreeBSD.org>
Alexandre Vassalotti <avassalotti@gmail.com>
Craig Schlenter <craig.schlenter@gmail.com> Craig Schlenter <craig.schlenter@gmail.com>
Daniel Andersson <kodandersson@gmail.com> Daniel Andersson <kodandersson@gmail.com>
Daniel James <dnljms@gmail.com> Daniel James <dnljms@gmail.com>

31
deps/v8/ChangeLog

@ -1,3 +1,34 @@
2009-06-29: Version 1.2.10
Improved debugger support.
Fixed bug in exception message reporting (issue 390).
Improved overall performance.
2009-06-23: Version 1.2.9
Improved math performance on ARM.
Fixed profiler name-inference bug.
Fixed handling of shared libraries in the profiler tick processor
scripts.
Fixed handling of tests that time out in the test scripts.
Fixed compilation on MacOS X version 10.4.
Fixed two bugs in the regular expression engine.
Fixed a bug in the string type inference.
Fixed a bug in the handling of 'constant function' properties.
Improved overall performance.
2009-06-16: Version 1.2.8 2009-06-16: Version 1.2.8
Optimized math on ARM platforms. Optimized math on ARM platforms.

8
deps/v8/SConstruct

@ -125,7 +125,7 @@ LIBRARY_FLAGS = {
} }
}, },
'os:macos': { 'os:macos': {
'CCFLAGS': ['-ansi'], 'CCFLAGS': ['-ansi', '-mmacosx-version-min=10.4'],
}, },
'os:freebsd': { 'os:freebsd': {
'CPPPATH' : ['/usr/local/include'], 'CPPPATH' : ['/usr/local/include'],
@ -641,7 +641,7 @@ def GetVersionComponents():
def GetVersion(): def GetVersion():
version_components = GetVersionComponents() version_components = GetVersionComponents()
if version_components[len(version_components) - 1] == '0': if version_components[len(version_components) - 1] == '0':
version_components.pop() version_components.pop()
return '.'.join(version_components) return '.'.join(version_components)
@ -649,10 +649,10 @@ def GetVersion():
def GetSpecificSONAME(): def GetSpecificSONAME():
SONAME_PATTERN = re.compile(r"#define\s+SONAME\s+\"(.*)\"") SONAME_PATTERN = re.compile(r"#define\s+SONAME\s+\"(.*)\"")
source = open(join(root_dir, 'src', 'version.cc')).read() source = open(join(root_dir, 'src', 'version.cc')).read()
match = SONAME_PATTERN.search(source) match = SONAME_PATTERN.search(source)
if match: if match:
return match.group(1).strip() return match.group(1).strip()
else: else:

2
deps/v8/benchmarks/revisions.html

@ -1,7 +1,7 @@
<html> <html>
<head> <head>
<title>V8 Benchmark Suite Revisions</title> <title>V8 Benchmark Suite Revisions</title>
<link type="text/css" rel="stylesheet" href="style.css"></link> <link type="text/css" rel="stylesheet" href="style.css" />
</head> </head>
<body> <body>
<div> <div>

37
deps/v8/benchmarks/run.html

@ -1,5 +1,10 @@
<html> <!DOCTYPE html PUBLIC "-//W3C//DTD HTML 4.01//EN"
"http://www.w3.org/TR/html4/strict.dtd">
<html lang="en">
<head> <head>
<meta http-equiv="Content-Type" content="text/html;charset=iso-8859-1">
<meta http-equiv="Content-Script-Type" content="text/javascript">
<meta http-equiv="Content-Style-Type" content="text/css">
<title>V8 Benchmark Suite</title> <title>V8 Benchmark Suite</title>
<script type="text/javascript" src="base.js"></script> <script type="text/javascript" src="base.js"></script>
<script type="text/javascript" src="richards.js"></script> <script type="text/javascript" src="richards.js"></script>
@ -9,7 +14,7 @@
<script type="text/javascript" src="earley-boyer.js"></script> <script type="text/javascript" src="earley-boyer.js"></script>
<script type="text/javascript" src="regexp.js"></script> <script type="text/javascript" src="regexp.js"></script>
<script type="text/javascript" src="splay.js"></script> <script type="text/javascript" src="splay.js"></script>
<link type="text/css" rel="stylesheet" href="style.css"></link> <link type="text/css" rel="stylesheet" href="style.css" />
<script type="text/javascript"> <script type="text/javascript">
var completed = 0; var completed = 0;
var benchmarks = BenchmarkSuite.CountBenchmarks(); var benchmarks = BenchmarkSuite.CountBenchmarks();
@ -25,12 +30,12 @@ function ShowProgress(name) {
function AddResult(name, result) { function AddResult(name, result) {
var text = name + ': ' + result; var text = name + ': ' + result;
var results = document.getElementById("results"); var results = document.getElementById("results");
results.innerHTML += (text + "<br/>"); results.innerHTML += (text + "<br>");
} }
function AddError(name, error) { function AddError(name, error) {
AddResult(name, '<b>error</b>'); AddResult(name, '<b>error<\/b>');
success = false; success = false;
} }
@ -53,11 +58,11 @@ function Run() {
function Load() { function Load() {
var version = BenchmarkSuite.version; var version = BenchmarkSuite.version;
document.getElementById("version").innerHTML = version; document.getElementById("version").innerHTML = version;
window.setTimeout(Run, 200); setTimeout(Run, 200);
} }
</script> </script>
</head> </head>
<body onLoad="Load()"> <body onload="Load()">
<div> <div>
<div class="title"><h1>V8 Benchmark Suite - version <span id="version">?</span></h1></div> <div class="title"><h1>V8 Benchmark Suite - version <span id="version">?</span></h1></div>
<table> <table>
@ -71,15 +76,15 @@ the individual benchmarks and of a reference system (score
higher scores means better performance: <em>Bigger is better!</em> higher scores means better performance: <em>Bigger is better!</em>
<ul> <ul>
<li><b>Richards</b><br/>OS kernel simulation benchmark, originally written in BCPL by Martin Richards (<i>539 lines</i>).</li> <li><b>Richards</b><br>OS kernel simulation benchmark, originally written in BCPL by Martin Richards (<i>539 lines</i>).</li>
<li><b>DeltaBlue</b><br/>One-way constraint solver, originally written in Smalltalk by John Maloney and Mario Wolczko (<i>880 lines</i>).</li> <li><b>DeltaBlue</b><br>One-way constraint solver, originally written in Smalltalk by John Maloney and Mario Wolczko (<i>880 lines</i>).</li>
<li><b>Crypto</b><br/>Encryption and decryption benchmark based on code by Tom Wu (<i>1698 lines</i>).</li> <li><b>Crypto</b><br>Encryption and decryption benchmark based on code by Tom Wu (<i>1698 lines</i>).</li>
<li><b>RayTrace</b><br/>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>935 lines</i>).</li> <li><b>RayTrace</b><br>Ray tracer benchmark based on code by <a href="http://flog.co.nz/">Adam Burmister</a> (<i>935 lines</i>).</li>
<li><b>EarleyBoyer</b><br/>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4685 lines</i>).</li> <li><b>EarleyBoyer</b><br>Classic Scheme benchmarks, translated to JavaScript by Florian Loitsch's Scheme2Js compiler (<i>4685 lines</i>).</li>
<li><b>RegExp</b><br/>Regular expression benchmark generated by extracting regular expression operations from 50 of the most popular web pages <li><b>RegExp</b><br>Regular expression benchmark generated by extracting regular expression operations from 50 of the most popular web pages
(<i>1614 lines</i>). (<i>1614 lines</i>).
</li> </li>
<li><b>Splay</b><br/>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>378 lines</i>).</li> <li><b>Splay</b><br>Data manipulation benchmark that deals with splay trees and exercises the automatic memory management subsystem (<i>378 lines</i>).</li>
</ul> </ul>
<p> <p>
@ -92,9 +97,9 @@ the <a href="http://v8.googlecode.com/svn/data/benchmarks/current/revisions.html
</td><td style="text-align: center"> </td><td style="text-align: center">
<div class="run"> <div class="run">
<div id="status" style="text-align: center; margin-top: 50px; font-size: 120%; font-weight: bold;">Starting...</div> <div id="status">Starting...</div>
<div style="text-align: left; margin: 30px 0 0 90px;" id="results"> <div id="results">
<div> </div>
</div> </div>
</td></tr></table> </td></tr></table>

38
deps/v8/benchmarks/style.css

@ -1,11 +1,7 @@
body { hr {
font-family: sans-serif;
}
hr{
border: 1px solid; border: 1px solid;
border-color: #36C; border-color: #36C;
margin: 1em 0 margin: 1em 0;
} }
h1, h2, h3, h4 { h1, h2, h3, h4 {
@ -14,27 +10,17 @@ h1, h2, h3, h4 {
} }
h1 { h1 {
font-size: 190%; font-size: 154%;
height: 1.2em;
}
h2{
font-size: 140%;
height: 1.2em; height: 1.2em;
} }
h3{
font-size: 100%;
}
li{ li {
margin: .3em 0 1em 0; margin: .3em 0 1em 0;
} }
body{ body {
font-family: Helvetica,Arial,sans-serif; font-family: Helvetica,Arial,sans-serif;
font-size: small;
color: #000; color: #000;
background-color: #fff; background-color: #fff;
} }
@ -54,7 +40,7 @@ div.subtitle {
} }
td.contents { td.contents {
text-align: start; text-align: left;
} }
div.run { div.run {
@ -68,3 +54,15 @@ div.run {
background-repeat: no-repeat; background-repeat: no-repeat;
border: 1px solid rgb(51, 102, 204); border: 1px solid rgb(51, 102, 204);
} }
#status {
text-align: center;
margin-top: 50px;
font-size: 120%;
font-weight: bold;
}
#results {
text-align: left;
margin: 30px 0 0 90px;
}

6
deps/v8/include/v8.h

@ -1176,6 +1176,12 @@ class V8EXPORT Array : public Object {
public: public:
uint32_t Length() const; uint32_t Length() const;
/**
* Clones an element at index |index|. Returns an empty
* handle if cloning fails (for any reason).
*/
Local<Object> CloneElementAt(uint32_t index);
static Local<Array> New(int length = 0); static Local<Array> New(int length = 0);
static Array* Cast(Value* obj); static Array* Cast(Value* obj);
private: private:

5
deps/v8/src/accessors.cc

@ -511,7 +511,10 @@ Object* Accessors::FunctionGetArguments(Object* object, void*) {
// If there is an arguments variable in the stack, we return that. // If there is an arguments variable in the stack, we return that.
int index = ScopeInfo<>::StackSlotIndex(frame->code(), int index = ScopeInfo<>::StackSlotIndex(frame->code(),
Heap::arguments_symbol()); Heap::arguments_symbol());
if (index >= 0) return frame->GetExpression(index); if (index >= 0) {
Handle<Object> arguments = Handle<Object>(frame->GetExpression(index));
if (!arguments->IsTheHole()) return *arguments;
}
// If there isn't an arguments variable in the stack, we need to // If there isn't an arguments variable in the stack, we need to
// find the frame that holds the actual arguments passed to the // find the frame that holds the actual arguments passed to the

20
deps/v8/src/api.cc

@ -3012,6 +3012,26 @@ uint32_t v8::Array::Length() const {
} }
Local<Object> Array::CloneElementAt(uint32_t index) {
ON_BAILOUT("v8::Array::CloneElementAt()", return Local<Object>());
i::Handle<i::JSObject> self = Utils::OpenHandle(this);
if (!self->HasFastElements()) {
return Local<Object>();
}
i::FixedArray* elms = self->elements();
i::Object* paragon = elms->get(index);
if (!paragon->IsJSObject()) {
return Local<Object>();
}
i::Handle<i::JSObject> paragon_handle(i::JSObject::cast(paragon));
EXCEPTION_PREAMBLE();
i::Handle<i::JSObject> result = i::Copy(paragon_handle);
has_pending_exception = result.is_null();
EXCEPTION_BAILOUT_CHECK(Local<Object>());
return Utils::ToLocal(result);
}
Local<String> v8::String::NewSymbol(const char* data, int length) { Local<String> v8::String::NewSymbol(const char* data, int length) {
EnsureInitialized("v8::String::NewSymbol()"); EnsureInitialized("v8::String::NewSymbol()");
LOG_API("String::NewSymbol(char)"); LOG_API("String::NewSymbol(char)");

2
deps/v8/src/arm/assembler-arm-inl.h

@ -50,7 +50,7 @@ Condition NegateCondition(Condition cc) {
} }
void RelocInfo::apply(int delta) { void RelocInfo::apply(intptr_t delta) {
if (RelocInfo::IsInternalReference(rmode_)) { if (RelocInfo::IsInternalReference(rmode_)) {
// absolute code pointer inside code object moves with the code object. // absolute code pointer inside code object moves with the code object.
int32_t* p = reinterpret_cast<int32_t*>(pc_); int32_t* p = reinterpret_cast<int32_t*>(pc_);

28
deps/v8/src/arm/builtins-arm.cc

@ -67,6 +67,24 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
__ b(ne, &non_function_call); __ b(ne, &non_function_call);
// Jump to the function-specific construct stub.
__ ldr(r2, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
__ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kConstructStubOffset));
__ add(pc, r2, Operand(Code::kHeaderSize - kHeapObjectTag));
// r0: number of arguments
// r1: called object
__ bind(&non_function_call);
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Enter a construct frame. // Enter a construct frame.
__ EnterConstructFrame(); __ EnterConstructFrame();
@ -177,16 +195,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1)); __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2 - 1));
__ add(sp, sp, Operand(kPointerSize)); __ add(sp, sp, Operand(kPointerSize));
__ Jump(lr); __ Jump(lr);
// r0: number of arguments
// r1: called object
__ bind(&non_function_call);
// Set expected number of arguments to zero (not changing r0).
__ mov(r2, Operand(0));
__ GetBuiltinEntry(r3, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ Jump(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
} }

134
deps/v8/src/arm/codegen-arm.cc

@ -1471,85 +1471,6 @@ void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
} }
int CodeGenerator::FastCaseSwitchMaxOverheadFactor() {
return kFastSwitchMaxOverheadFactor;
}
int CodeGenerator::FastCaseSwitchMinCaseCount() {
return kFastSwitchMinCaseCount;
}
void CodeGenerator::GenerateFastCaseSwitchJumpTable(
SwitchStatement* node,
int min_index,
int range,
Label* default_label,
Vector<Label*> case_targets,
Vector<Label> case_labels) {
VirtualFrame::SpilledScope spilled_scope;
JumpTarget setup_default;
JumpTarget is_smi;
// A non-null default label pointer indicates a default case among
// the case labels. Otherwise we use the break target as a
// "default" for failure to hit the jump table.
JumpTarget* default_target =
(default_label == NULL) ? node->break_target() : &setup_default;
ASSERT(kSmiTag == 0 && kSmiTagSize <= 2);
frame_->EmitPop(r0);
// Test for a Smi value in a HeapNumber.
__ tst(r0, Operand(kSmiTagMask));
is_smi.Branch(eq);
__ CompareObjectType(r0, r1, r1, HEAP_NUMBER_TYPE);
default_target->Branch(ne);
frame_->EmitPush(r0);
frame_->CallRuntime(Runtime::kNumberToSmi, 1);
is_smi.Bind();
if (min_index != 0) {
// Small positive numbers can be immediate operands.
if (min_index < 0) {
// If min_index is Smi::kMinValue, -min_index is not a Smi.
if (Smi::IsValid(-min_index)) {
__ add(r0, r0, Operand(Smi::FromInt(-min_index)));
} else {
__ add(r0, r0, Operand(Smi::FromInt(-min_index - 1)));
__ add(r0, r0, Operand(Smi::FromInt(1)));
}
} else {
__ sub(r0, r0, Operand(Smi::FromInt(min_index)));
}
}
__ tst(r0, Operand(0x80000000 | kSmiTagMask));
default_target->Branch(ne);
__ cmp(r0, Operand(Smi::FromInt(range)));
default_target->Branch(ge);
VirtualFrame* start_frame = new VirtualFrame(frame_);
__ SmiJumpTable(r0, case_targets);
GenerateFastCaseSwitchCases(node, case_labels, start_frame);
// If there was a default case among the case labels, we need to
// emit code to jump to it from the default target used for failure
// to hit the jump table.
if (default_label != NULL) {
if (has_valid_frame()) {
node->break_target()->Jump();
}
setup_default.Bind();
frame_->MergeTo(start_frame);
__ b(default_label);
DeleteFrame();
}
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
}
}
void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) { void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
#ifdef DEBUG #ifdef DEBUG
int original_height = frame_->height(); int original_height = frame_->height();
@ -1560,10 +1481,6 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
node->break_target()->set_direction(JumpTarget::FORWARD_ONLY); node->break_target()->set_direction(JumpTarget::FORWARD_ONLY);
LoadAndSpill(node->tag()); LoadAndSpill(node->tag());
if (TryGenerateFastCaseSwitchStatement(node)) {
ASSERT(!has_valid_frame() || frame_->height() == original_height);
return;
}
JumpTarget next_test; JumpTarget next_test;
JumpTarget fall_through; JumpTarget fall_through;
@ -4728,27 +4645,53 @@ static void HandleBinaryOpSlowCases(MacroAssembler* masm,
// Tries to get a signed int32 out of a double precision floating point heap // Tries to get a signed int32 out of a double precision floating point heap
// number. Rounds towards 0. Only succeeds for doubles that are in the ranges // number. Rounds towards 0. Fastest for doubles that are in the ranges
// -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds // -0x7fffffff to -0x40000000 or 0x40000000 to 0x7fffffff. This corresponds
// almost to the range of signed int32 values that are not Smis. Jumps to the // almost to the range of signed int32 values that are not Smis. Jumps to the
// label if the double isn't in the range it can cope with. // label 'slow' if the double isn't in the range -0x80000000.0 to 0x80000000.0
// (excluding the endpoints).
static void GetInt32(MacroAssembler* masm, static void GetInt32(MacroAssembler* masm,
Register source, Register source,
Register dest, Register dest,
Register scratch, Register scratch,
Register scratch2,
Label* slow) { Label* slow) {
Register scratch2 = dest; Label right_exponent, done;
// Get exponent word. // Get exponent word.
__ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset)); __ ldr(scratch, FieldMemOperand(source, HeapNumber::kExponentOffset));
// Get exponent alone in scratch2. // Get exponent alone in scratch2.
__ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask)); __ and_(scratch2, scratch, Operand(HeapNumber::kExponentMask));
// Load dest with zero. We use this either for the final shift or
// for the answer.
__ mov(dest, Operand(0));
// Check whether the exponent matches a 32 bit signed int that is not a Smi. // Check whether the exponent matches a 32 bit signed int that is not a Smi.
// A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). // A non-Smi integer is 1.xxx * 2^30 so the exponent is 30 (biased). This is
// the exponent that we are fastest at and also the highest exponent we can
// handle here.
const uint32_t non_smi_exponent = const uint32_t non_smi_exponent =
(HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift; (HeapNumber::kExponentBias + 30) << HeapNumber::kExponentShift;
__ cmp(scratch2, Operand(non_smi_exponent)); __ cmp(scratch2, Operand(non_smi_exponent));
// If not, then we go slow. // If we have a match of the int32-but-not-Smi exponent then skip some logic.
__ b(ne, slow); __ b(eq, &right_exponent);
// If the exponent is higher than that then go to slow case. This catches
// numbers that don't fit in a signed int32, infinities and NaNs.
__ b(gt, slow);
// We know the exponent is smaller than 30 (biased). If it is less than
// 0 (biased) then the number is smaller in magnitude than 1.0 * 2^0, ie
// it rounds to zero.
const uint32_t zero_exponent =
(HeapNumber::kExponentBias + 0) << HeapNumber::kExponentShift;
__ sub(scratch2, scratch2, Operand(zero_exponent), SetCC);
// Dest already has a Smi zero.
__ b(lt, &done);
// We have a shifted exponent between 0 and 30 in scratch2.
__ mov(dest, Operand(scratch2, LSR, HeapNumber::kExponentShift));
// We now have the exponent in dest. Subtract from 30 to get
// how much to shift down.
__ rsb(dest, dest, Operand(30));
__ bind(&right_exponent);
// Get the top bits of the mantissa. // Get the top bits of the mantissa.
__ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask)); __ and_(scratch2, scratch, Operand(HeapNumber::kMantissaMask));
// Put back the implicit 1. // Put back the implicit 1.
@ -4760,12 +4703,17 @@ static void GetInt32(MacroAssembler* masm,
__ mov(scratch2, Operand(scratch2, LSL, shift_distance)); __ mov(scratch2, Operand(scratch2, LSL, shift_distance));
// Put sign in zero flag. // Put sign in zero flag.
__ tst(scratch, Operand(HeapNumber::kSignMask)); __ tst(scratch, Operand(HeapNumber::kSignMask));
// Get the second half of the double. // Get the second half of the double. For some exponents we don't actually
// need this because the bits get shifted out again, but it's probably slower
// to test than just to do it.
__ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset)); __ ldr(scratch, FieldMemOperand(source, HeapNumber::kMantissaOffset));
// Shift down 22 bits to get the last 10 bits. // Shift down 22 bits to get the last 10 bits.
__ orr(dest, scratch2, Operand(scratch, LSR, 32 - shift_distance)); __ orr(scratch, scratch2, Operand(scratch, LSR, 32 - shift_distance));
// Move down according to the exponent.
__ mov(dest, Operand(scratch, LSR, dest));
// Fix sign if sign bit was set. // Fix sign if sign bit was set.
__ rsb(dest, dest, Operand(0), LeaveCC, ne); __ rsb(dest, dest, Operand(0), LeaveCC, ne);
__ bind(&done);
} }
@ -4785,7 +4733,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
__ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number. __ b(eq, &r1_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE); __ CompareObjectType(r1, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow); __ b(ne, &slow);
GetInt32(masm, r1, r3, r4, &slow); GetInt32(masm, r1, r3, r4, r5, &slow);
__ jmp(&done_checking_r1); __ jmp(&done_checking_r1);
__ bind(&r1_is_smi); __ bind(&r1_is_smi);
__ mov(r3, Operand(r1, ASR, 1)); __ mov(r3, Operand(r1, ASR, 1));
@ -4795,7 +4743,7 @@ void GenericBinaryOpStub::HandleNonSmiBitwiseOp(MacroAssembler* masm) {
__ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number. __ b(eq, &r0_is_smi); // It's a Smi so don't check it's a heap number.
__ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
__ b(ne, &slow); __ b(ne, &slow);
GetInt32(masm, r0, r2, r4, &slow); GetInt32(masm, r0, r2, r4, r5, &slow);
__ jmp(&done_checking_r0); __ jmp(&done_checking_r0);
__ bind(&r0_is_smi); __ bind(&r0_is_smi);
__ mov(r2, Operand(r0, ASR, 1)); __ mov(r2, Operand(r0, ASR, 1));

53
deps/v8/src/arm/codegen-arm.h

@ -358,59 +358,6 @@ class CodeGenerator: public AstVisitor {
inline void GenerateMathSin(ZoneList<Expression*>* args); inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args); inline void GenerateMathCos(ZoneList<Expression*>* args);
// Methods and constants for fast case switch statement support.
//
// Only allow fast-case switch if the range of labels is at most
// this factor times the number of case labels.
// Value is derived from comparing the size of code generated by the normal
// switch code for Smi-labels to the size of a single pointer. If code
// quality increases this number should be decreased to match.
static const int kFastSwitchMaxOverheadFactor = 10;
// Minimal number of switch cases required before we allow jump-table
// optimization.
static const int kFastSwitchMinCaseCount = 5;
// The limit of the range of a fast-case switch, as a factor of the number
// of cases of the switch. Each platform should return a value that
// is optimal compared to the default code generated for a switch statement
// on that platform.
int FastCaseSwitchMaxOverheadFactor();
// The minimal number of cases in a switch before the fast-case switch
// optimization is enabled. Each platform should return a value that
// is optimal compared to the default code generated for a switch statement
// on that platform.
int FastCaseSwitchMinCaseCount();
// Allocate a jump table and create code to jump through it.
// Should call GenerateFastCaseSwitchCases to generate the code for
// all the cases at the appropriate point.
void GenerateFastCaseSwitchJumpTable(SwitchStatement* node,
int min_index,
int range,
Label* default_label,
Vector<Label*> case_targets,
Vector<Label> case_labels);
// Generate the code for cases for the fast case switch.
// Called by GenerateFastCaseSwitchJumpTable.
void GenerateFastCaseSwitchCases(SwitchStatement* node,
Vector<Label> case_labels,
VirtualFrame* start_frame);
// Fast support for constant-Smi switches.
void GenerateFastCaseSwitchStatement(SwitchStatement* node,
int min_index,
int range,
int default_index);
// Fast support for constant-Smi switches. Tests whether switch statement
// permits optimization and calls GenerateFastCaseSwitch if it does.
// Returns true if the fast-case switch was generated, and false if not.
bool TryGenerateFastCaseSwitchStatement(SwitchStatement* node);
// Methods used to indicate which source code is generated for. Source // Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation // positions are collected by the assembler and emitted with the relocation
// information. // information.

4
deps/v8/src/arm/virtual-frame-arm.h

@ -359,14 +359,14 @@ class VirtualFrame : public ZoneObject {
void EmitPush(Register reg); void EmitPush(Register reg);
// Push an element on the virtual frame. // Push an element on the virtual frame.
void Push(Register reg, StaticType static_type = StaticType()); void Push(Register reg);
void Push(Handle<Object> value); void Push(Handle<Object> value);
void Push(Smi* value) { Push(Handle<Object>(value)); } void Push(Smi* value) { Push(Handle<Object>(value)); }
// Pushing a result invalidates it (its contents become owned by the frame). // Pushing a result invalidates it (its contents become owned by the frame).
void Push(Result* result) { void Push(Result* result) {
if (result->is_register()) { if (result->is_register()) {
Push(result->reg(), result->static_type()); Push(result->reg());
} else { } else {
ASSERT(result->is_constant()); ASSERT(result->is_constant());
Push(result->handle()); Push(result->handle());

10
deps/v8/src/assembler.h

@ -183,7 +183,7 @@ class RelocInfo BASE_EMBEDDED {
intptr_t data() const { return data_; } intptr_t data() const { return data_; }
// Apply a relocation by delta bytes // Apply a relocation by delta bytes
INLINE(void apply(int delta)); INLINE(void apply(intptr_t delta));
// Read/modify the code target in the branch/call instruction // Read/modify the code target in the branch/call instruction
// this relocation applies to; // this relocation applies to;
@ -265,8 +265,12 @@ class RelocInfoWriter BASE_EMBEDDED {
last_pc_ = pc; last_pc_ = pc;
} }
// Max size (bytes) of a written RelocInfo. // Max size (bytes) of a written RelocInfo. Longest encoding is
static const int kMaxSize = 12; // ExtraTag, VariableLengthPCJump, ExtraTag, pc_delta, ExtraTag, data_delta.
// On ia32 and arm this is 1 + 4 + 1 + 1 + 1 + 4 = 12.
// On x64 this is 1 + 4 + 1 + 1 + 1 + 8 == 16;
// Here we use the maximum of the two.
static const int kMaxSize = 16;
private: private:
inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta); inline uint32_t WriteVariableLengthPCJump(uint32_t pc_delta);

2
deps/v8/src/ast.cc

@ -68,7 +68,7 @@ VariableProxy::VariableProxy(Handle<String> name,
// names must be canonicalized for fast equality checks // names must be canonicalized for fast equality checks
ASSERT(name->IsSymbol()); ASSERT(name->IsSymbol());
// at least one access, otherwise no need for a VariableProxy // at least one access, otherwise no need for a VariableProxy
var_uses_.RecordAccess(1); var_uses_.RecordRead(1);
} }

16
deps/v8/src/ast.h

@ -802,13 +802,20 @@ class VariableProxy: public Expression {
Variable* AsVariable() { Variable* AsVariable() {
return this == NULL || var_ == NULL ? NULL : var_->AsVariable(); return this == NULL || var_ == NULL ? NULL : var_->AsVariable();
} }
virtual bool IsValidLeftHandSide() { virtual bool IsValidLeftHandSide() {
return var_ == NULL ? true : var_->IsValidLeftHandSide(); return var_ == NULL ? true : var_->IsValidLeftHandSide();
} }
bool IsVariable(Handle<String> n) { bool IsVariable(Handle<String> n) {
return !is_this() && name().is_identical_to(n); return !is_this() && name().is_identical_to(n);
} }
bool IsArguments() {
Variable* variable = AsVariable();
return (variable == NULL) ? false : variable->is_arguments();
}
// If this assertion fails it means that some code has tried to // If this assertion fails it means that some code has tried to
// treat the special "this" variable as an ordinary variable with // treat the special "this" variable as an ordinary variable with
// the name "this". // the name "this".
@ -890,12 +897,13 @@ class Slot: public Expression {
virtual void Accept(AstVisitor* v); virtual void Accept(AstVisitor* v);
// Type testing & conversion // Type testing & conversion
virtual Slot* AsSlot() { return this; } virtual Slot* AsSlot() { return this; }
// Accessors // Accessors
Variable* var() const { return var_; } Variable* var() const { return var_; }
Type type() const { return type_; } Type type() const { return type_; }
int index() const { return index_; } int index() const { return index_; }
bool is_arguments() const { return var_->is_arguments(); }
private: private:
Variable* var_; Variable* var_;

5
deps/v8/src/bootstrapper.cc

@ -1113,11 +1113,8 @@ bool Genesis::InstallNatives() {
} }
#ifdef V8_HOST_ARCH_64_BIT #ifdef V8_HOST_ARCH_64_BIT
// TODO(X64): Remove these tests when code generation works and is stable. // TODO(X64): Remove this test when code generation works and is stable.
MacroAssembler::ConstructAndTestJSFunction();
CodeGenerator::TestCodeGenerator(); CodeGenerator::TestCodeGenerator();
// TODO(X64): Reenable remaining initialization when code generation works.
return true;
#endif // V8_HOST_ARCH_64_BIT #endif // V8_HOST_ARCH_64_BIT

2
deps/v8/src/builtins.h

@ -51,6 +51,7 @@ namespace internal {
#define BUILTIN_LIST_A(V) \ #define BUILTIN_LIST_A(V) \
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED) \ V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructCall, BUILTIN, UNINITIALIZED) \ V(JSConstructCall, BUILTIN, UNINITIALIZED) \
V(JSConstructStubGeneric, BUILTIN, UNINITIALIZED) \
V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \ V(JSEntryTrampoline, BUILTIN, UNINITIALIZED) \
V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \ V(JSConstructEntryTrampoline, BUILTIN, UNINITIALIZED) \
\ \
@ -210,6 +211,7 @@ class Builtins : public AllStatic {
static void Generate_Adaptor(MacroAssembler* masm, CFunctionId id); static void Generate_Adaptor(MacroAssembler* masm, CFunctionId id);
static void Generate_JSConstructCall(MacroAssembler* masm); static void Generate_JSConstructCall(MacroAssembler* masm);
static void Generate_JSConstructStubGeneric(MacroAssembler* masm);
static void Generate_JSEntryTrampoline(MacroAssembler* masm); static void Generate_JSEntryTrampoline(MacroAssembler* masm);
static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm); static void Generate_JSConstructEntryTrampoline(MacroAssembler* masm);
static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm); static void Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm);

125
deps/v8/src/codegen.cc

@ -225,7 +225,7 @@ Handle<Code> CodeGenerator::MakeCode(FunctionLiteral* flit,
bool CodeGenerator::ShouldGenerateLog(Expression* type) { bool CodeGenerator::ShouldGenerateLog(Expression* type) {
ASSERT(type != NULL); ASSERT(type != NULL);
if (!Logger::IsEnabled()) return false; if (!Logger::is_logging()) return false;
Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle()); Handle<String> name = Handle<String>::cast(type->AsLiteral()->handle());
if (FLAG_log_regexp) { if (FLAG_log_regexp) {
static Vector<const char> kRegexp = CStrVector("regexp"); static Vector<const char> kRegexp = CStrVector("regexp");
@ -472,129 +472,6 @@ bool CodeGenerator::PatchInlineRuntimeEntry(Handle<String> name,
} }
void CodeGenerator::GenerateFastCaseSwitchStatement(SwitchStatement* node,
int min_index,
int range,
int default_index) {
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
// Label pointer per number in range.
SmartPointer<Label*> case_targets(NewArray<Label*>(range));
// Label per switch case.
SmartPointer<Label> case_labels(NewArray<Label>(length));
Label* fail_label =
default_index >= 0 ? &(case_labels[default_index]) : NULL;
// Populate array of label pointers for each number in the range.
// Initally put the failure label everywhere.
for (int i = 0; i < range; i++) {
case_targets[i] = fail_label;
}
// Overwrite with label of a case for the number value of that case.
// (In reverse order, so that if the same label occurs twice, the
// first one wins).
for (int i = length - 1; i >= 0 ; i--) {
CaseClause* clause = cases->at(i);
if (!clause->is_default()) {
Object* label_value = *(clause->label()->AsLiteral()->handle());
int case_value = Smi::cast(label_value)->value();
case_targets[case_value - min_index] = &(case_labels[i]);
}
}
GenerateFastCaseSwitchJumpTable(node,
min_index,
range,
fail_label,
Vector<Label*>(*case_targets, range),
Vector<Label>(*case_labels, length));
}
void CodeGenerator::GenerateFastCaseSwitchCases(
SwitchStatement* node,
Vector<Label> case_labels,
VirtualFrame* start_frame) {
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
for (int i = 0; i < length; i++) {
Comment cmnt(masm(), "[ Case clause");
// We may not have a virtual frame if control flow did not fall
// off the end of the previous case. In that case, use the start
// frame. Otherwise, we have to merge the existing one to the
// start frame as part of the previous case.
if (!has_valid_frame()) {
RegisterFile empty;
SetFrame(new VirtualFrame(start_frame), &empty);
} else {
frame_->MergeTo(start_frame);
}
masm()->bind(&case_labels[i]);
VisitStatements(cases->at(i)->statements());
}
}
bool CodeGenerator::TryGenerateFastCaseSwitchStatement(SwitchStatement* node) {
// TODO(238): Due to issue 238, fast case switches can crash on ARM
// and possibly IA32. They are disabled for now.
// See http://code.google.com/p/v8/issues/detail?id=238
return false;
ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length();
if (length < FastCaseSwitchMinCaseCount()) {
return false;
}
// Test whether fast-case should be used.
int default_index = -1;
int min_index = Smi::kMaxValue;
int max_index = Smi::kMinValue;
for (int i = 0; i < length; i++) {
CaseClause* clause = cases->at(i);
if (clause->is_default()) {
if (default_index >= 0) {
// There is more than one default label. Defer to the normal case
// for error.
return false;
}
default_index = i;
} else {
Expression* label = clause->label();
Literal* literal = label->AsLiteral();
if (literal == NULL) {
return false; // fail fast case
}
Object* value = *(literal->handle());
if (!value->IsSmi()) {
return false;
}
int int_value = Smi::cast(value)->value();
min_index = Min(int_value, min_index);
max_index = Max(int_value, max_index);
}
}
// All labels are known to be Smis.
int range = max_index - min_index + 1; // |min..max| inclusive
if (range / FastCaseSwitchMaxOverheadFactor() > length) {
return false; // range of labels is too sparse
}
// Optimization accepted, generate code.
GenerateFastCaseSwitchStatement(node, min_index, range, default_index);
return true;
}
void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) { void CodeGenerator::CodeForFunctionPosition(FunctionLiteral* fun) {
if (FLAG_debug_info) { if (FLAG_debug_info) {
int pos = fun->start_position(); int pos = fun->start_position();

6
deps/v8/src/codegen.h

@ -61,12 +61,6 @@
// FindInlineRuntimeLUT // FindInlineRuntimeLUT
// CheckForInlineRuntimeCall // CheckForInlineRuntimeCall
// PatchInlineRuntimeEntry // PatchInlineRuntimeEntry
// GenerateFastCaseSwitchStatement
// GenerateFastCaseSwitchCases
// TryGenerateFastCaseSwitchStatement
// GenerateFastCaseSwitchJumpTable
// FastCaseSwitchMinCaseCount
// FastCaseSwitchMaxOverheadFactor
// CodeForFunctionPosition // CodeForFunctionPosition
// CodeForReturnPosition // CodeForReturnPosition
// CodeForStatementPosition // CodeForStatementPosition

360
deps/v8/src/compilation-cache.cc

@ -32,28 +32,123 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
enum {
// The number of script generations tell how many GCs a script can // The number of sub caches covering the different types to cache.
// survive in the compilation cache, before it will be flushed if it static const int kSubCacheCount = 4;
// hasn't been used.
NUMBER_OF_SCRIPT_GENERATIONS = 5, // The number of generations for each sub cache.
static const int kScriptGenerations = 5;
// The compilation cache consists of tables - one for each entry static const int kEvalGlobalGenerations = 2;
// kind plus extras for the script generations. static const int kEvalContextualGenerations = 2;
NUMBER_OF_TABLE_ENTRIES = static const int kRegExpGenerations = 2;
CompilationCache::LAST_ENTRY + NUMBER_OF_SCRIPT_GENERATIONS
// Initial of each compilation cache table allocated.
static const int kInitialCacheSize = 64;
// The compilation cache consists of several generational sub-caches which uses
// this class as a base class. A sub-cache contains a compilation cache tables
// for each generation of the sub-cache. As the same source code string has
// different compiled code for scripts and evals. Internally, we use separate
// sub-caches to avoid getting the wrong kind of result when looking up.
class CompilationSubCache {
public:
explicit CompilationSubCache(int generations): generations_(generations) {
tables_ = NewArray<Object*>(generations);
}
// Get the compilation cache tables for a specific generation.
Handle<CompilationCacheTable> GetTable(int generation);
// Age the sub-cache by evicting the oldest generation and creating a new
// young generation.
void Age();
// GC support.
void Iterate(ObjectVisitor* v);
// Clear this sub-cache evicting all its content.
void Clear();
// Number of generations in this sub-cache.
inline int generations() { return generations_; }
private:
int generations_; // Number of generations.
Object** tables_; // Compilation cache tables - one for each generation.
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationSubCache);
}; };
// Sub-cache for scripts.
class CompilationCacheScript : public CompilationSubCache {
public:
explicit CompilationCacheScript(int generations)
: CompilationSubCache(generations) { }
Handle<JSFunction> Lookup(Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset);
void Put(Handle<String> source, Handle<JSFunction> boilerplate);
private:
bool HasOrigin(Handle<JSFunction> boilerplate,
Handle<Object> name,
int line_offset,
int column_offset);
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheScript);
};
// Sub-cache for eval scripts.
class CompilationCacheEval: public CompilationSubCache {
public:
explicit CompilationCacheEval(int generations)
: CompilationSubCache(generations) { }
Handle<JSFunction> Lookup(Handle<String> source, Handle<Context> context);
void Put(Handle<String> source,
Handle<Context> context,
Handle<JSFunction> boilerplate);
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheEval);
};
// Sub-cache for regular expressions.
class CompilationCacheRegExp: public CompilationSubCache {
public:
explicit CompilationCacheRegExp(int generations)
: CompilationSubCache(generations) { }
Handle<FixedArray> Lookup(Handle<String> source, JSRegExp::Flags flags);
void Put(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data);
DISALLOW_IMPLICIT_CONSTRUCTORS(CompilationCacheRegExp);
};
// Statically allocate all the sub-caches.
static CompilationCacheScript script(kScriptGenerations);
static CompilationCacheEval eval_global(kEvalGlobalGenerations);
static CompilationCacheEval eval_contextual(kEvalContextualGenerations);
static CompilationCacheRegExp reg_exp(kRegExpGenerations);
static CompilationSubCache* subcaches[kSubCacheCount] =
{&script, &eval_global, &eval_contextual, &reg_exp};
// Current enable state of the compilation cache. // Current enable state of the compilation cache.
static bool enabled = true; static bool enabled = true;
static inline bool IsEnabled() { static inline bool IsEnabled() {
return FLAG_compilation_cache && enabled; return FLAG_compilation_cache && enabled;
} }
// Keep separate tables for the different entry kinds.
static Object* tables[NUMBER_OF_TABLE_ENTRIES] = { 0, };
static Handle<CompilationCacheTable> AllocateTable(int size) { static Handle<CompilationCacheTable> AllocateTable(int size) {
CALL_HEAP_FUNCTION(CompilationCacheTable::Allocate(size), CALL_HEAP_FUNCTION(CompilationCacheTable::Allocate(size),
@ -61,54 +156,40 @@ static Handle<CompilationCacheTable> AllocateTable(int size) {
} }
static Handle<CompilationCacheTable> GetTable(int index) { Handle<CompilationCacheTable> CompilationSubCache::GetTable(int generation) {
ASSERT(index >= 0 && index < NUMBER_OF_TABLE_ENTRIES); ASSERT(generation < generations_);
Handle<CompilationCacheTable> result; Handle<CompilationCacheTable> result;
if (tables[index]->IsUndefined()) { if (tables_[generation]->IsUndefined()) {
static const int kInitialCacheSize = 64;
result = AllocateTable(kInitialCacheSize); result = AllocateTable(kInitialCacheSize);
tables[index] = *result; tables_[generation] = *result;
} else { } else {
CompilationCacheTable* table = CompilationCacheTable::cast(tables[index]); CompilationCacheTable* table =
CompilationCacheTable::cast(tables_[generation]);
result = Handle<CompilationCacheTable>(table); result = Handle<CompilationCacheTable>(table);
} }
return result; return result;
} }
static Handle<JSFunction> Lookup(Handle<String> source, void CompilationSubCache::Age() {
Handle<Context> context, // Age the generations implicitly killing off the oldest.
CompilationCache::Entry entry) { for (int i = generations_ - 1; i > 0; i--) {
// Make sure not to leak the table into the surrounding handle tables_[i] = tables_[i - 1];
// scope. Otherwise, we risk keeping old tables around even after
// having cleared the cache.
Object* result;
{ HandleScope scope;
Handle<CompilationCacheTable> table = GetTable(entry);
result = table->LookupEval(*source, *context);
}
if (result->IsJSFunction()) {
return Handle<JSFunction>(JSFunction::cast(result));
} else {
return Handle<JSFunction>::null();
} }
// Set the first generation as unborn.
tables_[0] = Heap::undefined_value();
} }
static Handle<FixedArray> Lookup(Handle<String> source, void CompilationSubCache::Iterate(ObjectVisitor* v) {
JSRegExp::Flags flags) { v->VisitPointers(&tables_[0], &tables_[generations_]);
// Make sure not to leak the table into the surrounding handle }
// scope. Otherwise, we risk keeping old tables around even after
// having cleared the cache.
Object* result; void CompilationSubCache::Clear() {
{ HandleScope scope; for (int i = 0; i < generations_; i++) {
Handle<CompilationCacheTable> table = GetTable(CompilationCache::REGEXP); tables_[i] = Heap::undefined_value();
result = table->LookupRegExp(*source, flags);
}
if (result->IsFixedArray()) {
return Handle<FixedArray>(FixedArray::cast(result));
} else {
return Handle<FixedArray>::null();
} }
} }
@ -116,10 +197,10 @@ static Handle<FixedArray> Lookup(Handle<String> source,
// We only re-use a cached function for some script source code if the // We only re-use a cached function for some script source code if the
// script originates from the same place. This is to avoid issues // script originates from the same place. This is to avoid issues
// when reporting errors, etc. // when reporting errors, etc.
static bool HasOrigin(Handle<JSFunction> boilerplate, bool CompilationCacheScript::HasOrigin(Handle<JSFunction> boilerplate,
Handle<Object> name, Handle<Object> name,
int line_offset, int line_offset,
int column_offset) { int column_offset) {
Handle<Script> script = Handle<Script> script =
Handle<Script>(Script::cast(boilerplate->shared()->script())); Handle<Script>(Script::cast(boilerplate->shared()->script()));
// If the script name isn't set, the boilerplate script should have // If the script name isn't set, the boilerplate script should have
@ -141,24 +222,17 @@ static bool HasOrigin(Handle<JSFunction> boilerplate,
// be cached in the same script generation. Currently the first use // be cached in the same script generation. Currently the first use
// will be cached, but subsequent code from different source / line // will be cached, but subsequent code from different source / line
// won't. // won't.
Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source, Handle<JSFunction> CompilationCacheScript::Lookup(Handle<String> source,
Handle<Object> name, Handle<Object> name,
int line_offset, int line_offset,
int column_offset) { int column_offset) {
if (!IsEnabled()) {
return Handle<JSFunction>::null();
}
// Use an int for the generation index, so value range propagation
// in gcc 4.3+ won't assume it can only go up to LAST_ENTRY when in
// fact it can go up to SCRIPT + NUMBER_OF_SCRIPT_GENERATIONS.
int generation = SCRIPT;
Object* result = NULL; Object* result = NULL;
int generation;
// Probe the script generation tables. Make sure not to leak handles // Probe the script generation tables. Make sure not to leak handles
// into the caller's handle scope. // into the caller's handle scope.
{ HandleScope scope; { HandleScope scope;
while (generation < SCRIPT + NUMBER_OF_SCRIPT_GENERATIONS) { for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation); Handle<CompilationCacheTable> table = GetTable(generation);
Handle<Object> probe(table->Lookup(*source)); Handle<Object> probe(table->Lookup(*source));
if (probe->IsJSFunction()) { if (probe->IsJSFunction()) {
@ -170,20 +244,18 @@ Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
break; break;
} }
} }
// Go to the next generation.
generation++;
} }
} }
static void* script_histogram = StatsTable::CreateHistogram( static void* script_histogram = StatsTable::CreateHistogram(
"V8.ScriptCache", "V8.ScriptCache",
0, 0,
NUMBER_OF_SCRIPT_GENERATIONS, kScriptGenerations,
NUMBER_OF_SCRIPT_GENERATIONS + 1); kScriptGenerations + 1);
if (script_histogram != NULL) { if (script_histogram != NULL) {
// The level NUMBER_OF_SCRIPT_GENERATIONS is equivalent to a cache miss. // The level NUMBER_OF_SCRIPT_GENERATIONS is equivalent to a cache miss.
StatsTable::AddHistogramSample(script_histogram, generation - SCRIPT); StatsTable::AddHistogramSample(script_histogram, generation);
} }
// Once outside the manacles of the handle scope, we need to recheck // Once outside the manacles of the handle scope, we need to recheck
@ -194,7 +266,7 @@ Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
ASSERT(HasOrigin(boilerplate, name, line_offset, column_offset)); ASSERT(HasOrigin(boilerplate, name, line_offset, column_offset));
// If the script was found in a later generation, we promote it to // If the script was found in a later generation, we promote it to
// the first generation to let it survive longer in the cache. // the first generation to let it survive longer in the cache.
if (generation != SCRIPT) PutScript(source, boilerplate); if (generation != 0) Put(source, boilerplate);
Counters::compilation_cache_hits.Increment(); Counters::compilation_cache_hits.Increment();
return boilerplate; return boilerplate;
} else { } else {
@ -204,19 +276,118 @@ Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
} }
void CompilationCacheScript::Put(Handle<String> source,
Handle<JSFunction> boilerplate) {
HandleScope scope;
ASSERT(boilerplate->IsBoilerplate());
Handle<CompilationCacheTable> table = GetTable(0);
CALL_HEAP_FUNCTION_VOID(table->Put(*source, *boilerplate));
}
Handle<JSFunction> CompilationCacheEval::Lookup(Handle<String> source,
Handle<Context> context) {
// Make sure not to leak the table into the surrounding handle
// scope. Otherwise, we risk keeping old tables around even after
// having cleared the cache.
Object* result = NULL;
int generation;
{ HandleScope scope;
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
result = table->LookupEval(*source, *context);
if (result->IsJSFunction()) {
break;
}
}
}
if (result->IsJSFunction()) {
Handle<JSFunction> boilerplate(JSFunction::cast(result));
if (generation != 0) {
Put(source, context, boilerplate);
}
Counters::compilation_cache_hits.Increment();
return boilerplate;
} else {
Counters::compilation_cache_misses.Increment();
return Handle<JSFunction>::null();
}
}
void CompilationCacheEval::Put(Handle<String> source,
Handle<Context> context,
Handle<JSFunction> boilerplate) {
HandleScope scope;
ASSERT(boilerplate->IsBoilerplate());
Handle<CompilationCacheTable> table = GetTable(0);
CALL_HEAP_FUNCTION_VOID(table->PutEval(*source, *context, *boilerplate));
}
Handle<FixedArray> CompilationCacheRegExp::Lookup(Handle<String> source,
JSRegExp::Flags flags) {
// Make sure not to leak the table into the surrounding handle
// scope. Otherwise, we risk keeping old tables around even after
// having cleared the cache.
Object* result = NULL;
int generation;
{ HandleScope scope;
for (generation = 0; generation < generations(); generation++) {
Handle<CompilationCacheTable> table = GetTable(generation);
result = table->LookupRegExp(*source, flags);
if (result->IsFixedArray()) {
break;
}
}
}
if (result->IsFixedArray()) {
Handle<FixedArray> data(FixedArray::cast(result));
if (generation != 0) {
Put(source, flags, data);
}
Counters::compilation_cache_hits.Increment();
return data;
} else {
Counters::compilation_cache_misses.Increment();
return Handle<FixedArray>::null();
}
}
void CompilationCacheRegExp::Put(Handle<String> source,
JSRegExp::Flags flags,
Handle<FixedArray> data) {
HandleScope scope;
Handle<CompilationCacheTable> table = GetTable(0);
CALL_HEAP_FUNCTION_VOID(table->PutRegExp(*source, flags, *data));
}
Handle<JSFunction> CompilationCache::LookupScript(Handle<String> source,
Handle<Object> name,
int line_offset,
int column_offset) {
if (!IsEnabled()) {
return Handle<JSFunction>::null();
}
return script.Lookup(source, name, line_offset, column_offset);
}
Handle<JSFunction> CompilationCache::LookupEval(Handle<String> source, Handle<JSFunction> CompilationCache::LookupEval(Handle<String> source,
Handle<Context> context, Handle<Context> context,
Entry entry) { bool is_global) {
if (!IsEnabled()) { if (!IsEnabled()) {
return Handle<JSFunction>::null(); return Handle<JSFunction>::null();
} }
ASSERT(entry == EVAL_GLOBAL || entry == EVAL_CONTEXTUAL); Handle<JSFunction> result;
Handle<JSFunction> result = Lookup(source, context, entry); if (is_global) {
if (result.is_null()) { result = eval_global.Lookup(source, context);
Counters::compilation_cache_misses.Increment();
} else { } else {
Counters::compilation_cache_hits.Increment(); result = eval_contextual.Lookup(source, context);
} }
return result; return result;
} }
@ -228,13 +399,7 @@ Handle<FixedArray> CompilationCache::LookupRegExp(Handle<String> source,
return Handle<FixedArray>::null(); return Handle<FixedArray>::null();
} }
Handle<FixedArray> result = Lookup(source, flags); return reg_exp.Lookup(source, flags);
if (result.is_null()) {
Counters::compilation_cache_misses.Increment();
} else {
Counters::compilation_cache_hits.Increment();
}
return result;
} }
@ -244,16 +409,14 @@ void CompilationCache::PutScript(Handle<String> source,
return; return;
} }
HandleScope scope;
ASSERT(boilerplate->IsBoilerplate()); ASSERT(boilerplate->IsBoilerplate());
Handle<CompilationCacheTable> table = GetTable(SCRIPT); script.Put(source, boilerplate);
CALL_HEAP_FUNCTION_VOID(table->Put(*source, *boilerplate));
} }
void CompilationCache::PutEval(Handle<String> source, void CompilationCache::PutEval(Handle<String> source,
Handle<Context> context, Handle<Context> context,
Entry entry, bool is_global,
Handle<JSFunction> boilerplate) { Handle<JSFunction> boilerplate) {
if (!IsEnabled()) { if (!IsEnabled()) {
return; return;
@ -261,8 +424,11 @@ void CompilationCache::PutEval(Handle<String> source,
HandleScope scope; HandleScope scope;
ASSERT(boilerplate->IsBoilerplate()); ASSERT(boilerplate->IsBoilerplate());
Handle<CompilationCacheTable> table = GetTable(entry); if (is_global) {
CALL_HEAP_FUNCTION_VOID(table->PutEval(*source, *context, *boilerplate)); eval_global.Put(source, context, boilerplate);
} else {
eval_contextual.Put(source, context, boilerplate);
}
} }
@ -274,31 +440,27 @@ void CompilationCache::PutRegExp(Handle<String> source,
return; return;
} }
HandleScope scope; reg_exp.Put(source, flags, data);
Handle<CompilationCacheTable> table = GetTable(REGEXP);
CALL_HEAP_FUNCTION_VOID(table->PutRegExp(*source, flags, *data));
} }
void CompilationCache::Clear() { void CompilationCache::Clear() {
for (int i = 0; i < NUMBER_OF_TABLE_ENTRIES; i++) { for (int i = 0; i < kSubCacheCount; i++) {
tables[i] = Heap::undefined_value(); subcaches[i]->Clear();
} }
} }
void CompilationCache::Iterate(ObjectVisitor* v) { void CompilationCache::Iterate(ObjectVisitor* v) {
v->VisitPointers(&tables[0], &tables[NUMBER_OF_TABLE_ENTRIES]); for (int i = 0; i < kSubCacheCount; i++) {
subcaches[i]->Iterate(v);
}
} }
void CompilationCache::MarkCompactPrologue() { void CompilationCache::MarkCompactPrologue() {
ASSERT(LAST_ENTRY == SCRIPT); for (int i = 0; i < kSubCacheCount; i++) {
for (int i = NUMBER_OF_TABLE_ENTRIES - 1; i > SCRIPT; i--) { subcaches[i]->Age();
tables[i] = tables[i - 1];
}
for (int j = 0; j <= LAST_ENTRY; j++) {
tables[j] = Heap::undefined_value();
} }
} }

17
deps/v8/src/compilation-cache.h

@ -34,20 +34,9 @@ namespace internal {
// The compilation cache keeps function boilerplates for compiled // The compilation cache keeps function boilerplates for compiled
// scripts and evals. The boilerplates are looked up using the source // scripts and evals. The boilerplates are looked up using the source
// string as the key. // string as the key. For regular expressions the compilation data is cached.
class CompilationCache { class CompilationCache {
public: public:
// The same source code string has different compiled code for
// scripts and evals. Internally, we use separate caches to avoid
// getting the wrong kind of entry when looking up.
enum Entry {
EVAL_GLOBAL,
EVAL_CONTEXTUAL,
REGEXP,
SCRIPT,
LAST_ENTRY = SCRIPT
};
// Finds the script function boilerplate for a source // Finds the script function boilerplate for a source
// string. Returns an empty handle if the cache doesn't contain a // string. Returns an empty handle if the cache doesn't contain a
// script for the given source string with the right origin. // script for the given source string with the right origin.
@ -61,7 +50,7 @@ class CompilationCache {
// contain a script for the given source string. // contain a script for the given source string.
static Handle<JSFunction> LookupEval(Handle<String> source, static Handle<JSFunction> LookupEval(Handle<String> source,
Handle<Context> context, Handle<Context> context,
Entry entry); bool is_global);
// Returns the regexp data associated with the given regexp if it // Returns the regexp data associated with the given regexp if it
// is in cache, otherwise an empty handle. // is in cache, otherwise an empty handle.
@ -77,7 +66,7 @@ class CompilationCache {
// with the boilerplate. This may overwrite an existing mapping. // with the boilerplate. This may overwrite an existing mapping.
static void PutEval(Handle<String> source, static void PutEval(Handle<String> source,
Handle<Context> context, Handle<Context> context,
Entry entry, bool is_global,
Handle<JSFunction> boilerplate); Handle<JSFunction> boilerplate);
// Associate the (source, flags) pair to the given regexp data. // Associate the (source, flags) pair to the given regexp data.

16
deps/v8/src/compiler.cc

@ -175,7 +175,7 @@ static Handle<JSFunction> MakeFunction(bool is_global,
#if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT #if defined ENABLE_LOGGING_AND_PROFILING || defined ENABLE_OPROFILE_AGENT
// Log the code generation for the script. Check explicit whether logging is // Log the code generation for the script. Check explicit whether logging is
// to avoid allocating when not required. // to avoid allocating when not required.
if (Logger::IsEnabled() || OProfileAgent::is_enabled()) { if (Logger::is_logging() || OProfileAgent::is_enabled()) {
if (script->name()->IsString()) { if (script->name()->IsString()) {
SmartPointer<char> data = SmartPointer<char> data =
String::cast(script->name())->ToCString(DISALLOW_NULLS); String::cast(script->name())->ToCString(DISALLOW_NULLS);
@ -295,14 +295,11 @@ Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
// The VM is in the COMPILER state until exiting this function. // The VM is in the COMPILER state until exiting this function.
VMState state(COMPILER); VMState state(COMPILER);
CompilationCache::Entry entry = is_global
? CompilationCache::EVAL_GLOBAL
: CompilationCache::EVAL_CONTEXTUAL;
// Do a lookup in the compilation cache; if the entry is not there, // Do a lookup in the compilation cache; if the entry is not there,
// invoke the compiler and add the result to the cache. // invoke the compiler and add the result to the cache.
Handle<JSFunction> result = Handle<JSFunction> result =
CompilationCache::LookupEval(source, context, entry); CompilationCache::LookupEval(source, context, is_global);
if (result.is_null()) { if (result.is_null()) {
// Create a script object describing the script to be compiled. // Create a script object describing the script to be compiled.
Handle<Script> script = Factory::NewScript(source); Handle<Script> script = Factory::NewScript(source);
@ -314,7 +311,7 @@ Handle<JSFunction> Compiler::CompileEval(Handle<String> source,
NULL, NULL,
NULL); NULL);
if (!result.is_null()) { if (!result.is_null()) {
CompilationCache::PutEval(source, context, entry, result); CompilationCache::PutEval(source, context, is_global, result);
} }
} }
@ -376,14 +373,11 @@ bool Compiler::CompileLazy(Handle<SharedFunctionInfo> shared,
// Log the code generation. If source information is available include script // Log the code generation. If source information is available include script
// name and line number. Check explicit whether logging is enabled as finding // name and line number. Check explicit whether logging is enabled as finding
// the line number is not for free. // the line number is not for free.
if (Logger::IsEnabled() || OProfileAgent::is_enabled()) { if (Logger::is_logging() || OProfileAgent::is_enabled()) {
Handle<String> func_name(name->length() > 0 ? Handle<String> func_name(name->length() > 0 ?
*name : shared->inferred_name()); *name : shared->inferred_name());
if (script->name()->IsString()) { if (script->name()->IsString()) {
int line_num = GetScriptLineNumber(script, start_position); int line_num = GetScriptLineNumber(script, start_position) + 1;
if (line_num > 0) {
line_num += script->line_offset()->value() + 1;
}
LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name, LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, *code, *func_name,
String::cast(script->name()), line_num)); String::cast(script->name()), line_num));
OProfileAgent::CreateNativeCodeRegion(*func_name, OProfileAgent::CreateNativeCodeRegion(*func_name,

2
deps/v8/src/contexts.cc

@ -149,7 +149,7 @@ Handle<Object> Context::Lookup(Handle<String> name, ContextLookupFlags flags,
// check parameter locals in context // check parameter locals in context
int param_index = ScopeInfo<>::ParameterIndex(*code, *name); int param_index = ScopeInfo<>::ParameterIndex(*code, *name);
if (param_index >= 0) { if (param_index >= 0) {
// slot found // slot found.
int index = int index =
ScopeInfo<>::ContextSlotIndex(*code, ScopeInfo<>::ContextSlotIndex(*code,
Heap::arguments_shadow_symbol(), Heap::arguments_shadow_symbol(),

2
deps/v8/src/conversions.cc

@ -327,7 +327,7 @@ static double InternalStringToDouble(S* str,
index++; index++;
if (!SubStringEquals(str, index, "Infinity")) if (!SubStringEquals(str, index, "Infinity"))
return JUNK_STRING_VALUE; return JUNK_STRING_VALUE;
result = is_negative ? -INFINITY : INFINITY; result = is_negative ? -V8_INFINITY : V8_INFINITY;
index += 8; index += 8;
} }
} }

5
deps/v8/src/date-delay.js

@ -150,6 +150,8 @@ var DST_offset_cache = {
}; };
// NOTE: The implementation relies on the fact that no time zones have
// more than one daylight savings offset change per month.
function DaylightSavingsOffset(t) { function DaylightSavingsOffset(t) {
// Load the cache object from the builtins object. // Load the cache object from the builtins object.
var cache = DST_offset_cache; var cache = DST_offset_cache;
@ -530,7 +532,8 @@ function GetUTCHoursFrom(aDate) {
function GetFullYearFrom(aDate) { function GetFullYearFrom(aDate) {
var t = GetTimeFrom(aDate); var t = GetTimeFrom(aDate);
if ($isNaN(t)) return t; if ($isNaN(t)) return t;
return YearFromTime(LocalTimeNoCheck(t)); // Ignore the DST offset for year computations.
return YearFromTime(t + local_time_offset);
} }

16
deps/v8/src/debug-delay.js

@ -388,7 +388,7 @@ ScriptBreakPoint.prototype.clear = function () {
function UpdateScriptBreakPoints(script) { function UpdateScriptBreakPoints(script) {
for (var i = 0; i < script_break_points.length; i++) { for (var i = 0; i < script_break_points.length; i++) {
if (script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptName && if (script_break_points[i].type() == Debug.ScriptBreakPointType.ScriptName &&
script_break_points[i].script_name() == script.name) { script_break_points[i].matchesScript(script)) {
script_break_points[i].set(script); script_break_points[i].set(script);
} }
} }
@ -1194,6 +1194,13 @@ DebugCommandProcessor.prototype.processDebugJSONRequest = function(json_request)
throw new Error('Command not specified'); throw new Error('Command not specified');
} }
// TODO(yurys): remove request.arguments.compactFormat check once
// ChromeDevTools are switched to 'inlineRefs'
if (request.arguments && (request.arguments.inlineRefs ||
request.arguments.compactFormat)) {
response.setOption('inlineRefs', true);
}
if (request.command == 'continue') { if (request.command == 'continue') {
this.continueRequest_(request, response); this.continueRequest_(request, response);
} else if (request.command == 'break') { } else if (request.command == 'break') {
@ -1504,9 +1511,6 @@ DebugCommandProcessor.prototype.backtraceRequest_ = function(request, response)
if (from_index < 0 || to_index < 0) { if (from_index < 0 || to_index < 0) {
return response.failed('Invalid frame number'); return response.failed('Invalid frame number');
} }
if (request.arguments.compactFormat) {
response.setOption('compactFormat', true);
}
} }
// Adjust the index. // Adjust the index.
@ -1696,10 +1700,6 @@ DebugCommandProcessor.prototype.lookupRequest_ = function(request, response) {
response.setOption('includeSource', includeSource); response.setOption('includeSource', includeSource);
} }
if (request.arguments.compactFormat) {
response.setOption('compactFormat', true);
}
// Lookup handles. // Lookup handles.
var mirrors = {}; var mirrors = {};
for (var i = 0; i < handles.length; i++) { for (var i = 0; i < handles.length; i++) {

5
deps/v8/src/dtoa-config.c

@ -77,6 +77,11 @@
#define __NO_ISOCEXT #define __NO_ISOCEXT
#endif /* __MINGW32__ */ #endif /* __MINGW32__ */
/* On 64-bit systems, we need to make sure that a Long is only 32 bits. */
#ifdef V8_TARGET_ARCH_X64
#define Long int
#endif /* V8_TARGET_ARCH_X64 */
/* Make sure we use the David M. Gay version of strtod(). On Linux, we /* Make sure we use the David M. Gay version of strtod(). On Linux, we
* cannot use the same name (maybe the function does not have weak * cannot use the same name (maybe the function does not have weak
* linkage?). */ * linkage?). */

2
deps/v8/src/factory.cc

@ -92,8 +92,6 @@ Handle<String> Factory::NewRawTwoByteString(int length,
Handle<String> Factory::NewConsString(Handle<String> first, Handle<String> Factory::NewConsString(Handle<String> first,
Handle<String> second) { Handle<String> second) {
if (first->length() == 0) return second;
if (second->length() == 0) return first;
CALL_HEAP_FUNCTION(Heap::AllocateConsString(*first, *second), String); CALL_HEAP_FUNCTION(Heap::AllocateConsString(*first, *second), String);
} }

49
deps/v8/src/frame-element.h

@ -54,8 +54,7 @@ class FrameElement BASE_EMBEDDED {
// The default constructor creates an invalid frame element. // The default constructor creates an invalid frame element.
FrameElement() { FrameElement() {
value_ = StaticTypeField::encode(StaticType::UNKNOWN_TYPE) value_ = TypeField::encode(INVALID)
| TypeField::encode(INVALID)
| CopiedField::encode(false) | CopiedField::encode(false)
| SyncedField::encode(false) | SyncedField::encode(false)
| DataField::encode(0); | DataField::encode(0);
@ -75,9 +74,8 @@ class FrameElement BASE_EMBEDDED {
// Factory function to construct an in-register frame element. // Factory function to construct an in-register frame element.
static FrameElement RegisterElement(Register reg, static FrameElement RegisterElement(Register reg,
SyncFlag is_synced, SyncFlag is_synced) {
StaticType static_type = StaticType()) { return FrameElement(REGISTER, reg, is_synced);
return FrameElement(REGISTER, reg, is_synced, static_type);
} }
// Factory function to construct a frame element whose value is known at // Factory function to construct a frame element whose value is known at
@ -143,15 +141,6 @@ class FrameElement BASE_EMBEDDED {
return DataField::decode(value_); return DataField::decode(value_);
} }
StaticType static_type() {
return StaticType(StaticTypeField::decode(value_));
}
void set_static_type(StaticType static_type) {
value_ = value_ & ~StaticTypeField::mask();
value_ = value_ | StaticTypeField::encode(static_type.static_type_);
}
bool Equals(FrameElement other) { bool Equals(FrameElement other) {
uint32_t masked_difference = (value_ ^ other.value_) & ~CopiedField::mask(); uint32_t masked_difference = (value_ ^ other.value_) & ~CopiedField::mask();
if (!masked_difference) { if (!masked_difference) {
@ -184,13 +173,8 @@ class FrameElement BASE_EMBEDDED {
if (!other->is_valid()) return other; if (!other->is_valid()) return other;
if (!SameLocation(other)) return NULL; if (!SameLocation(other)) return NULL;
// If either is unsynced, the result is. The result static type is // If either is unsynced, the result is.
// the merge of the static types. It's safe to set it on one of the
// frame elements, and harmless too (because we are only going to
// merge the reaching frames and will ensure that the types are
// coherent, and changing the static type does not emit code).
FrameElement* result = is_synced() ? other : this; FrameElement* result = is_synced() ? other : this;
result->set_static_type(static_type().merge(other->static_type()));
return result; return result;
} }
@ -205,16 +189,7 @@ class FrameElement BASE_EMBEDDED {
// Used to construct memory and register elements. // Used to construct memory and register elements.
FrameElement(Type type, Register reg, SyncFlag is_synced) { FrameElement(Type type, Register reg, SyncFlag is_synced) {
value_ = StaticTypeField::encode(StaticType::UNKNOWN_TYPE) value_ = TypeField::encode(type)
| TypeField::encode(type)
| CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED)
| DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
}
FrameElement(Type type, Register reg, SyncFlag is_synced, StaticType stype) {
value_ = StaticTypeField::encode(stype.static_type_)
| TypeField::encode(type)
| CopiedField::encode(false) | CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED) | SyncedField::encode(is_synced != NOT_SYNCED)
| DataField::encode(reg.code_ > 0 ? reg.code_ : 0); | DataField::encode(reg.code_ > 0 ? reg.code_ : 0);
@ -222,8 +197,7 @@ class FrameElement BASE_EMBEDDED {
// Used to construct constant elements. // Used to construct constant elements.
FrameElement(Handle<Object> value, SyncFlag is_synced) { FrameElement(Handle<Object> value, SyncFlag is_synced) {
value_ = StaticTypeField::encode(StaticType::TypeOf(*value).static_type_) value_ = TypeField::encode(CONSTANT)
| TypeField::encode(CONSTANT)
| CopiedField::encode(false) | CopiedField::encode(false)
| SyncedField::encode(is_synced != NOT_SYNCED) | SyncedField::encode(is_synced != NOT_SYNCED)
| DataField::encode(ConstantList()->length()); | DataField::encode(ConstantList()->length());
@ -248,14 +222,13 @@ class FrameElement BASE_EMBEDDED {
value_ = value_ | DataField::encode(new_reg.code_); value_ = value_ | DataField::encode(new_reg.code_);
} }
// Encode static type, type, copied, synced and data in one 32 bit integer. // Encode type, copied, synced and data in one 32 bit integer.
uint32_t value_; uint32_t value_;
class StaticTypeField: public BitField<StaticType::StaticTypeEnum, 0, 3> {}; class TypeField: public BitField<Type, 0, 3> {};
class TypeField: public BitField<Type, 3, 3> {}; class CopiedField: public BitField<uint32_t, 3, 1> {};
class CopiedField: public BitField<uint32_t, 6, 1> {}; class SyncedField: public BitField<uint32_t, 4, 1> {};
class SyncedField: public BitField<uint32_t, 7, 1> {}; class DataField: public BitField<uint32_t, 5, 32 - 6> {};
class DataField: public BitField<uint32_t, 8, 32 - 9> {};
friend class VirtualFrame; friend class VirtualFrame;
}; };

2
deps/v8/src/globals.h

@ -120,8 +120,10 @@ const int kIntptrSize = sizeof(intptr_t); // NOLINT
#if V8_HOST_ARCH_64_BIT #if V8_HOST_ARCH_64_BIT
const int kPointerSizeLog2 = 3; const int kPointerSizeLog2 = 3;
const intptr_t kIntptrSignBit = V8_INT64_C(0x8000000000000000);
#else #else
const int kPointerSizeLog2 = 2; const int kPointerSizeLog2 = 2;
const intptr_t kIntptrSignBit = 0x80000000;
#endif #endif
const int kObjectAlignmentBits = kPointerSizeLog2; const int kObjectAlignmentBits = kPointerSizeLog2;

22
deps/v8/src/heap-inl.h

@ -34,7 +34,7 @@
namespace v8 { namespace v8 {
namespace internal { namespace internal {
int Heap::MaxHeapObjectSize() { int Heap::MaxObjectSizeInPagedSpace() {
return Page::kMaxHeapObjectSize; return Page::kMaxHeapObjectSize;
} }
@ -215,26 +215,6 @@ void Heap::ScavengeObject(HeapObject** p, HeapObject* object) {
} }
Object* Heap::GetKeyedLookupCache() {
if (keyed_lookup_cache()->IsUndefined()) {
Object* obj = LookupCache::Allocate(4);
if (obj->IsFailure()) return obj;
keyed_lookup_cache_ = obj;
}
return keyed_lookup_cache();
}
void Heap::SetKeyedLookupCache(LookupCache* cache) {
keyed_lookup_cache_ = cache;
}
void Heap::ClearKeyedLookupCache() {
keyed_lookup_cache_ = undefined_value();
}
void Heap::SetLastScriptId(Object* last_script_id) { void Heap::SetLastScriptId(Object* last_script_id) {
last_script_id_ = last_script_id; last_script_id_ = last_script_id;
} }

221
deps/v8/src/heap.cc

@ -500,7 +500,9 @@ void Heap::MarkCompact(GCTracer* tracer) {
void Heap::MarkCompactPrologue(bool is_compacting) { void Heap::MarkCompactPrologue(bool is_compacting) {
// At any old GC clear the keyed lookup cache to enable collection of unused // At any old GC clear the keyed lookup cache to enable collection of unused
// maps. // maps.
ClearKeyedLookupCache(); KeyedLookupCache::Clear();
ContextSlotCache::Clear();
DescriptorLookupCache::Clear();
CompilationCache::MarkCompactPrologue(); CompilationCache::MarkCompactPrologue();
@ -629,6 +631,9 @@ void Heap::Scavenge() {
// Implements Cheney's copying algorithm // Implements Cheney's copying algorithm
LOG(ResourceEvent("scavenge", "begin")); LOG(ResourceEvent("scavenge", "begin"));
// Clear descriptor cache.
DescriptorLookupCache::Clear();
// Used for updating survived_since_last_expansion_ at function end. // Used for updating survived_since_last_expansion_ at function end.
int survived_watermark = PromotedSpaceSize(); int survived_watermark = PromotedSpaceSize();
@ -943,17 +948,15 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
// If the object should be promoted, we try to copy it to old space. // If the object should be promoted, we try to copy it to old space.
if (ShouldBePromoted(object->address(), object_size)) { if (ShouldBePromoted(object->address(), object_size)) {
OldSpace* target_space = Heap::TargetSpace(object); Object* result;
ASSERT(target_space == Heap::old_pointer_space_ || if (object_size > MaxObjectSizeInPagedSpace()) {
target_space == Heap::old_data_space_); result = lo_space_->AllocateRawFixedArray(object_size);
Object* result = target_space->AllocateRaw(object_size); if (!result->IsFailure()) {
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
if (target_space == Heap::old_pointer_space_) {
// Save the from-space object pointer and its map pointer at the // Save the from-space object pointer and its map pointer at the
// top of the to space to be swept and copied later. Write the // top of the to space to be swept and copied later. Write the
// forwarding address over the map word of the from-space // forwarding address over the map word of the from-space
// object. // object.
HeapObject* target = HeapObject::cast(result);
promotion_queue.insert(object, first_word.ToMap()); promotion_queue.insert(object, first_word.ToMap());
object->set_map_word(MapWord::FromForwardingAddress(target)); object->set_map_word(MapWord::FromForwardingAddress(target));
@ -964,21 +967,45 @@ void Heap::ScavengeObjectSlow(HeapObject** p, HeapObject* object) {
node->set_size(object_size); node->set_size(object_size);
*p = target; *p = target;
} else { return;
// Objects promoted to the data space can be copied immediately }
// and not revisited---we will never sweep that space for } else {
// pointers and the copied objects do not contain pointers to OldSpace* target_space = Heap::TargetSpace(object);
// new space objects. ASSERT(target_space == Heap::old_pointer_space_ ||
*p = MigrateObject(object, target, object_size); target_space == Heap::old_data_space_);
result = target_space->AllocateRaw(object_size);
if (!result->IsFailure()) {
HeapObject* target = HeapObject::cast(result);
if (target_space == Heap::old_pointer_space_) {
// Save the from-space object pointer and its map pointer at the
// top of the to space to be swept and copied later. Write the
// forwarding address over the map word of the from-space
// object.
promotion_queue.insert(object, first_word.ToMap());
object->set_map_word(MapWord::FromForwardingAddress(target));
// Give the space allocated for the result a proper map by
// treating it as a free list node (not linked into the free
// list).
FreeListNode* node = FreeListNode::FromAddress(target->address());
node->set_size(object_size);
*p = target;
} else {
// Objects promoted to the data space can be copied immediately
// and not revisited---we will never sweep that space for
// pointers and the copied objects do not contain pointers to
// new space objects.
*p = MigrateObject(object, target, object_size);
#ifdef DEBUG #ifdef DEBUG
VerifyNonPointerSpacePointersVisitor v; VerifyNonPointerSpacePointersVisitor v;
(*p)->Iterate(&v); (*p)->Iterate(&v);
#endif #endif
}
return;
} }
return;
} }
} }
// The object should remain in new space or the old space allocation failed. // The object should remain in new space or the old space allocation failed.
Object* result = new_space_.AllocateRaw(object_size); Object* result = new_space_.AllocateRaw(object_size);
// Failed allocation at this point is utterly unexpected. // Failed allocation at this point is utterly unexpected.
@ -1364,7 +1391,13 @@ bool Heap::CreateInitialObjects() {
last_script_id_ = undefined_value(); last_script_id_ = undefined_value();
// Initialize keyed lookup cache. // Initialize keyed lookup cache.
ClearKeyedLookupCache(); KeyedLookupCache::Clear();
// Initialize context slot cache.
ContextSlotCache::Clear();
// Initialize descriptor cache.
DescriptorLookupCache::Clear();
// Initialize compilation cache. // Initialize compilation cache.
CompilationCache::Clear(); CompilationCache::Clear();
@ -1488,6 +1521,8 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
share->set_name(name); share->set_name(name);
Code* illegal = Builtins::builtin(Builtins::Illegal); Code* illegal = Builtins::builtin(Builtins::Illegal);
share->set_code(illegal); share->set_code(illegal);
Code* construct_stub = Builtins::builtin(Builtins::JSConstructStubGeneric);
share->set_construct_stub(construct_stub);
share->set_expected_nof_properties(0); share->set_expected_nof_properties(0);
share->set_length(0); share->set_length(0);
share->set_formal_parameter_count(0); share->set_formal_parameter_count(0);
@ -1501,14 +1536,24 @@ Object* Heap::AllocateSharedFunctionInfo(Object* name) {
} }
Object* Heap::AllocateConsString(String* first, Object* Heap::AllocateConsString(String* first, String* second) {
String* second) {
int first_length = first->length(); int first_length = first->length();
if (first_length == 0) return second;
int second_length = second->length(); int second_length = second->length();
if (second_length == 0) return first;
int length = first_length + second_length; int length = first_length + second_length;
bool is_ascii = first->IsAsciiRepresentation() bool is_ascii = first->IsAsciiRepresentation()
&& second->IsAsciiRepresentation(); && second->IsAsciiRepresentation();
// Make sure that an out of memory exception is thrown if the length
// of the new cons string is too large to fit in a Smi.
if (length > Smi::kMaxValue || length < -0) {
Top::context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
// If the resulting string is small make a flat string. // If the resulting string is small make a flat string.
if (length < String::kMinNonFlatLength) { if (length < String::kMinNonFlatLength) {
ASSERT(first->IsFlat()); ASSERT(first->IsFlat());
@ -1518,8 +1563,12 @@ Object* Heap::AllocateConsString(String* first,
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
// Copy the characters into the new object. // Copy the characters into the new object.
char* dest = SeqAsciiString::cast(result)->GetChars(); char* dest = SeqAsciiString::cast(result)->GetChars();
String::WriteToFlat(first, dest, 0, first_length); // Copy first part.
String::WriteToFlat(second, dest + first_length, 0, second_length); char* src = SeqAsciiString::cast(first)->GetChars();
for (int i = 0; i < first_length; i++) *dest++ = src[i];
// Copy second part.
src = SeqAsciiString::cast(second)->GetChars();
for (int i = 0; i < second_length; i++) *dest++ = src[i];
return result; return result;
} else { } else {
Object* result = AllocateRawTwoByteString(length); Object* result = AllocateRawTwoByteString(length);
@ -1698,7 +1747,7 @@ Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
} }
int size = ByteArray::SizeFor(length); int size = ByteArray::SizeFor(length);
AllocationSpace space = AllocationSpace space =
size > MaxHeapObjectSize() ? LO_SPACE : OLD_DATA_SPACE; size > MaxObjectSizeInPagedSpace() ? LO_SPACE : OLD_DATA_SPACE;
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE); Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
@ -1713,7 +1762,7 @@ Object* Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
Object* Heap::AllocateByteArray(int length) { Object* Heap::AllocateByteArray(int length) {
int size = ByteArray::SizeFor(length); int size = ByteArray::SizeFor(length);
AllocationSpace space = AllocationSpace space =
size > MaxHeapObjectSize() ? LO_SPACE : NEW_SPACE; size > MaxObjectSizeInPagedSpace() ? LO_SPACE : NEW_SPACE;
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE); Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
@ -1748,7 +1797,7 @@ Object* Heap::CreateCode(const CodeDesc& desc,
int obj_size = Code::SizeFor(body_size, sinfo_size); int obj_size = Code::SizeFor(body_size, sinfo_size);
ASSERT(IsAligned(obj_size, Code::kCodeAlignment)); ASSERT(IsAligned(obj_size, Code::kCodeAlignment));
Object* result; Object* result;
if (obj_size > MaxHeapObjectSize()) { if (obj_size > MaxObjectSizeInPagedSpace()) {
result = lo_space_->AllocateRawCode(obj_size); result = lo_space_->AllocateRawCode(obj_size);
} else { } else {
result = code_space_->AllocateRaw(obj_size); result = code_space_->AllocateRaw(obj_size);
@ -1788,7 +1837,7 @@ Object* Heap::CopyCode(Code* code) {
// Allocate an object the same size as the code object. // Allocate an object the same size as the code object.
int obj_size = code->Size(); int obj_size = code->Size();
Object* result; Object* result;
if (obj_size > MaxHeapObjectSize()) { if (obj_size > MaxObjectSizeInPagedSpace()) {
result = lo_space_->AllocateRawCode(obj_size); result = lo_space_->AllocateRawCode(obj_size);
} else { } else {
result = code_space_->AllocateRaw(obj_size); result = code_space_->AllocateRaw(obj_size);
@ -1963,7 +2012,7 @@ Object* Heap::AllocateJSObjectFromMap(Map* map, PretenureFlag pretenure) {
// Allocate the JSObject. // Allocate the JSObject.
AllocationSpace space = AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; (pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
if (map->instance_size() > MaxHeapObjectSize()) space = LO_SPACE; if (map->instance_size() > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
Object* obj = Allocate(map, space); Object* obj = Allocate(map, space);
if (obj->IsFailure()) return obj; if (obj->IsFailure()) return obj;
@ -2250,7 +2299,7 @@ Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
// Allocate string. // Allocate string.
AllocationSpace space = AllocationSpace space =
(size > MaxHeapObjectSize()) ? LO_SPACE : OLD_DATA_SPACE; (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_DATA_SPACE;
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE); Object* result = AllocateRaw(size, space, OLD_DATA_SPACE);
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
@ -2272,13 +2321,16 @@ Object* Heap::AllocateInternalSymbol(unibrow::CharacterStream* buffer,
Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) { Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
int size = SeqAsciiString::SizeFor(length); int size = SeqAsciiString::SizeFor(length);
if (size > MaxHeapObjectSize()) {
space = LO_SPACE;
}
// Use AllocateRaw rather than Allocate because the object's size cannot be Object* result = Failure::OutOfMemoryException();
// determined from the map. if (space == NEW_SPACE) {
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE); result = size <= kMaxObjectSizeInNewSpace
? new_space_.AllocateRaw(size)
: lo_space_->AllocateRawFixedArray(size);
} else {
if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
result = AllocateRaw(size, space, OLD_DATA_SPACE);
}
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
// Determine the map based on the string's length. // Determine the map based on the string's length.
@ -2302,13 +2354,16 @@ Object* Heap::AllocateRawAsciiString(int length, PretenureFlag pretenure) {
Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) { Object* Heap::AllocateRawTwoByteString(int length, PretenureFlag pretenure) {
AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE; AllocationSpace space = (pretenure == TENURED) ? OLD_DATA_SPACE : NEW_SPACE;
int size = SeqTwoByteString::SizeFor(length); int size = SeqTwoByteString::SizeFor(length);
if (size > MaxHeapObjectSize()) {
space = LO_SPACE;
}
// Use AllocateRaw rather than Allocate because the object's size cannot be Object* result = Failure::OutOfMemoryException();
// determined from the map. if (space == NEW_SPACE) {
Object* result = AllocateRaw(size, space, OLD_DATA_SPACE); result = size <= kMaxObjectSizeInNewSpace
? new_space_.AllocateRaw(size)
: lo_space_->AllocateRawFixedArray(size);
} else {
if (size > MaxObjectSizeInPagedSpace()) space = LO_SPACE;
result = AllocateRaw(size, space, OLD_DATA_SPACE);
}
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
// Determine the map based on the string's length. // Determine the map based on the string's length.
@ -2345,9 +2400,9 @@ Object* Heap::AllocateRawFixedArray(int length) {
if (always_allocate()) return AllocateFixedArray(length, NOT_TENURED); if (always_allocate()) return AllocateFixedArray(length, NOT_TENURED);
// Allocate the raw data for a fixed array. // Allocate the raw data for a fixed array.
int size = FixedArray::SizeFor(length); int size = FixedArray::SizeFor(length);
return (size > MaxHeapObjectSize()) return size <= kMaxObjectSizeInNewSpace
? lo_space_->AllocateRawFixedArray(size) ? new_space_.AllocateRaw(size)
: new_space_.AllocateRaw(size); : lo_space_->AllocateRawFixedArray(size);
} }
@ -2395,16 +2450,22 @@ Object* Heap::AllocateFixedArray(int length, PretenureFlag pretenure) {
if (length == 0) return empty_fixed_array(); if (length == 0) return empty_fixed_array();
int size = FixedArray::SizeFor(length); int size = FixedArray::SizeFor(length);
Object* result; Object* result = Failure::OutOfMemoryException();
if (size > MaxHeapObjectSize()) { if (pretenure != TENURED) {
result = lo_space_->AllocateRawFixedArray(size); result = size <= kMaxObjectSizeInNewSpace
} else { ? new_space_.AllocateRaw(size)
AllocationSpace space = : lo_space_->AllocateRawFixedArray(size);
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE; }
result = AllocateRaw(size, space, OLD_POINTER_SPACE); if (result->IsFailure()) {
if (size > MaxObjectSizeInPagedSpace()) {
result = lo_space_->AllocateRawFixedArray(size);
} else {
AllocationSpace space =
(pretenure == TENURED) ? OLD_POINTER_SPACE : NEW_SPACE;
result = AllocateRaw(size, space, OLD_POINTER_SPACE);
}
if (result->IsFailure()) return result;
} }
if (result->IsFailure()) return result;
// Initialize the object. // Initialize the object.
reinterpret_cast<Array*>(result)->set_map(fixed_array_map()); reinterpret_cast<Array*>(result)->set_map(fixed_array_map());
FixedArray* array = FixedArray::cast(result); FixedArray* array = FixedArray::cast(result);
@ -2504,7 +2565,7 @@ STRUCT_LIST(MAKE_CASE)
} }
int size = map->instance_size(); int size = map->instance_size();
AllocationSpace space = AllocationSpace space =
(size > MaxHeapObjectSize()) ? LO_SPACE : OLD_POINTER_SPACE; (size > MaxObjectSizeInPagedSpace()) ? LO_SPACE : OLD_POINTER_SPACE;
Object* result = Heap::Allocate(map, space); Object* result = Heap::Allocate(map, space);
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
Struct::cast(result)->InitializeBody(size); Struct::cast(result)->InitializeBody(size);
@ -3478,6 +3539,58 @@ const char* GCTracer::CollectorString() {
} }
int KeyedLookupCache::Hash(Map* map, String* name) {
// Uses only lower 32 bits if pointers are larger.
uintptr_t addr_hash =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map)) >> 2;
return (addr_hash ^ name->Hash()) % kLength;
}
int KeyedLookupCache::Lookup(Map* map, String* name) {
int index = Hash(map, name);
Key& key = keys_[index];
if ((key.map == map) && key.name->Equals(name)) {
return field_offsets_[index];
}
return -1;
}
void KeyedLookupCache::Update(Map* map, String* name, int field_offset) {
String* symbol;
if (Heap::LookupSymbolIfExists(name, &symbol)) {
int index = Hash(map, symbol);
Key& key = keys_[index];
key.map = map;
key.name = symbol;
field_offsets_[index] = field_offset;
}
}
void KeyedLookupCache::Clear() {
for (int index = 0; index < kLength; index++) keys_[index].map = NULL;
}
KeyedLookupCache::Key KeyedLookupCache::keys_[KeyedLookupCache::kLength];
int KeyedLookupCache::field_offsets_[KeyedLookupCache::kLength];
void DescriptorLookupCache::Clear() {
for (int index = 0; index < kLength; index++) keys_[index].array = NULL;
}
DescriptorLookupCache::Key
DescriptorLookupCache::keys_[DescriptorLookupCache::kLength];
int DescriptorLookupCache::results_[DescriptorLookupCache::kLength];
#ifdef DEBUG #ifdef DEBUG
bool Heap::GarbageCollectionGreedyCheck() { bool Heap::GarbageCollectionGreedyCheck() {
ASSERT(FLAG_gc_greedy); ASSERT(FLAG_gc_greedy);

117
deps/v8/src/heap.h

@ -126,7 +126,6 @@ namespace internal {
V(FixedArray, number_string_cache) \ V(FixedArray, number_string_cache) \
V(FixedArray, single_character_string_cache) \ V(FixedArray, single_character_string_cache) \
V(FixedArray, natives_source_cache) \ V(FixedArray, natives_source_cache) \
V(Object, keyed_lookup_cache) \
V(Object, last_script_id) V(Object, last_script_id)
@ -243,9 +242,8 @@ class Heap : public AllStatic {
// all available bytes. Check MaxHeapObjectSize() instead. // all available bytes. Check MaxHeapObjectSize() instead.
static int Available(); static int Available();
// Returns the maximum object size that heap supports. Objects larger than // Returns the maximum object size in paged space.
// the maximum heap object size are allocated in a large object space. static inline int MaxObjectSizeInPagedSpace();
static inline int MaxHeapObjectSize();
// Returns of size of all objects residing in the heap. // Returns of size of all objects residing in the heap.
static int SizeOfObjects(); static int SizeOfObjects();
@ -446,17 +444,6 @@ class Heap : public AllStatic {
// Allocates a new utility object in the old generation. // Allocates a new utility object in the old generation.
static Object* AllocateStruct(InstanceType type); static Object* AllocateStruct(InstanceType type);
// Initializes a function with a shared part and prototype.
// Returns the function.
// Note: this code was factored out of AllocateFunction such that
// other parts of the VM could use it. Specifically, a function that creates
// instances of type JS_FUNCTION_TYPE benefit from the use of this function.
// Please note this does not perform a garbage collection.
static Object* InitializeFunction(JSFunction* function,
SharedFunctionInfo* shared,
Object* prototype);
// Allocates a function initialized with a shared part. // Allocates a function initialized with a shared part.
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. // failed.
@ -520,8 +507,7 @@ class Heap : public AllStatic {
// Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation // Returns Failure::RetryAfterGC(requested_bytes, space) if the allocation
// failed. // failed.
// Please note this does not perform a garbage collection. // Please note this does not perform a garbage collection.
static Object* AllocateConsString(String* first, static Object* AllocateConsString(String* first, String* second);
String* second);
// Allocates a new sliced string object which is a slice of an underlying // Allocates a new sliced string object which is a slice of an underlying
// string buffer stretching from the index start (inclusive) to the index // string buffer stretching from the index start (inclusive) to the index
@ -700,11 +686,6 @@ class Heap : public AllStatic {
non_monomorphic_cache_ = value; non_monomorphic_cache_ = value;
} }
// Gets, sets and clears the lookup cache used for keyed access.
static inline Object* GetKeyedLookupCache();
static inline void SetKeyedLookupCache(LookupCache* cache);
static inline void ClearKeyedLookupCache();
// Update the next script id. // Update the next script id.
static inline void SetLastScriptId(Object* last_script_id); static inline void SetLastScriptId(Object* last_script_id);
@ -836,6 +817,8 @@ class Heap : public AllStatic {
static const int kMaxMapSpaceSize = 8*MB; static const int kMaxMapSpaceSize = 8*MB;
static const int kMaxObjectSizeInNewSpace = 256*KB;
static NewSpace new_space_; static NewSpace new_space_;
static OldSpace* old_pointer_space_; static OldSpace* old_pointer_space_;
static OldSpace* old_data_space_; static OldSpace* old_data_space_;
@ -989,7 +972,17 @@ class Heap : public AllStatic {
static void ScavengeObjectSlow(HeapObject** p, HeapObject* object); static void ScavengeObjectSlow(HeapObject** p, HeapObject* object);
// Copy memory from src to dst. // Copy memory from src to dst.
inline static void CopyBlock(Object** dst, Object** src, int byte_size); static inline void CopyBlock(Object** dst, Object** src, int byte_size);
// Initializes a function with a shared part and prototype.
// Returns the function.
// Note: this code was factored out of AllocateFunction such that
// other parts of the VM could use it. Specifically, a function that creates
// instances of type JS_FUNCTION_TYPE benefit from the use of this function.
// Please note this does not perform a garbage collection.
static inline Object* InitializeFunction(JSFunction* function,
SharedFunctionInfo* shared,
Object* prototype);
static const int kInitialSymbolTableSize = 2048; static const int kInitialSymbolTableSize = 2048;
static const int kInitialEvalCacheSize = 64; static const int kInitialEvalCacheSize = 64;
@ -1140,6 +1133,84 @@ class HeapIterator BASE_EMBEDDED {
}; };
// Cache for mapping (map, property name) into field offset.
// Cleared at startup and prior to mark sweep collection.
class KeyedLookupCache {
public:
// Lookup field offset for (map, name). If absent, -1 is returned.
static int Lookup(Map* map, String* name);
// Update an element in the cache.
static void Update(Map* map, String* name, int field_offset);
// Clear the cache.
static void Clear();
private:
inline static int Hash(Map* map, String* name);
static const int kLength = 64;
struct Key {
Map* map;
String* name;
};
static Key keys_[kLength];
static int field_offsets_[kLength];
};
// Cache for mapping (array, property name) into descriptor index.
// The cache contains both positive and negative results.
// Descriptor index equals kNotFound means the property is absent.
// Cleared at startup and prior to any gc.
class DescriptorLookupCache {
public:
// Lookup descriptor index for (map, name).
// If absent, kAbsent is returned.
static int Lookup(DescriptorArray* array, String* name) {
if (!StringShape(name).IsSymbol()) return kAbsent;
int index = Hash(array, name);
Key& key = keys_[index];
if ((key.array == array) && (key.name == name)) return results_[index];
return kAbsent;
}
// Update an element in the cache.
static void Update(DescriptorArray* array, String* name, int result) {
ASSERT(result != kAbsent);
if (StringShape(name).IsSymbol()) {
int index = Hash(array, name);
Key& key = keys_[index];
key.array = array;
key.name = name;
results_[index] = result;
}
}
// Clear the cache.
static void Clear();
static const int kAbsent = -2;
private:
static int Hash(DescriptorArray* array, String* name) {
// Uses only lower 32 bits if pointers are larger.
uintptr_t array_hash =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(array)) >> 2;
uintptr_t name_hash =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(name)) >> 2;
return (array_hash ^ name_hash) % kLength;
}
static const int kLength = 64;
struct Key {
DescriptorArray* array;
String* name;
};
static Key keys_[kLength];
static int results_[kLength];
};
// ---------------------------------------------------------------------------- // ----------------------------------------------------------------------------
// Marking stack for tracing live objects. // Marking stack for tracing live objects.

2
deps/v8/src/ia32/assembler-ia32-inl.h

@ -48,7 +48,7 @@ Condition NegateCondition(Condition cc) {
// The modes possibly affected by apply must be in kApplyMask. // The modes possibly affected by apply must be in kApplyMask.
void RelocInfo::apply(int delta) { void RelocInfo::apply(intptr_t delta) {
if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) { if (rmode_ == RUNTIME_ENTRY || IsCodeTarget(rmode_)) {
int32_t* p = reinterpret_cast<int32_t*>(pc_); int32_t* p = reinterpret_cast<int32_t*>(pc_);
*p -= delta; // relocate entry *p -= delta; // relocate entry

15
deps/v8/src/ia32/assembler-ia32.cc

@ -1417,7 +1417,7 @@ void Assembler::call(const Operand& adr) {
} }
void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) { void Assembler::call(Handle<Code> code, RelocInfo::Mode rmode) {
WriteRecordedPositions(); WriteRecordedPositions();
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
@ -1815,7 +1815,7 @@ void Assembler::fcompp() {
void Assembler::fnstsw_ax() { void Assembler::fnstsw_ax() {
EnsureSpace ensure_space(this); EnsureSpace ensure_space(this);
last_pc_ = pc_; last_pc_ = pc_;
EMIT(0xdF); EMIT(0xDF);
EMIT(0xE0); EMIT(0xE0);
} }
@ -2182,17 +2182,6 @@ void Assembler::RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data) {
} }
void Assembler::WriteInternalReference(int position, const Label& bound_label) {
ASSERT(bound_label.is_bound());
ASSERT(0 <= position);
ASSERT(position + static_cast<int>(sizeof(uint32_t)) <= pc_offset());
ASSERT(long_at(position) == 0); // only initialize once!
uint32_t label_loc = reinterpret_cast<uint32_t>(addr_at(bound_label.pos()));
long_at_put(position, label_loc);
}
#ifdef GENERATED_CODE_COVERAGE #ifdef GENERATED_CODE_COVERAGE
static FILE* coverage_log = NULL; static FILE* coverage_log = NULL;

16
deps/v8/src/ia32/assembler-ia32.h

@ -396,10 +396,15 @@ class CpuFeatures : public AllStatic {
class Assembler : public Malloced { class Assembler : public Malloced {
private: private:
// The relocation writer's position is kGap bytes below the end of // We check before assembling an instruction that there is sufficient
// space to write an instruction and its relocation information.
// The relocation writer's position must be kGap bytes above the end of
// the generated instructions. This leaves enough space for the // the generated instructions. This leaves enough space for the
// longest possible ia32 instruction (17 bytes as of 9/26/06) and // longest possible ia32 instruction, 15 bytes, and the longest possible
// allows for a single, fast space check per instruction. // relocation information encoding, RelocInfoWriter::kMaxLength == 16.
// (There is a 15 byte limit on ia32 instruction length that rules out some
// otherwise valid instructions.)
// This allows for a single, fast space check per instruction.
static const int kGap = 32; static const int kGap = 32;
public: public:
@ -731,11 +736,6 @@ class Assembler : public Malloced {
// Used for inline tables, e.g., jump-tables. // Used for inline tables, e.g., jump-tables.
void dd(uint32_t data, RelocInfo::Mode reloc_info); void dd(uint32_t data, RelocInfo::Mode reloc_info);
// Writes the absolute address of a bound label at the given position in
// the generated code. That positions should have the relocation mode
// internal_reference!
void WriteInternalReference(int position, const Label& bound_label);
int pc_offset() const { return pc_ - buffer_; } int pc_offset() const { return pc_ - buffer_; }
int current_statement_position() const { return current_statement_position_; } int current_statement_position() const { return current_statement_position_; }
int current_position() const { return current_position_; } int current_position() const { return current_position_; }

33
deps/v8/src/ia32/builtins-ia32.cc

@ -63,6 +63,25 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx); __ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
__ j(not_equal, &non_function_call); __ j(not_equal, &non_function_call);
// Jump to the function-specific construct stub.
__ mov(ebx, FieldOperand(edi, JSFunction::kSharedFunctionInfoOffset));
__ mov(ebx, FieldOperand(ebx, SharedFunctionInfo::kConstructStubOffset));
__ lea(ebx, FieldOperand(ebx, Code::kHeaderSize));
__ jmp(Operand(ebx));
// edi: called object
// eax: number of arguments
__ bind(&non_function_call);
// Set expected number of arguments to zero (not changing eax).
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
}
void Builtins::Generate_JSConstructStubGeneric(MacroAssembler* masm) {
// Enter a construct frame. // Enter a construct frame.
__ EnterConstructFrame(); __ EnterConstructFrame();
@ -113,7 +132,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// Make sure that the maximum heap object size will never cause us // Make sure that the maximum heap object size will never cause us
// problem here, because it is always greater than the maximum // problem here, because it is always greater than the maximum
// instance size that can be represented in a byte. // instance size that can be represented in a byte.
ASSERT(Heap::MaxHeapObjectSize() >= (1 << kBitsPerByte)); ASSERT(Heap::MaxObjectSizeInPagedSpace() >= (1 << kBitsPerByte));
ExternalReference new_space_allocation_top = ExternalReference new_space_allocation_top =
ExternalReference::new_space_allocation_top_address(); ExternalReference::new_space_allocation_top_address();
__ mov(ebx, Operand::StaticVariable(new_space_allocation_top)); __ mov(ebx, Operand::StaticVariable(new_space_allocation_top));
@ -175,7 +194,7 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
// ebx: JSObject // ebx: JSObject
// edi: start of next object (will be start of FixedArray) // edi: start of next object (will be start of FixedArray)
// edx: number of elements in properties array // edx: number of elements in properties array
ASSERT(Heap::MaxHeapObjectSize() > ASSERT(Heap::MaxObjectSizeInPagedSpace() >
(FixedArray::kHeaderSize + 255*kPointerSize)); (FixedArray::kHeaderSize + 255*kPointerSize));
__ lea(ecx, Operand(edi, edx, times_4, FixedArray::kHeaderSize)); __ lea(ecx, Operand(edi, edx, times_4, FixedArray::kHeaderSize));
__ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit)); __ cmp(ecx, Operand::StaticVariable(new_space_allocation_limit));
@ -305,16 +324,6 @@ void Builtins::Generate_JSConstructCall(MacroAssembler* masm) {
__ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver __ lea(esp, Operand(esp, ebx, times_2, 1 * kPointerSize)); // 1 ~ receiver
__ push(ecx); __ push(ecx);
__ ret(0); __ ret(0);
// edi: called object
// eax: number of arguments
__ bind(&non_function_call);
// Set expected number of arguments to zero (not changing eax).
__ Set(ebx, Immediate(0));
__ GetBuiltinEntry(edx, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR);
__ jmp(Handle<Code>(builtin(ArgumentsAdaptorTrampoline)),
RelocInfo::CODE_TARGET);
} }

625
deps/v8/src/ia32/codegen-ia32.cc

@ -175,18 +175,7 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
function_return_.set_direction(JumpTarget::BIDIRECTIONAL); function_return_.set_direction(JumpTarget::BIDIRECTIONAL);
function_return_is_shadowed_ = false; function_return_is_shadowed_ = false;
// Allocate the arguments object and copy the parameters into it. // Allocate the local context if needed.
if (scope_->arguments() != NULL) {
ASSERT(scope_->arguments_shadow() != NULL);
Comment cmnt(masm_, "[ Allocate arguments object");
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
frame_->PushFunction();
frame_->PushReceiverSlotAddress();
frame_->Push(Smi::FromInt(scope_->num_parameters()));
Result answer = frame_->CallStub(&stub, 3);
frame_->Push(&answer);
}
if (scope_->num_heap_slots() > 0) { if (scope_->num_heap_slots() > 0) {
Comment cmnt(masm_, "[ allocate local context"); Comment cmnt(masm_, "[ allocate local context");
// Allocate local context. // Allocate local context.
@ -247,27 +236,11 @@ void CodeGenerator::GenCode(FunctionLiteral* fun) {
} }
} }
// This section stores the pointer to the arguments object that
// was allocated and copied into above. If the address was not
// saved to TOS, we push ecx onto the stack.
//
// Store the arguments object. This must happen after context // Store the arguments object. This must happen after context
// initialization because the arguments object may be stored in the // initialization because the arguments object may be stored in
// context. // the context.
if (scope_->arguments() != NULL) { if (ArgumentsMode() != NO_ARGUMENTS_ALLOCATION) {
Comment cmnt(masm_, "[ store arguments object"); StoreArgumentsObject(true);
{ Reference shadow_ref(this, scope_->arguments_shadow());
ASSERT(shadow_ref.is_slot());
{ Reference arguments_ref(this, scope_->arguments());
ASSERT(arguments_ref.is_slot());
// Here we rely on the convenient property that references to slot
// take up zero space in the frame (ie, it doesn't matter that the
// stored value is actually below the reference on the frame).
arguments_ref.SetValue(NOT_CONST_INIT);
}
shadow_ref.SetValue(NOT_CONST_INIT);
}
frame_->Drop(); // Value is no longer needed.
} }
// Generate code to 'execute' declarations and initialize functions // Generate code to 'execute' declarations and initialize functions
@ -591,6 +564,71 @@ void CodeGenerator::LoadTypeofExpression(Expression* x) {
} }
ArgumentsAllocationMode CodeGenerator::ArgumentsMode() const {
if (scope_->arguments() == NULL) return NO_ARGUMENTS_ALLOCATION;
ASSERT(scope_->arguments_shadow() != NULL);
// We don't want to do lazy arguments allocation for functions that
// have heap-allocated contexts, because it interfers with the
// uninitialized const tracking in the context objects.
return (scope_->num_heap_slots() > 0)
? EAGER_ARGUMENTS_ALLOCATION
: LAZY_ARGUMENTS_ALLOCATION;
}
Result CodeGenerator::StoreArgumentsObject(bool initial) {
ArgumentsAllocationMode mode = ArgumentsMode();
ASSERT(mode != NO_ARGUMENTS_ALLOCATION);
Comment cmnt(masm_, "[ store arguments object");
if (mode == LAZY_ARGUMENTS_ALLOCATION && initial) {
// When using lazy arguments allocation, we store the hole value
// as a sentinel indicating that the arguments object hasn't been
// allocated yet.
frame_->Push(Factory::the_hole_value());
} else {
ArgumentsAccessStub stub(ArgumentsAccessStub::NEW_OBJECT);
frame_->PushFunction();
frame_->PushReceiverSlotAddress();
frame_->Push(Smi::FromInt(scope_->num_parameters()));
Result result = frame_->CallStub(&stub, 3);
frame_->Push(&result);
}
{ Reference shadow_ref(this, scope_->arguments_shadow());
Reference arguments_ref(this, scope_->arguments());
ASSERT(shadow_ref.is_slot() && arguments_ref.is_slot());
// Here we rely on the convenient property that references to slot
// take up zero space in the frame (ie, it doesn't matter that the
// stored value is actually below the reference on the frame).
JumpTarget done;
bool skip_arguments = false;
if (mode == LAZY_ARGUMENTS_ALLOCATION && !initial) {
// We have to skip storing into the arguments slot if it has
// already been written to. This can happen if the a function
// has a local variable named 'arguments'.
LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
Result arguments = frame_->Pop();
if (arguments.is_constant()) {
// We have to skip updating the arguments object if it has
// been assigned a proper value.
skip_arguments = !arguments.handle()->IsTheHole();
} else {
__ cmp(Operand(arguments.reg()), Immediate(Factory::the_hole_value()));
arguments.Unuse();
done.Branch(not_equal);
}
}
if (!skip_arguments) {
arguments_ref.SetValue(NOT_CONST_INIT);
if (mode == LAZY_ARGUMENTS_ALLOCATION) done.Bind();
}
shadow_ref.SetValue(NOT_CONST_INIT);
}
return frame_->Pop();
}
Reference::Reference(CodeGenerator* cgen, Expression* expression) Reference::Reference(CodeGenerator* cgen, Expression* expression)
: cgen_(cgen), expression_(expression), type_(ILLEGAL) { : cgen_(cgen), expression_(expression), type_(ILLEGAL) {
cgen->LoadReference(this); cgen->LoadReference(this);
@ -881,15 +919,15 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
Result left = frame_->Pop(); Result left = frame_->Pop();
if (op == Token::ADD) { if (op == Token::ADD) {
bool left_is_string = left.static_type().is_jsstring(); bool left_is_string = left.is_constant() && left.handle()->IsString();
bool right_is_string = right.static_type().is_jsstring(); bool right_is_string = right.is_constant() && right.handle()->IsString();
if (left_is_string || right_is_string) { if (left_is_string || right_is_string) {
frame_->Push(&left); frame_->Push(&left);
frame_->Push(&right); frame_->Push(&right);
Result answer; Result answer;
if (left_is_string) { if (left_is_string) {
if (right_is_string) { if (right_is_string) {
// TODO(lrn): if (left.is_constant() && right.is_constant()) // TODO(lrn): if both are constant strings
// -- do a compile time cons, if allocation during codegen is allowed. // -- do a compile time cons, if allocation during codegen is allowed.
answer = frame_->CallRuntime(Runtime::kStringAdd, 2); answer = frame_->CallRuntime(Runtime::kStringAdd, 2);
} else { } else {
@ -900,7 +938,6 @@ void CodeGenerator::GenericBinaryOperation(Token::Value op,
answer = answer =
frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2); frame_->InvokeBuiltin(Builtins::STRING_ADD_RIGHT, CALL_FUNCTION, 2);
} }
answer.set_static_type(StaticType::jsstring());
frame_->Push(&answer); frame_->Push(&answer);
return; return;
} }
@ -1387,7 +1424,11 @@ class DeferredInlineSmiOperation: public DeferredCode {
void DeferredInlineSmiOperation::Generate() { void DeferredInlineSmiOperation::Generate() {
__ push(src_); __ push(src_);
__ push(Immediate(value_)); __ push(Immediate(value_));
GenericBinaryOpStub stub(op_, overwrite_mode_, SMI_CODE_INLINED); // For mod we don't generate all the Smi code inline.
GenericBinaryOpStub stub(
op_,
overwrite_mode_,
(op_ == Token::MOD) ? SMI_CODE_IN_STUB : SMI_CODE_INLINED);
__ CallStub(&stub); __ CallStub(&stub);
if (!dst_.is(eax)) __ mov(dst_, eax); if (!dst_.is(eax)) __ mov(dst_, eax);
} }
@ -1772,6 +1813,33 @@ void CodeGenerator::ConstantSmiBinaryOperation(Token::Value op,
break; break;
} }
// Generate inline code for mod of powers of 2 and negative powers of 2.
case Token::MOD:
if (!reversed &&
int_value != 0 &&
(IsPowerOf2(int_value) || IsPowerOf2(-int_value))) {
operand->ToRegister();
frame_->Spill(operand->reg());
DeferredCode* deferred = new DeferredInlineSmiOperation(op,
operand->reg(),
operand->reg(),
smi_value,
overwrite_mode);
// Check for negative or non-Smi left hand side.
__ test(operand->reg(), Immediate(kSmiTagMask | 0x80000000));
deferred->Branch(not_zero);
if (int_value < 0) int_value = -int_value;
if (int_value == 1) {
__ mov(operand->reg(), Immediate(Smi::FromInt(0)));
} else {
__ and_(operand->reg(), (int_value << kSmiTagSize) - 1);
}
deferred->BindExit();
frame_->Push(operand);
break;
}
// Fall through if we did not find a power of 2 on the right hand side!
default: { default: {
Result constant_operand(value); Result constant_operand(value);
if (reversed) { if (reversed) {
@ -1806,6 +1874,12 @@ class CompareStub: public CodeStub {
return (static_cast<int>(cc_) << 1) | (strict_ ? 1 : 0); return (static_cast<int>(cc_) << 1) | (strict_ ? 1 : 0);
} }
// Branch to the label if the given object isn't a symbol.
void BranchIfNonSymbol(MacroAssembler* masm,
Label* label,
Register object,
Register scratch);
#ifdef DEBUG #ifdef DEBUG
void Print() { void Print() {
PrintF("CompareStub (cc %d), (strict %s)\n", PrintF("CompareStub (cc %d), (strict %s)\n",
@ -2053,6 +2127,176 @@ void CodeGenerator::CallWithArguments(ZoneList<Expression*>* args,
} }
void CodeGenerator::CallApplyLazy(Property* apply,
Expression* receiver,
VariableProxy* arguments,
int position) {
ASSERT(ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION);
ASSERT(arguments->IsArguments());
JumpTarget slow, done;
// Load the apply function onto the stack. This will usually
// give us a megamorphic load site. Not super, but it works.
Reference ref(this, apply);
ref.GetValue(NOT_INSIDE_TYPEOF);
ASSERT(ref.type() == Reference::NAMED);
// Load the receiver and the existing arguments object onto the
// expression stack. Avoid allocating the arguments object here.
Load(receiver);
LoadFromSlot(scope_->arguments()->var()->slot(), NOT_INSIDE_TYPEOF);
// Emit the source position information after having loaded the
// receiver and the arguments.
CodeForSourcePosition(position);
// Check if the arguments object has been lazily allocated
// already. If so, just use that instead of copying the arguments
// from the stack. This also deals with cases where a local variable
// named 'arguments' has been introduced.
frame_->Dup();
Result probe = frame_->Pop();
bool try_lazy = true;
if (probe.is_constant()) {
try_lazy = probe.handle()->IsTheHole();
} else {
__ cmp(Operand(probe.reg()), Immediate(Factory::the_hole_value()));
probe.Unuse();
slow.Branch(not_equal);
}
if (try_lazy) {
JumpTarget build_args;
// Get rid of the arguments object probe.
frame_->Drop();
// Before messing with the execution stack, we sync all
// elements. This is bound to happen anyway because we're
// about to call a function.
frame_->SyncRange(0, frame_->element_count() - 1);
// Check that the receiver really is a JavaScript object.
{ frame_->PushElementAt(0);
Result receiver = frame_->Pop();
receiver.ToRegister();
__ test(receiver.reg(), Immediate(kSmiTagMask));
build_args.Branch(zero);
Result tmp = allocator_->Allocate();
// We allow all JSObjects including JSFunctions. As long as
// JS_FUNCTION_TYPE is the last instance type and it is right
// after LAST_JS_OBJECT_TYPE, we do not have to check the upper
// bound.
ASSERT(LAST_TYPE == JS_FUNCTION_TYPE);
ASSERT(JS_FUNCTION_TYPE == LAST_JS_OBJECT_TYPE + 1);
__ CmpObjectType(receiver.reg(), FIRST_JS_OBJECT_TYPE, tmp.reg());
build_args.Branch(less);
}
// Verify that we're invoking Function.prototype.apply.
{ frame_->PushElementAt(1);
Result apply = frame_->Pop();
apply.ToRegister();
__ test(apply.reg(), Immediate(kSmiTagMask));
build_args.Branch(zero);
Result tmp = allocator_->Allocate();
__ CmpObjectType(apply.reg(), JS_FUNCTION_TYPE, tmp.reg());
build_args.Branch(not_equal);
__ mov(tmp.reg(),
FieldOperand(apply.reg(), JSFunction::kSharedFunctionInfoOffset));
Handle<Code> apply_code(Builtins::builtin(Builtins::FunctionApply));
__ cmp(FieldOperand(tmp.reg(), SharedFunctionInfo::kCodeOffset),
Immediate(apply_code));
build_args.Branch(not_equal);
}
// Get the function receiver from the stack. Check that it
// really is a function.
__ mov(edi, Operand(esp, 2 * kPointerSize));
__ test(edi, Immediate(kSmiTagMask));
build_args.Branch(zero);
__ CmpObjectType(edi, JS_FUNCTION_TYPE, ecx);
build_args.Branch(not_equal);
// Copy the arguments to this function possibly from the
// adaptor frame below it.
Label invoke, adapted;
__ mov(edx, Operand(ebp, StandardFrameConstants::kCallerFPOffset));
__ mov(ecx, Operand(edx, StandardFrameConstants::kContextOffset));
__ cmp(ecx, ArgumentsAdaptorFrame::SENTINEL);
__ j(equal, &adapted);
// No arguments adaptor frame. Copy fixed number of arguments.
__ mov(eax, Immediate(scope_->num_parameters()));
for (int i = 0; i < scope_->num_parameters(); i++) {
__ push(frame_->ParameterAt(i));
}
__ jmp(&invoke);
// Arguments adaptor frame present. Copy arguments from there, but
// avoid copying too many arguments to avoid stack overflows.
__ bind(&adapted);
static const uint32_t kArgumentsLimit = 1 * KB;
__ mov(eax, Operand(edx, ArgumentsAdaptorFrameConstants::kLengthOffset));
__ shr(eax, kSmiTagSize);
__ mov(ecx, Operand(eax));
__ cmp(eax, kArgumentsLimit);
build_args.Branch(above);
// Loop through the arguments pushing them onto the execution
// stack. We don't inform the virtual frame of the push, so we don't
// have to worry about getting rid of the elements from the virtual
// frame.
Label loop;
__ bind(&loop);
__ test(ecx, Operand(ecx));
__ j(zero, &invoke);
__ push(Operand(edx, ecx, times_4, 1 * kPointerSize));
__ dec(ecx);
__ jmp(&loop);
// Invoke the function. The virtual frame knows about the receiver
// so make sure to forget that explicitly.
__ bind(&invoke);
ParameterCount actual(eax);
__ InvokeFunction(edi, actual, CALL_FUNCTION);
frame_->Forget(1);
Result result = allocator()->Allocate(eax);
frame_->SetElementAt(0, &result);
done.Jump();
// Slow-case: Allocate the arguments object since we know it isn't
// there, and fall-through to the slow-case where we call
// Function.prototype.apply.
build_args.Bind();
Result arguments_object = StoreArgumentsObject(false);
frame_->Push(&arguments_object);
slow.Bind();
}
// Flip the apply function and the function to call on the stack, so
// the function looks like the receiver of the apply call. This way,
// the generic Function.prototype.apply implementation can deal with
// the call like it usually does.
Result a2 = frame_->Pop();
Result a1 = frame_->Pop();
Result ap = frame_->Pop();
Result fn = frame_->Pop();
frame_->Push(&ap);
frame_->Push(&fn);
frame_->Push(&a1);
frame_->Push(&a2);
CallFunctionStub call_function(2, NOT_IN_LOOP);
Result res = frame_->CallStub(&call_function, 3);
frame_->Push(&res);
// All done. Restore context register after call.
if (try_lazy) done.Bind();
frame_->RestoreContextRegister();
}
class DeferredStackCheck: public DeferredCode { class DeferredStackCheck: public DeferredCode {
public: public:
DeferredStackCheck() { DeferredStackCheck() {
@ -2420,131 +2664,6 @@ void CodeGenerator::VisitWithExitStatement(WithExitStatement* node) {
} }
int CodeGenerator::FastCaseSwitchMaxOverheadFactor() {
return kFastSwitchMaxOverheadFactor;
}
int CodeGenerator::FastCaseSwitchMinCaseCount() {
return kFastSwitchMinCaseCount;
}
// Generate a computed jump to a switch case.
void CodeGenerator::GenerateFastCaseSwitchJumpTable(
SwitchStatement* node,
int min_index,
int range,
Label* default_label,
Vector<Label*> case_targets,
Vector<Label> case_labels) {
// Notice: Internal references, used by both the jmp instruction and
// the table entries, need to be relocated if the buffer grows. This
// prevents the forward use of Labels, since a displacement cannot
// survive relocation, and it also cannot safely be distinguished
// from a real address. Instead we put in zero-values as
// placeholders, and fill in the addresses after the labels have been
// bound.
JumpTarget setup_default;
JumpTarget is_smi;
// A non-null default label pointer indicates a default case among
// the case labels. Otherwise we use the break target as a
// "default".
JumpTarget* default_target =
(default_label == NULL) ? node->break_target() : &setup_default;
// Test whether input is a smi.
ASSERT(kSmiTagSize == 1 && kSmiTag == 0);
Result switch_value = frame_->Pop();
switch_value.ToRegister();
__ test(switch_value.reg(), Immediate(kSmiTagMask));
is_smi.Branch(equal, &switch_value, taken);
// It's a heap object, not a smi or a failure. Check if it is a
// heap number.
Result temp = allocator()->Allocate();
ASSERT(temp.is_valid());
__ CmpObjectType(switch_value.reg(), HEAP_NUMBER_TYPE, temp.reg());
temp.Unuse();
default_target->Branch(not_equal);
// The switch value is a heap number. Convert it to a smi.
frame_->Push(&switch_value);
Result smi_value = frame_->CallRuntime(Runtime::kNumberToSmi, 1);
is_smi.Bind(&smi_value);
smi_value.ToRegister();
// Convert the switch value to a 0-based table index.
if (min_index != 0) {
frame_->Spill(smi_value.reg());
__ sub(Operand(smi_value.reg()), Immediate(min_index << kSmiTagSize));
}
// Go to the default case if the table index is negative or not a smi.
__ test(smi_value.reg(), Immediate(0x80000000 | kSmiTagMask));
default_target->Branch(not_equal, not_taken);
__ cmp(smi_value.reg(), range << kSmiTagSize);
default_target->Branch(greater_equal, not_taken);
// The expected frame at all the case labels is a version of the
// current one (the bidirectional entry frame, which an arbitrary
// frame of the correct height can be merged to). Keep a copy to
// restore at the start of every label. Create a jump target and
// bind it to set its entry frame properly.
JumpTarget entry_target(JumpTarget::BIDIRECTIONAL);
entry_target.Bind(&smi_value);
VirtualFrame* start_frame = new VirtualFrame(frame_);
// 0 is placeholder.
// Jump to the address at table_address + 2 * smi_value.reg().
// The target of the jump is read from table_address + 4 * switch_value.
// The Smi encoding of smi_value.reg() is 2 * switch_value.
smi_value.ToRegister();
__ jmp(Operand(smi_value.reg(), smi_value.reg(),
times_1, 0x0, RelocInfo::INTERNAL_REFERENCE));
smi_value.Unuse();
// Calculate address to overwrite later with actual address of table.
int32_t jump_table_ref = masm_->pc_offset() - sizeof(int32_t);
__ Align(4);
Label table_start;
__ bind(&table_start);
__ WriteInternalReference(jump_table_ref, table_start);
for (int i = 0; i < range; i++) {
// These are the table entries. 0x0 is the placeholder for case address.
__ dd(0x0, RelocInfo::INTERNAL_REFERENCE);
}
GenerateFastCaseSwitchCases(node, case_labels, start_frame);
// If there was a default case, we need to emit the code to match it.
if (default_label != NULL) {
if (has_valid_frame()) {
node->break_target()->Jump();
}
setup_default.Bind();
frame_->MergeTo(start_frame);
__ jmp(default_label);
DeleteFrame();
}
if (node->break_target()->is_linked()) {
node->break_target()->Bind();
}
for (int i = 0, entry_pos = table_start.pos();
i < range;
i++, entry_pos += sizeof(uint32_t)) {
if (case_targets[i] == NULL) {
__ WriteInternalReference(entry_pos,
*node->break_target()->entry_label());
} else {
__ WriteInternalReference(entry_pos, *case_targets[i]);
}
}
}
void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) { void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
ASSERT(!in_spilled_code()); ASSERT(!in_spilled_code());
Comment cmnt(masm_, "[ SwitchStatement"); Comment cmnt(masm_, "[ SwitchStatement");
@ -2554,10 +2673,6 @@ void CodeGenerator::VisitSwitchStatement(SwitchStatement* node) {
// Compile the switch value. // Compile the switch value.
Load(node->tag()); Load(node->tag());
if (TryGenerateFastCaseSwitchStatement(node)) {
return;
}
ZoneList<CaseClause*>* cases = node->cases(); ZoneList<CaseClause*>* cases = node->cases();
int length = cases->length(); int length = cases->length();
CaseClause* default_clause = NULL; CaseClause* default_clause = NULL;
@ -3707,6 +3822,44 @@ void CodeGenerator::LoadFromSlot(Slot* slot, TypeofState typeof_state) {
} }
void CodeGenerator::LoadFromSlotCheckForArguments(Slot* slot,
TypeofState state) {
LoadFromSlot(slot, state);
// Bail out quickly if we're not using lazy arguments allocation.
if (ArgumentsMode() != LAZY_ARGUMENTS_ALLOCATION) return;
// ... or if the slot isn't a non-parameter arguments slot.
if (slot->type() == Slot::PARAMETER || !slot->is_arguments()) return;
// Pop the loaded value from the stack.
Result value = frame_->Pop();
// If the loaded value is a constant, we know if the arguments
// object has been lazily loaded yet.
if (value.is_constant()) {
if (value.handle()->IsTheHole()) {
Result arguments = StoreArgumentsObject(false);
frame_->Push(&arguments);
} else {
frame_->Push(&value);
}
return;
}
// The loaded value is in a register. If it is the sentinel that
// indicates that we haven't loaded the arguments object yet, we
// need to do it now.
JumpTarget exit;
__ cmp(Operand(value.reg()), Immediate(Factory::the_hole_value()));
frame_->Push(&value);
exit.Branch(not_equal);
Result arguments = StoreArgumentsObject(false);
frame_->SetElementAt(0, &arguments);
exit.Bind();
}
Result CodeGenerator::LoadFromGlobalSlotCheckExtensions( Result CodeGenerator::LoadFromGlobalSlotCheckExtensions(
Slot* slot, Slot* slot,
TypeofState typeof_state, TypeofState typeof_state,
@ -3879,7 +4032,7 @@ void CodeGenerator::StoreToSlot(Slot* slot, InitState init_state) {
void CodeGenerator::VisitSlot(Slot* node) { void CodeGenerator::VisitSlot(Slot* node) {
Comment cmnt(masm_, "[ Slot"); Comment cmnt(masm_, "[ Slot");
LoadFromSlot(node, typeof_state()); LoadFromSlotCheckForArguments(node, typeof_state());
} }
@ -4441,23 +4594,40 @@ void CodeGenerator::VisitCall(Call* node) {
// JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)' // JavaScript example: 'object.foo(1, 2, 3)' or 'map["key"](1, 2, 3)'
// ------------------------------------------------------------------ // ------------------------------------------------------------------
// Push the name of the function and the receiver onto the stack. Handle<String> name = Handle<String>::cast(literal->handle());
frame_->Push(literal->handle());
Load(property->obj());
// Load the arguments. if (ArgumentsMode() == LAZY_ARGUMENTS_ALLOCATION &&
int arg_count = args->length(); name->IsEqualTo(CStrVector("apply")) &&
for (int i = 0; i < arg_count; i++) { args->length() == 2 &&
Load(args->at(i)); args->at(1)->AsVariableProxy() != NULL &&
} args->at(1)->AsVariableProxy()->IsArguments()) {
// Use the optimized Function.prototype.apply that avoids
// allocating lazily allocated arguments objects.
CallApplyLazy(property,
args->at(0),
args->at(1)->AsVariableProxy(),
node->position());
// Call the IC initialization code. } else {
CodeForSourcePosition(node->position()); // Push the name of the function and the receiver onto the stack.
Result result = frame_->Push(name);
frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count, loop_nesting()); Load(property->obj());
frame_->RestoreContextRegister();
// Replace the function on the stack with the result. // Load the arguments.
frame_->SetElementAt(0, &result); int arg_count = args->length();
for (int i = 0; i < arg_count; i++) {
Load(args->at(i));
}
// Call the IC initialization code.
CodeForSourcePosition(node->position());
Result result =
frame_->CallCallIC(RelocInfo::CODE_TARGET, arg_count,
loop_nesting());
frame_->RestoreContextRegister();
// Replace the function on the stack with the result.
frame_->SetElementAt(0, &result);
}
} else { } else {
// ------------------------------------------- // -------------------------------------------
@ -5925,12 +6095,19 @@ void Reference::GetValue(TypeofState typeof_state) {
ASSERT(cgen_->HasValidEntryRegisters()); ASSERT(cgen_->HasValidEntryRegisters());
ASSERT(!is_illegal()); ASSERT(!is_illegal());
MacroAssembler* masm = cgen_->masm(); MacroAssembler* masm = cgen_->masm();
// Record the source position for the property load.
Property* property = expression_->AsProperty();
if (property != NULL) {
cgen_->CodeForSourcePosition(property->position());
}
switch (type_) { switch (type_) {
case SLOT: { case SLOT: {
Comment cmnt(masm, "[ Load from Slot"); Comment cmnt(masm, "[ Load from Slot");
Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot(); Slot* slot = expression_->AsVariableProxy()->AsVariable()->slot();
ASSERT(slot != NULL); ASSERT(slot != NULL);
cgen_->LoadFromSlot(slot, typeof_state); cgen_->LoadFromSlotCheckForArguments(slot, typeof_state);
break; break;
} }
@ -6016,6 +6193,7 @@ void Reference::GetValue(TypeofState typeof_state) {
Variable* var = expression_->AsVariableProxy()->AsVariable(); Variable* var = expression_->AsVariableProxy()->AsVariable();
bool is_global = var != NULL; bool is_global = var != NULL;
ASSERT(!is_global || var->is_global()); ASSERT(!is_global || var->is_global());
// Inline array load code if inside of a loop. We do not know // Inline array load code if inside of a loop. We do not know
// the receiver map yet, so we initially generate the code with // the receiver map yet, so we initially generate the code with
// a check against an invalid map. In the inline cache code, we // a check against an invalid map. In the inline cache code, we
@ -6143,13 +6321,16 @@ void Reference::TakeValue(TypeofState typeof_state) {
ASSERT(slot != NULL); ASSERT(slot != NULL);
if (slot->type() == Slot::LOOKUP || if (slot->type() == Slot::LOOKUP ||
slot->type() == Slot::CONTEXT || slot->type() == Slot::CONTEXT ||
slot->var()->mode() == Variable::CONST) { slot->var()->mode() == Variable::CONST ||
slot->is_arguments()) {
GetValue(typeof_state); GetValue(typeof_state);
return; return;
} }
// Only non-constant, frame-allocated parameters and locals can reach // Only non-constant, frame-allocated parameters and locals can
// here. // reach here. Be careful not to use the optimizations for arguments
// object access since it may not have been initialized yet.
ASSERT(!slot->is_arguments());
if (slot->type() == Slot::PARAMETER) { if (slot->type() == Slot::PARAMETER) {
cgen_->frame()->TakeParameterAt(slot->index()); cgen_->frame()->TakeParameterAt(slot->index());
} else { } else {
@ -6687,9 +6868,45 @@ void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
// result. // result.
__ bind(&call_runtime); __ bind(&call_runtime);
switch (op_) { switch (op_) {
case Token::ADD: case Token::ADD: {
// Test for string arguments before calling runtime.
Label not_strings, both_strings, not_string1, string1;
Result answer;
__ mov(eax, Operand(esp, 2 * kPointerSize)); // First argument.
__ mov(edx, Operand(esp, 1 * kPointerSize)); // Second argument.
__ test(eax, Immediate(kSmiTagMask));
__ j(zero, &not_string1);
__ CmpObjectType(eax, FIRST_NONSTRING_TYPE, eax);
__ j(above_equal, &not_string1);
// First argument is a a string, test second.
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &string1);
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
__ j(above_equal, &string1);
// First and second argument are strings.
__ TailCallRuntime(ExternalReference(Runtime::kStringAdd), 2);
// Only first argument is a string.
__ bind(&string1);
__ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION);
// First argument was not a string, test second.
__ bind(&not_string1);
__ test(edx, Immediate(kSmiTagMask));
__ j(zero, &not_strings);
__ CmpObjectType(edx, FIRST_NONSTRING_TYPE, edx);
__ j(above_equal, &not_strings);
// Only second argument is a string.
__ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION);
__ bind(&not_strings);
// Neither argument is a string.
__ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
break; break;
}
case Token::SUB: case Token::SUB:
__ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
break; break;
@ -7121,17 +7338,16 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ bind(&slow); __ bind(&slow);
} }
// Save the return address (and get it off the stack). // Push arguments below the return address.
__ pop(ecx); __ pop(ecx);
// Push arguments.
__ push(eax); __ push(eax);
__ push(edx); __ push(edx);
__ push(ecx); __ push(ecx);
// Inlined floating point compare. // Inlined floating point compare.
// Call builtin if operands are not floating point or smi. // Call builtin if operands are not floating point or smi.
FloatingPointHelper::CheckFloatOperands(masm, &call_builtin, ebx); Label check_for_symbols;
FloatingPointHelper::CheckFloatOperands(masm, &check_for_symbols, ebx);
FloatingPointHelper::LoadFloatOperands(masm, ecx); FloatingPointHelper::LoadFloatOperands(masm, ecx);
__ FCmp(); __ FCmp();
@ -7155,6 +7371,18 @@ void CompareStub::Generate(MacroAssembler* masm) {
__ mov(eax, 1); __ mov(eax, 1);
__ ret(2 * kPointerSize); // eax, edx were pushed __ ret(2 * kPointerSize); // eax, edx were pushed
// Fast negative check for symbol-to-symbol equality.
__ bind(&check_for_symbols);
if (cc_ == equal) {
BranchIfNonSymbol(masm, &call_builtin, eax, ecx);
BranchIfNonSymbol(masm, &call_builtin, edx, ecx);
// We've already checked for object identity, so if both operands
// are symbols they aren't equal. Register eax already holds a
// non-zero value, which indicates not equal, so just return.
__ ret(2 * kPointerSize);
}
__ bind(&call_builtin); __ bind(&call_builtin);
// must swap argument order // must swap argument order
__ pop(ecx); __ pop(ecx);
@ -7188,6 +7416,20 @@ void CompareStub::Generate(MacroAssembler* masm) {
} }
void CompareStub::BranchIfNonSymbol(MacroAssembler* masm,
Label* label,
Register object,
Register scratch) {
__ test(object, Immediate(kSmiTagMask));
__ j(zero, label);
__ mov(scratch, FieldOperand(object, HeapObject::kMapOffset));
__ movzx_b(scratch, FieldOperand(scratch, Map::kInstanceTypeOffset));
__ and_(scratch, kIsSymbolMask | kIsNotStringMask);
__ cmp(scratch, kSymbolTag | kStringTag);
__ j(not_equal, label);
}
void StackCheckStub::Generate(MacroAssembler* masm) { void StackCheckStub::Generate(MacroAssembler* masm) {
// Because builtins always remove the receiver from the stack, we // Because builtins always remove the receiver from the stack, we
// have to fake one to avoid underflowing the stack. The receiver // have to fake one to avoid underflowing the stack. The receiver
@ -7230,7 +7472,6 @@ void CallFunctionStub::Generate(MacroAssembler* masm) {
} }
void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) {
// eax holds the exception. // eax holds the exception.

78
deps/v8/src/ia32/codegen-ia32.h

@ -273,6 +273,14 @@ class CodeGenState BASE_EMBEDDED {
}; };
// -------------------------------------------------------------------------
// Arguments allocation mode
enum ArgumentsAllocationMode {
NO_ARGUMENTS_ALLOCATION,
EAGER_ARGUMENTS_ALLOCATION,
LAZY_ARGUMENTS_ALLOCATION
};
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
@ -332,12 +340,11 @@ class CodeGenerator: public AstVisitor {
// Accessors // Accessors
Scope* scope() const { return scope_; } Scope* scope() const { return scope_; }
bool is_eval() { return is_eval_; }
// Generating deferred code. // Generating deferred code.
void ProcessDeferred(); void ProcessDeferred();
bool is_eval() { return is_eval_; }
// State // State
TypeofState typeof_state() const { return state_->typeof_state(); } TypeofState typeof_state() const { return state_->typeof_state(); }
ControlDestination* destination() const { return state_->destination(); } ControlDestination* destination() const { return state_->destination(); }
@ -373,6 +380,12 @@ class CodeGenerator: public AstVisitor {
// target (which can not be done more than once). // target (which can not be done more than once).
void GenerateReturnSequence(Result* return_value); void GenerateReturnSequence(Result* return_value);
// Returns the arguments allocation mode.
ArgumentsAllocationMode ArgumentsMode() const;
// Store the arguments object and allocate it if necessary.
Result StoreArgumentsObject(bool initial);
// The following are used by class Reference. // The following are used by class Reference.
void LoadReference(Reference* ref); void LoadReference(Reference* ref);
void UnloadReference(Reference* ref); void UnloadReference(Reference* ref);
@ -408,6 +421,7 @@ class CodeGenerator: public AstVisitor {
// Read a value from a slot and leave it on top of the expression stack. // Read a value from a slot and leave it on top of the expression stack.
void LoadFromSlot(Slot* slot, TypeofState typeof_state); void LoadFromSlot(Slot* slot, TypeofState typeof_state);
void LoadFromSlotCheckForArguments(Slot* slot, TypeofState typeof_state);
Result LoadFromGlobalSlotCheckExtensions(Slot* slot, Result LoadFromGlobalSlotCheckExtensions(Slot* slot,
TypeofState typeof_state, TypeofState typeof_state,
JumpTarget* slow); JumpTarget* slow);
@ -470,6 +484,14 @@ class CodeGenerator: public AstVisitor {
void CallWithArguments(ZoneList<Expression*>* arguments, int position); void CallWithArguments(ZoneList<Expression*>* arguments, int position);
// Use an optimized version of Function.prototype.apply that avoid
// allocating the arguments object and just copies the arguments
// from the stack.
void CallApplyLazy(Property* apply,
Expression* receiver,
VariableProxy* arguments,
int position);
void CheckStack(); void CheckStack();
struct InlineRuntimeLUT { struct InlineRuntimeLUT {
@ -527,58 +549,6 @@ class CodeGenerator: public AstVisitor {
inline void GenerateMathSin(ZoneList<Expression*>* args); inline void GenerateMathSin(ZoneList<Expression*>* args);
inline void GenerateMathCos(ZoneList<Expression*>* args); inline void GenerateMathCos(ZoneList<Expression*>* args);
// Methods and constants for fast case switch statement support.
//
// Only allow fast-case switch if the range of labels is at most
// this factor times the number of case labels.
// Value is derived from comparing the size of code generated by the normal
// switch code for Smi-labels to the size of a single pointer. If code
// quality increases this number should be decreased to match.
static const int kFastSwitchMaxOverheadFactor = 5;
// Minimal number of switch cases required before we allow jump-table
// optimization.
static const int kFastSwitchMinCaseCount = 5;
// The limit of the range of a fast-case switch, as a factor of the number
// of cases of the switch. Each platform should return a value that
// is optimal compared to the default code generated for a switch statement
// on that platform.
int FastCaseSwitchMaxOverheadFactor();
// The minimal number of cases in a switch before the fast-case switch
// optimization is enabled. Each platform should return a value that
// is optimal compared to the default code generated for a switch statement
// on that platform.
int FastCaseSwitchMinCaseCount();
// Allocate a jump table and create code to jump through it.
// Should call GenerateFastCaseSwitchCases to generate the code for
// all the cases at the appropriate point.
void GenerateFastCaseSwitchJumpTable(SwitchStatement* node,
int min_index,
int range,
Label* fail_label,
Vector<Label*> case_targets,
Vector<Label> case_labels);
// Generate the code for cases for the fast case switch.
// Called by GenerateFastCaseSwitchJumpTable.
void GenerateFastCaseSwitchCases(SwitchStatement* node,
Vector<Label> case_labels,
VirtualFrame* start_frame);
// Fast support for constant-Smi switches.
void GenerateFastCaseSwitchStatement(SwitchStatement* node,
int min_index,
int range,
int default_index);
// Fast support for constant-Smi switches. Tests whether switch statement
// permits optimization and calls GenerateFastCaseSwitch if it does.
// Returns true if the fast-case switch was generated, and false if not.
bool TryGenerateFastCaseSwitchStatement(SwitchStatement* node);
// Methods used to indicate which source code is generated for. Source // Methods used to indicate which source code is generated for. Source
// positions are collected by the assembler and emitted with the relocation // positions are collected by the assembler and emitted with the relocation
// information. // information.

3
deps/v8/src/ia32/ic-ia32.cc

@ -141,6 +141,9 @@ static void GenerateCheckNonObjectOrLoaded(MacroAssembler* masm, Label* miss,
} }
const int LoadIC::kOffsetToLoadInstruction = 13;
void LoadIC::GenerateArrayLength(MacroAssembler* masm) { void LoadIC::GenerateArrayLength(MacroAssembler* masm) {
// ----------- S t a t e ------------- // ----------- S t a t e -------------
// -- ecx : name // -- ecx : name

20
deps/v8/src/ia32/virtual-frame-ia32.cc

@ -189,7 +189,7 @@ void VirtualFrame::MakeMergable() {
backing_element = elements_[element.index()]; backing_element = elements_[element.index()];
} }
Result fresh = cgen()->allocator()->Allocate(); Result fresh = cgen()->allocator()->Allocate();
ASSERT(fresh.is_valid()); ASSERT(fresh.is_valid()); // A register was spilled if all were in use.
elements_[i] = elements_[i] =
FrameElement::RegisterElement(fresh.reg(), FrameElement::RegisterElement(fresh.reg(),
FrameElement::NOT_SYNCED); FrameElement::NOT_SYNCED);
@ -218,14 +218,12 @@ void VirtualFrame::MakeMergable() {
} }
} }
} }
// No need to set the copied flag---there are no copies of // No need to set the copied flag --- there are no copies.
// copies or constants so the original was not copied.
elements_[i].set_static_type(element.static_type());
} else { } else {
// Clear the copy flag of non-constant, non-copy elements above // Clear the copy flag of non-constant, non-copy elements.
// the high water mark. They cannot be copied because copes are // They cannot be copied because copies are not allowed.
// always higher than their backing store and copies are not // The copy flag is not relied on before the end of this loop,
// allowed above the water mark. // including when registers are spilled.
elements_[i].clear_copied(); elements_[i].clear_copied();
} }
} }
@ -998,7 +996,6 @@ Result VirtualFrame::Pop() {
if (element.is_memory()) { if (element.is_memory()) {
Result temp = cgen()->allocator()->Allocate(); Result temp = cgen()->allocator()->Allocate();
ASSERT(temp.is_valid()); ASSERT(temp.is_valid());
temp.set_static_type(element.static_type());
__ pop(temp.reg()); __ pop(temp.reg());
return temp; return temp;
} }
@ -1030,12 +1027,11 @@ Result VirtualFrame::Pop() {
FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED); FrameElement::RegisterElement(temp.reg(), FrameElement::SYNCED);
// Preserve the copy flag on the element. // Preserve the copy flag on the element.
if (element.is_copied()) new_element.set_copied(); if (element.is_copied()) new_element.set_copied();
new_element.set_static_type(element.static_type());
elements_[index] = new_element; elements_[index] = new_element;
__ mov(temp.reg(), Operand(ebp, fp_relative(index))); __ mov(temp.reg(), Operand(ebp, fp_relative(index)));
return Result(temp.reg(), element.static_type()); return Result(temp.reg());
} else if (element.is_register()) { } else if (element.is_register()) {
return Result(element.reg(), element.static_type()); return Result(element.reg());
} else { } else {
ASSERT(element.is_constant()); ASSERT(element.is_constant());
return Result(element.handle()); return Result(element.handle());

70
deps/v8/src/ia32/virtual-frame-ia32.h

@ -43,7 +43,7 @@ namespace internal {
// as random access to the expression stack elements, locals, and // as random access to the expression stack elements, locals, and
// parameters. // parameters.
class VirtualFrame : public ZoneObject { class VirtualFrame: public ZoneObject {
public: public:
// A utility class to introduce a scope where the virtual frame is // A utility class to introduce a scope where the virtual frame is
// expected to remain spilled. The constructor spills the code // expected to remain spilled. The constructor spills the code
@ -65,7 +65,7 @@ class VirtualFrame : public ZoneObject {
private: private:
bool previous_state_; bool previous_state_;
CodeGenerator* cgen() { return CodeGeneratorScope::Current(); } CodeGenerator* cgen() {return CodeGeneratorScope::Current();}
}; };
// An illegal index into the virtual frame. // An illegal index into the virtual frame.
@ -78,6 +78,7 @@ class VirtualFrame : public ZoneObject {
explicit VirtualFrame(VirtualFrame* original); explicit VirtualFrame(VirtualFrame* original);
CodeGenerator* cgen() { return CodeGeneratorScope::Current(); } CodeGenerator* cgen() { return CodeGeneratorScope::Current(); }
MacroAssembler* masm() { return cgen()->masm(); } MacroAssembler* masm() { return cgen()->masm(); }
// Create a duplicate of an existing valid frame element. // Create a duplicate of an existing valid frame element.
@ -87,9 +88,7 @@ class VirtualFrame : public ZoneObject {
int element_count() { return elements_.length(); } int element_count() { return elements_.length(); }
// The height of the virtual expression stack. // The height of the virtual expression stack.
int height() { int height() { return element_count() - expression_base_index(); }
return element_count() - expression_base_index();
}
int register_location(int num) { int register_location(int num) {
ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters); ASSERT(num >= 0 && num < RegisterAllocator::kNumRegisters);
@ -255,7 +254,9 @@ class VirtualFrame : public ZoneObject {
void PushReceiverSlotAddress(); void PushReceiverSlotAddress();
// Push the function on top of the frame. // Push the function on top of the frame.
void PushFunction() { PushFrameSlotAt(function_index()); } void PushFunction() {
PushFrameSlotAt(function_index());
}
// Save the value of the esi register to the context frame slot. // Save the value of the esi register to the context frame slot.
void SaveContextRegister(); void SaveContextRegister();
@ -290,7 +291,9 @@ class VirtualFrame : public ZoneObject {
} }
// The receiver frame slot. // The receiver frame slot.
Operand Receiver() { return ParameterAt(-1); } Operand Receiver() {
return ParameterAt(-1);
}
// Push a try-catch or try-finally handler on top of the virtual frame. // Push a try-catch or try-finally handler on top of the virtual frame.
void PushTryHandler(HandlerType type); void PushTryHandler(HandlerType type);
@ -320,9 +323,7 @@ class VirtualFrame : public ZoneObject {
// Invoke builtin given the number of arguments it expects on (and // Invoke builtin given the number of arguments it expects on (and
// removes from) the stack. // removes from) the stack.
Result InvokeBuiltin(Builtins::JavaScript id, Result InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag, int arg_count);
InvokeFlag flag,
int arg_count);
// Call load IC. Name and receiver are found on top of the frame. // Call load IC. Name and receiver are found on top of the frame.
// Receiver is not dropped. // Receiver is not dropped.
@ -357,10 +358,14 @@ class VirtualFrame : public ZoneObject {
void Drop(int count); void Drop(int count);
// Drop one element. // Drop one element.
void Drop() { Drop(1); } void Drop() {
Drop(1);
}
// Duplicate the top element of the frame. // Duplicate the top element of the frame.
void Dup() { PushFrameSlotAt(element_count() - 1); } void Dup() {
PushFrameSlotAt(element_count() - 1);
}
// Pop an element from the top of the expression stack. Returns a // Pop an element from the top of the expression stack. Returns a
// Result, which may be a constant or a register. // Result, which may be a constant or a register.
@ -378,15 +383,17 @@ class VirtualFrame : public ZoneObject {
void EmitPush(Immediate immediate); void EmitPush(Immediate immediate);
// Push an element on the virtual frame. // Push an element on the virtual frame.
void Push(Register reg, StaticType static_type = StaticType()); void Push(Register reg);
void Push(Handle<Object> value); void Push(Handle<Object> value);
void Push(Smi* value) { Push(Handle<Object>(value)); } void Push(Smi* value) {
Push(Handle<Object> (value));
}
// Pushing a result invalidates it (its contents become owned by the // Pushing a result invalidates it (its contents become owned by the
// frame). // frame).
void Push(Result* result) { void Push(Result* result) {
if (result->is_register()) { if (result->is_register()) {
Push(result->reg(), result->static_type()); Push(result->reg());
} else { } else {
ASSERT(result->is_constant()); ASSERT(result->is_constant());
Push(result->handle()); Push(result->handle());
@ -418,32 +425,48 @@ class VirtualFrame : public ZoneObject {
int register_locations_[RegisterAllocator::kNumRegisters]; int register_locations_[RegisterAllocator::kNumRegisters];
// The number of frame-allocated locals and parameters respectively. // The number of frame-allocated locals and parameters respectively.
int parameter_count() { return cgen()->scope()->num_parameters(); } int parameter_count() {
int local_count() { return cgen()->scope()->num_stack_slots(); } return cgen()->scope()->num_parameters();
}
int local_count() {
return cgen()->scope()->num_stack_slots();
}
// The index of the element that is at the processor's frame pointer // The index of the element that is at the processor's frame pointer
// (the ebp register). The parameters, receiver, and return address // (the ebp register). The parameters, receiver, and return address
// are below the frame pointer. // are below the frame pointer.
int frame_pointer() { return parameter_count() + 2; } int frame_pointer() {
return parameter_count() + 2;
}
// The index of the first parameter. The receiver lies below the first // The index of the first parameter. The receiver lies below the first
// parameter. // parameter.
int param0_index() { return 1; } int param0_index() {
return 1;
}
// The index of the context slot in the frame. It is immediately // The index of the context slot in the frame. It is immediately
// above the frame pointer. // above the frame pointer.
int context_index() { return frame_pointer() + 1; } int context_index() {
return frame_pointer() + 1;
}
// The index of the function slot in the frame. It is above the frame // The index of the function slot in the frame. It is above the frame
// pointer and the context slot. // pointer and the context slot.
int function_index() { return frame_pointer() + 2; } int function_index() {
return frame_pointer() + 2;
}
// The index of the first local. Between the frame pointer and the // The index of the first local. Between the frame pointer and the
// locals lie the context and the function. // locals lie the context and the function.
int local0_index() { return frame_pointer() + 3; } int local0_index() {
return frame_pointer() + 3;
}
// The index of the base of the expression stack. // The index of the base of the expression stack.
int expression_base_index() { return local0_index() + local_count(); } int expression_base_index() {
return local0_index() + local_count();
}
// Convert a frame index into a frame pointer relative offset into the // Convert a frame index into a frame pointer relative offset into the
// actual stack. // actual stack.
@ -547,7 +570,6 @@ class VirtualFrame : public ZoneObject {
friend class JumpTarget; friend class JumpTarget;
}; };
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_IA32_VIRTUAL_FRAME_IA32_H_ #endif // V8_IA32_VIRTUAL_FRAME_IA32_H_

22
deps/v8/src/ic.cc

@ -863,6 +863,25 @@ static bool StoreICableLookup(LookupResult* lookup) {
} }
static bool LookupForStoreIC(JSObject* object,
String* name,
LookupResult* lookup) {
object->LocalLookup(name, lookup);
if (!StoreICableLookup(lookup)) {
return false;
}
if (lookup->type() == INTERCEPTOR) {
if (object->GetNamedInterceptor()->setter()->IsUndefined()) {
object->LocalLookupRealNamedProperty(name, lookup);
return StoreICableLookup(lookup);
}
}
return true;
}
Object* StoreIC::Store(State state, Object* StoreIC::Store(State state,
Handle<Object> object, Handle<Object> object,
Handle<String> name, Handle<String> name,
@ -889,8 +908,7 @@ Object* StoreIC::Store(State state,
// Lookup the property locally in the receiver. // Lookup the property locally in the receiver.
if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) { if (FLAG_use_ic && !receiver->IsJSGlobalProxy()) {
LookupResult lookup; LookupResult lookup;
receiver->LocalLookup(*name, &lookup); if (LookupForStoreIC(*receiver, *name, &lookup)) {
if (StoreICableLookup(&lookup)) {
UpdateCaches(&lookup, state, receiver, name, value); UpdateCaches(&lookup, state, receiver, name, value);
} }
} }

2
deps/v8/src/ic.h

@ -221,7 +221,7 @@ class LoadIC: public IC {
// The offset from the inlined patch site to the start of the // The offset from the inlined patch site to the start of the
// inlined load instruction. It is 7 bytes (test eax, imm) plus // inlined load instruction. It is 7 bytes (test eax, imm) plus
// 6 bytes (jne slow_label). // 6 bytes (jne slow_label).
static const int kOffsetToLoadInstruction = 13; static const int kOffsetToLoadInstruction;
private: private:
static void Generate(MacroAssembler* masm, const ExternalReference& f); static void Generate(MacroAssembler* masm, const ExternalReference& f);

65
deps/v8/src/jsregexp.cc

@ -405,7 +405,6 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
// Prepare space for the return values. // Prepare space for the return values.
int number_of_capture_registers = int number_of_capture_registers =
(IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2; (IrregexpNumberOfCaptures(FixedArray::cast(jsregexp->data())) + 1) * 2;
OffsetsVector offsets(number_of_capture_registers);
#ifdef DEBUG #ifdef DEBUG
if (FLAG_trace_regexp_bytecodes) { if (FLAG_trace_regexp_bytecodes) {
@ -421,15 +420,19 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
last_match_info->EnsureSize(number_of_capture_registers + kLastMatchOverhead); last_match_info->EnsureSize(number_of_capture_registers + kLastMatchOverhead);
int* offsets_vector = offsets.vector();
bool rc; bool rc;
// We have to initialize this with something to make gcc happy but we can't
// initialize it with its real value until after the GC-causing things are
// over.
FixedArray* array = NULL;
// Dispatch to the correct RegExp implementation. // Dispatch to the correct RegExp implementation.
Handle<String> original_subject = subject; Handle<String> original_subject = subject;
Handle<FixedArray> regexp(FixedArray::cast(jsregexp->data())); Handle<FixedArray> regexp(FixedArray::cast(jsregexp->data()));
if (UseNativeRegexp()) { if (UseNativeRegexp()) {
#if V8_TARGET_ARCH_IA32 #if V8_TARGET_ARCH_IA32
OffsetsVector captures(number_of_capture_registers);
int* captures_vector = captures.vector();
RegExpMacroAssemblerIA32::Result res; RegExpMacroAssemblerIA32::Result res;
do { do {
bool is_ascii = subject->IsAsciiRepresentation(); bool is_ascii = subject->IsAsciiRepresentation();
@ -439,8 +442,8 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
Handle<Code> code(RegExpImpl::IrregexpNativeCode(*regexp, is_ascii)); Handle<Code> code(RegExpImpl::IrregexpNativeCode(*regexp, is_ascii));
res = RegExpMacroAssemblerIA32::Match(code, res = RegExpMacroAssemblerIA32::Match(code,
subject, subject,
offsets_vector, captures_vector,
offsets.length(), captures.length(),
previous_index); previous_index);
// If result is RETRY, the string have changed representation, and we // If result is RETRY, the string have changed representation, and we
// must restart from scratch. // must restart from scratch.
@ -453,7 +456,16 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
|| res == RegExpMacroAssemblerIA32::FAILURE); || res == RegExpMacroAssemblerIA32::FAILURE);
rc = (res == RegExpMacroAssemblerIA32::SUCCESS); rc = (res == RegExpMacroAssemblerIA32::SUCCESS);
#else if (!rc) return Factory::null_value();
array = last_match_info->elements();
ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
// The captures come in (start, end+1) pairs.
for (int i = 0; i < number_of_capture_registers; i += 2) {
SetCapture(array, i, captures_vector[i]);
SetCapture(array, i + 1, captures_vector[i + 1]);
}
#else // !V8_TARGET_ARCH_IA32
UNREACHABLE(); UNREACHABLE();
#endif #endif
} else { } else {
@ -461,33 +473,36 @@ Handle<Object> RegExpImpl::IrregexpExec(Handle<JSRegExp> jsregexp,
if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) { if (!EnsureCompiledIrregexp(jsregexp, is_ascii)) {
return Handle<Object>::null(); return Handle<Object>::null();
} }
// Now that we have done EnsureCompiledIrregexp we can get the number of
// registers.
int number_of_registers =
IrregexpNumberOfRegisters(FixedArray::cast(jsregexp->data()));
OffsetsVector registers(number_of_registers);
int* register_vector = registers.vector();
for (int i = number_of_capture_registers - 1; i >= 0; i--) { for (int i = number_of_capture_registers - 1; i >= 0; i--) {
offsets_vector[i] = -1; register_vector[i] = -1;
} }
Handle<ByteArray> byte_codes(IrregexpByteCode(*regexp, is_ascii)); Handle<ByteArray> byte_codes(IrregexpByteCode(*regexp, is_ascii));
rc = IrregexpInterpreter::Match(byte_codes, rc = IrregexpInterpreter::Match(byte_codes,
subject, subject,
offsets_vector, register_vector,
previous_index); previous_index);
if (!rc) return Factory::null_value();
array = last_match_info->elements();
ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
// The captures come in (start, end+1) pairs.
for (int i = 0; i < number_of_capture_registers; i += 2) {
SetCapture(array, i, register_vector[i]);
SetCapture(array, i + 1, register_vector[i + 1]);
}
} }
// Handle results from RegExp implementation.
if (!rc) {
return Factory::null_value();
}
FixedArray* array = last_match_info->elements();
ASSERT(array->length() >= number_of_capture_registers + kLastMatchOverhead);
// The captures come in (start, end+1) pairs.
SetLastCaptureCount(array, number_of_capture_registers); SetLastCaptureCount(array, number_of_capture_registers);
SetLastSubject(array, *original_subject); SetLastSubject(array, *original_subject);
SetLastInput(array, *original_subject); SetLastInput(array, *original_subject);
for (int i = 0; i < number_of_capture_registers; i+=2) {
SetCapture(array, i, offsets_vector[i]);
SetCapture(array, i + 1, offsets_vector[i + 1]);
}
return last_match_info; return last_match_info;
} }
@ -896,12 +911,13 @@ void Trace::PerformDeferredActions(RegExpMacroAssembler* assembler,
// The "+1" is to avoid a push_limit of zero if stack_limit_slack() is 1. // The "+1" is to avoid a push_limit of zero if stack_limit_slack() is 1.
const int push_limit = (assembler->stack_limit_slack() + 1) / 2; const int push_limit = (assembler->stack_limit_slack() + 1) / 2;
// Count pushes performed to force a stack limit check occasionally.
int pushes = 0;
for (int reg = 0; reg <= max_register; reg++) { for (int reg = 0; reg <= max_register; reg++) {
if (!affected_registers.Get(reg)) { if (!affected_registers.Get(reg)) {
continue; continue;
} }
// Count pushes performed to force a stack limit check occasionally.
int pushes = 0;
// The chronologically first deferred action in the trace // The chronologically first deferred action in the trace
// is used to infer the action needed to restore a register // is used to infer the action needed to restore a register
@ -1885,7 +1901,8 @@ void TextNode::GetQuickCheckDetails(QuickCheckDetails* details,
uint32_t differing_bits = (from ^ to); uint32_t differing_bits = (from ^ to);
// A mask and compare is only perfect if the differing bits form a // A mask and compare is only perfect if the differing bits form a
// number like 00011111 with one single block of trailing 1s. // number like 00011111 with one single block of trailing 1s.
if ((differing_bits & (differing_bits + 1)) == 0) { if ((differing_bits & (differing_bits + 1)) == 0 &&
from + differing_bits == to) {
pos->determines_perfectly = true; pos->determines_perfectly = true;
} }
uint32_t common_bits = ~SmearBitsRight(differing_bits); uint32_t common_bits = ~SmearBitsRight(differing_bits);

26
deps/v8/src/jump-target.cc

@ -81,17 +81,12 @@ void JumpTarget::ComputeEntryFrame() {
// frame. // frame.
for (int i = 0; i < length; i++) { for (int i = 0; i < length; i++) {
FrameElement element = initial_frame->elements_[i]; FrameElement element = initial_frame->elements_[i];
// We do not allow copies or constants in bidirectional frames. All // We do not allow copies or constants in bidirectional frames.
// elements above the water mark on bidirectional frames have
// unknown static types.
if (direction_ == BIDIRECTIONAL) { if (direction_ == BIDIRECTIONAL) {
if (element.is_constant() || element.is_copy()) { if (element.is_constant() || element.is_copy()) {
elements.Add(NULL); elements.Add(NULL);
continue; continue;
} }
// It's safe to change the static type on the initial frame
// element, see comment in JumpTarget::Combine.
initial_frame->elements_[i].set_static_type(StaticType::unknown());
} }
elements.Add(&initial_frame->elements_[i]); elements.Add(&initial_frame->elements_[i]);
} }
@ -142,18 +137,12 @@ void JumpTarget::ComputeEntryFrame() {
for (int i = length - 1; i >= 0; i--) { for (int i = length - 1; i >= 0; i--) {
if (elements[i] == NULL) { if (elements[i] == NULL) {
// Loop over all the reaching frames to check whether the element // Loop over all the reaching frames to check whether the element
// is synced on all frames, to count the registers it occupies, // is synced on all frames and to count the registers it occupies.
// and to compute a merged static type.
bool is_synced = true; bool is_synced = true;
RegisterFile candidate_registers; RegisterFile candidate_registers;
int best_count = kMinInt; int best_count = kMinInt;
int best_reg_num = RegisterAllocator::kInvalidRegister; int best_reg_num = RegisterAllocator::kInvalidRegister;
StaticType type; // Initially invalid.
if (direction_ != BIDIRECTIONAL) {
type = reaching_frames_[0]->elements_[i].static_type();
}
for (int j = 0; j < reaching_frames_.length(); j++) { for (int j = 0; j < reaching_frames_.length(); j++) {
FrameElement element = reaching_frames_[j]->elements_[i]; FrameElement element = reaching_frames_[j]->elements_[i];
is_synced = is_synced && element.is_synced(); is_synced = is_synced && element.is_synced();
@ -167,7 +156,6 @@ void JumpTarget::ComputeEntryFrame() {
best_reg_num = num; best_reg_num = num;
} }
} }
type = type.merge(element.static_type());
} }
// If the value is synced on all frames, put it in memory. This // If the value is synced on all frames, put it in memory. This
@ -175,7 +163,6 @@ void JumpTarget::ComputeEntryFrame() {
// memory-to-register move when the value is needed later. // memory-to-register move when the value is needed later.
if (is_synced) { if (is_synced) {
// Already recorded as a memory element. // Already recorded as a memory element.
entry_frame_->elements_[i].set_static_type(type);
continue; continue;
} }
@ -190,20 +177,15 @@ void JumpTarget::ComputeEntryFrame() {
} }
} }
if (best_reg_num == RegisterAllocator::kInvalidRegister) { if (best_reg_num != RegisterAllocator::kInvalidRegister) {
// If there was no register found, the element is already
// recorded as in memory.
entry_frame_->elements_[i].set_static_type(type);
} else {
// If there was a register choice, use it. Preserve the copied // If there was a register choice, use it. Preserve the copied
// flag on the element. Set the static type as computed. // flag on the element.
bool is_copied = entry_frame_->elements_[i].is_copied(); bool is_copied = entry_frame_->elements_[i].is_copied();
Register reg = RegisterAllocator::ToRegister(best_reg_num); Register reg = RegisterAllocator::ToRegister(best_reg_num);
entry_frame_->elements_[i] = entry_frame_->elements_[i] =
FrameElement::RegisterElement(reg, FrameElement::RegisterElement(reg,
FrameElement::NOT_SYNCED); FrameElement::NOT_SYNCED);
if (is_copied) entry_frame_->elements_[i].set_copied(); if (is_copied) entry_frame_->elements_[i].set_copied();
entry_frame_->elements_[i].set_static_type(type);
entry_frame_->set_register_location(reg, i); entry_frame_->set_register_location(reg, i);
} }
} }

126
deps/v8/src/log-inl.h

@ -0,0 +1,126 @@
// Copyright 2006-2009 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following
// disclaimer in the documentation and/or other materials provided
// with the distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived
// from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#ifndef V8_LOG_INL_H_
#define V8_LOG_INL_H_
#include "log.h"
namespace v8 {
namespace internal {
//
// VMState class implementation. A simple stack of VM states held by the
// logger and partially threaded through the call stack. States are pushed by
// VMState construction and popped by destruction.
//
#ifdef ENABLE_LOGGING_AND_PROFILING
inline const char* StateToString(StateTag state) {
switch (state) {
case JS:
return "JS";
case GC:
return "GC";
case COMPILER:
return "COMPILER";
case OTHER:
return "OTHER";
default:
UNREACHABLE();
return NULL;
}
}
VMState::VMState(StateTag state) : disabled_(true) {
if (!Logger::is_logging()) {
return;
}
disabled_ = false;
#if !defined(ENABLE_HEAP_PROTECTION)
// When not protecting the heap, there is no difference between
// EXTERNAL and OTHER. As an optimization in that case, we will not
// perform EXTERNAL->OTHER transitions through the API. We thus
// compress the two states into one.
if (state == EXTERNAL) state = OTHER;
#endif
state_ = state;
previous_ = Logger::current_state_;
Logger::current_state_ = this;
if (FLAG_log_state_changes) {
LOG(UncheckedStringEvent("Entering", StateToString(state_)));
if (previous_ != NULL) {
LOG(UncheckedStringEvent("From", StateToString(previous_->state_)));
}
}
#ifdef ENABLE_HEAP_PROTECTION
if (FLAG_protect_heap && previous_ != NULL) {
if (state_ == EXTERNAL) {
// We are leaving V8.
ASSERT(previous_->state_ != EXTERNAL);
Heap::Protect();
} else if (previous_->state_ == EXTERNAL) {
// We are entering V8.
Heap::Unprotect();
}
}
#endif
}
VMState::~VMState() {
if (disabled_) return;
Logger::current_state_ = previous_;
if (FLAG_log_state_changes) {
LOG(UncheckedStringEvent("Leaving", StateToString(state_)));
if (previous_ != NULL) {
LOG(UncheckedStringEvent("To", StateToString(previous_->state_)));
}
}
#ifdef ENABLE_HEAP_PROTECTION
if (FLAG_protect_heap && previous_ != NULL) {
if (state_ == EXTERNAL) {
// We are reentering V8.
ASSERT(previous_->state_ != EXTERNAL);
Heap::Unprotect();
} else if (previous_->state_ == EXTERNAL) {
// We are leaving V8.
Heap::Protect();
}
}
#endif
}
#endif
} } // namespace v8::internal
#endif // V8_LOG_INL_H_

18
deps/v8/src/log-utils.cc

@ -261,14 +261,20 @@ void LogMessageBuilder::AppendAddress(Address addr) {
void LogMessageBuilder::AppendAddress(Address addr, Address bias) { void LogMessageBuilder::AppendAddress(Address addr, Address bias) {
if (!FLAG_compress_log || bias == NULL) { if (!FLAG_compress_log) {
Append("0x%" V8PRIxPTR, addr); Append("0x%" V8PRIxPTR, addr);
} else if (bias == NULL) {
Append("%" V8PRIxPTR, addr);
} else { } else {
intptr_t delta = addr - bias; uintptr_t delta;
// To avoid printing negative offsets in an unsigned form, char sign;
// we are printing an absolute value with a sign. if (addr >= bias) {
const char sign = delta >= 0 ? '+' : '-'; delta = addr - bias;
if (sign == '-') { delta = -delta; } sign = '+';
} else {
delta = bias - addr;
sign = '-';
}
Append("%c%" V8PRIxPTR, sign, delta); Append("%c%" V8PRIxPTR, sign, delta);
} }
} }

231
deps/v8/src/log.cc

@ -31,9 +31,7 @@
#include "bootstrapper.h" #include "bootstrapper.h"
#include "log.h" #include "log.h"
#include "log-utils.h"
#include "macro-assembler.h" #include "macro-assembler.h"
#include "platform.h"
#include "serialize.h" #include "serialize.h"
#include "string-stream.h" #include "string-stream.h"
@ -304,6 +302,7 @@ VMState Logger::bottom_state_(EXTERNAL);
SlidingStateWindow* Logger::sliding_state_window_ = NULL; SlidingStateWindow* Logger::sliding_state_window_ = NULL;
const char** Logger::log_events_ = NULL; const char** Logger::log_events_ = NULL;
CompressionHelper* Logger::compression_helper_ = NULL; CompressionHelper* Logger::compression_helper_ = NULL;
bool Logger::is_logging_ = false;
#define DECLARE_LONG_EVENT(ignore1, long_name, ignore2) long_name, #define DECLARE_LONG_EVENT(ignore1, long_name, ignore2) long_name,
const char* kLongLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = { const char* kLongLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
@ -318,11 +317,6 @@ const char* kCompressedLogEventsNames[Logger::NUMBER_OF_LOG_EVENTS] = {
#undef DECLARE_SHORT_EVENT #undef DECLARE_SHORT_EVENT
bool Logger::IsEnabled() {
return Log::IsEnabled();
}
void Logger::ProfilerBeginEvent() { void Logger::ProfilerBeginEvent() {
if (!Log::IsEnabled()) return; if (!Log::IsEnabled()) return;
LogMessageBuilder msg; LogMessageBuilder msg;
@ -426,26 +420,30 @@ void Logger::ApiNamedSecurityCheck(Object* key) {
void Logger::SharedLibraryEvent(const char* library_path, void Logger::SharedLibraryEvent(const char* library_path,
unsigned start, uintptr_t start,
unsigned end) { uintptr_t end) {
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_prof) return; if (!Log::IsEnabled() || !FLAG_prof) return;
LogMessageBuilder msg; LogMessageBuilder msg;
msg.Append("shared-library,\"%s\",0x%08x,0x%08x\n", library_path, msg.Append("shared-library,\"%s\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
start, end); library_path,
start,
end);
msg.WriteToLogFile(); msg.WriteToLogFile();
#endif #endif
} }
void Logger::SharedLibraryEvent(const wchar_t* library_path, void Logger::SharedLibraryEvent(const wchar_t* library_path,
unsigned start, uintptr_t start,
unsigned end) { uintptr_t end) {
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
if (!Log::IsEnabled() || !FLAG_prof) return; if (!Log::IsEnabled() || !FLAG_prof) return;
LogMessageBuilder msg; LogMessageBuilder msg;
msg.Append("shared-library,\"%ls\",0x%08x,0x%08x\n", library_path, msg.Append("shared-library,\"%ls\",0x%08" V8PRIxPTR ",0x%08" V8PRIxPTR "\n",
start, end); library_path,
start,
end);
msg.WriteToLogFile(); msg.WriteToLogFile();
#endif #endif
} }
@ -623,6 +621,42 @@ void Logger::DeleteEvent(const char* name, void* object) {
} }
#ifdef ENABLE_LOGGING_AND_PROFILING
// A class that contains all common code dealing with record compression.
class CompressionHelper {
public:
explicit CompressionHelper(int window_size)
: compressor_(window_size), repeat_count_(0) { }
// Handles storing message in compressor, retrieving the previous one and
// prefixing it with repeat count, if needed.
// Returns true if message needs to be written to log.
bool HandleMessage(LogMessageBuilder* msg) {
if (!msg->StoreInCompressor(&compressor_)) {
// Current message repeats the previous one, don't write it.
++repeat_count_;
return false;
}
if (repeat_count_ == 0) {
return msg->RetrieveCompressedPrevious(&compressor_);
}
OS::SNPrintF(prefix_, "%s,%d,",
Logger::log_events_[Logger::REPEAT_META_EVENT],
repeat_count_ + 1);
repeat_count_ = 0;
return msg->RetrieveCompressedPrevious(&compressor_, prefix_.start());
}
private:
LogRecordCompressor compressor_;
int repeat_count_;
EmbeddedVector<char, 20> prefix_;
};
#endif // ENABLE_LOGGING_AND_PROFILING
void Logger::CodeCreateEvent(LogEventsAndTags tag, void Logger::CodeCreateEvent(LogEventsAndTags tag,
Code* code, Code* code,
const char* comment) { const char* comment) {
@ -639,6 +673,10 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
msg.Append(*p); msg.Append(*p);
} }
msg.Append('"'); msg.Append('"');
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n'); msg.Append('\n');
msg.WriteToLogFile(); msg.WriteToLogFile();
#endif #endif
@ -653,7 +691,12 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, String* name) {
name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); name->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]); msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
msg.AppendAddress(code->address()); msg.AppendAddress(code->address());
msg.Append(",%d,\"%s\"\n", code->ExecutableSize(), *str); msg.Append(",%d,\"%s\"", code->ExecutableSize(), *str);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile(); msg.WriteToLogFile();
#endif #endif
} }
@ -671,8 +714,13 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag,
source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL); source->ToCString(DISALLOW_NULLS, ROBUST_STRING_TRAVERSAL);
msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]); msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
msg.AppendAddress(code->address()); msg.AppendAddress(code->address());
msg.Append(",%d,\"%s %s:%d\"\n", msg.Append(",%d,\"%s %s:%d\"",
code->ExecutableSize(), *str, *sourcestr, line); code->ExecutableSize(), *str, *sourcestr, line);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile(); msg.WriteToLogFile();
#endif #endif
} }
@ -684,7 +732,12 @@ void Logger::CodeCreateEvent(LogEventsAndTags tag, Code* code, int args_count) {
LogMessageBuilder msg; LogMessageBuilder msg;
msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]); msg.Append("%s,%s,", log_events_[CODE_CREATION_EVENT], log_events_[tag]);
msg.AppendAddress(code->address()); msg.AppendAddress(code->address());
msg.Append(",%d,\"args_count: %d\"\n", code->ExecutableSize(), args_count); msg.Append(",%d,\"args_count: %d\"", code->ExecutableSize(), args_count);
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile(); msg.WriteToLogFile();
#endif #endif
} }
@ -699,48 +752,17 @@ void Logger::RegExpCodeCreateEvent(Code* code, String* source) {
msg.AppendAddress(code->address()); msg.AppendAddress(code->address());
msg.Append(",%d,\"", code->ExecutableSize()); msg.Append(",%d,\"", code->ExecutableSize());
msg.AppendDetailed(source, false); msg.AppendDetailed(source, false);
msg.Append("\"\n"); msg.Append('\"');
if (FLAG_compress_log) {
ASSERT(compression_helper_ != NULL);
if (!compression_helper_->HandleMessage(&msg)) return;
}
msg.Append('\n');
msg.WriteToLogFile(); msg.WriteToLogFile();
#endif #endif
} }
#ifdef ENABLE_LOGGING_AND_PROFILING
// A class that contains all common code dealing with record compression.
class CompressionHelper {
public:
explicit CompressionHelper(int window_size)
: compressor_(window_size), repeat_count_(0) { }
// Handles storing message in compressor, retrieving the previous one and
// prefixing it with repeat count, if needed.
// Returns true if message needs to be written to log.
bool HandleMessage(LogMessageBuilder* msg) {
if (!msg->StoreInCompressor(&compressor_)) {
// Current message repeats the previous one, don't write it.
++repeat_count_;
return false;
}
if (repeat_count_ == 0) {
return msg->RetrieveCompressedPrevious(&compressor_);
}
OS::SNPrintF(prefix_, "%s,%d,",
Logger::log_events_[Logger::REPEAT_META_EVENT],
repeat_count_ + 1);
repeat_count_ = 0;
return msg->RetrieveCompressedPrevious(&compressor_, prefix_.start());
}
private:
LogRecordCompressor compressor_;
int repeat_count_;
EmbeddedVector<char, 20> prefix_;
};
#endif // ENABLE_LOGGING_AND_PROFILING
void Logger::CodeMoveEvent(Address from, Address to) { void Logger::CodeMoveEvent(Address from, Address to) {
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
static Address prev_to_ = NULL; static Address prev_to_ = NULL;
@ -918,6 +940,7 @@ void Logger::PauseProfiler() {
// Must be the same message as Log::kDynamicBufferSeal. // Must be the same message as Log::kDynamicBufferSeal.
LOG(UncheckedStringEvent("profiler", "pause")); LOG(UncheckedStringEvent("profiler", "pause"));
} }
is_logging_ = false;
} }
@ -925,6 +948,7 @@ void Logger::ResumeProfiler() {
if (!profiler_->paused() || !Log::IsEnabled()) { if (!profiler_->paused() || !Log::IsEnabled()) {
return; return;
} }
is_logging_ = true;
if (FLAG_prof_lazy) { if (FLAG_prof_lazy) {
LOG(UncheckedStringEvent("profiler", "resume")); LOG(UncheckedStringEvent("profiler", "resume"));
FLAG_log_code = true; FLAG_log_code = true;
@ -998,10 +1022,9 @@ void Logger::LogCompiledFunctions() {
Handle<String> script_name(String::cast(script->name())); Handle<String> script_name(String::cast(script->name()));
int line_num = GetScriptLineNumber(script, shared->start_position()); int line_num = GetScriptLineNumber(script, shared->start_position());
if (line_num > 0) { if (line_num > 0) {
line_num += script->line_offset()->value() + 1;
LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG, LOG(CodeCreateEvent(Logger::LAZY_COMPILE_TAG,
shared->code(), *func_name, shared->code(), *func_name,
*script_name, line_num)); *script_name, line_num + 1));
} else { } else {
// Can't distinguish enum and script here, so always use Script. // Can't distinguish enum and script here, so always use Script.
LOG(CodeCreateEvent(Logger::SCRIPT_TAG, LOG(CodeCreateEvent(Logger::SCRIPT_TAG,
@ -1042,9 +1065,11 @@ bool Logger::Setup() {
FLAG_prof_auto = false; FLAG_prof_auto = false;
} }
bool open_log_file = FLAG_log || FLAG_log_runtime || FLAG_log_api bool start_logging = FLAG_log || FLAG_log_runtime || FLAG_log_api
|| FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect || FLAG_log_code || FLAG_log_gc || FLAG_log_handles || FLAG_log_suspect
|| FLAG_log_regexp || FLAG_log_state_changes || FLAG_prof_lazy; || FLAG_log_regexp || FLAG_log_state_changes;
bool open_log_file = start_logging || FLAG_prof_lazy;
// If we're logging anything, we need to open the log file. // If we're logging anything, we need to open the log file.
if (open_log_file) { if (open_log_file) {
@ -1107,10 +1132,15 @@ bool Logger::Setup() {
compression_helper_ = new CompressionHelper(kCompressionWindowSize); compression_helper_ = new CompressionHelper(kCompressionWindowSize);
} }
is_logging_ = start_logging;
if (FLAG_prof) { if (FLAG_prof) {
profiler_ = new Profiler(); profiler_ = new Profiler();
if (!FLAG_prof_auto) if (!FLAG_prof_auto) {
profiler_->pause(); profiler_->pause();
} else {
is_logging_ = true;
}
profiler_->Engage(); profiler_->Engage();
} }
@ -1168,85 +1198,4 @@ void Logger::EnableSlidingStateWindow() {
} }
//
// VMState class implementation. A simple stack of VM states held by the
// logger and partially threaded through the call stack. States are pushed by
// VMState construction and popped by destruction.
//
#ifdef ENABLE_LOGGING_AND_PROFILING
static const char* StateToString(StateTag state) {
switch (state) {
case JS:
return "JS";
case GC:
return "GC";
case COMPILER:
return "COMPILER";
case OTHER:
return "OTHER";
default:
UNREACHABLE();
return NULL;
}
}
VMState::VMState(StateTag state) {
#if !defined(ENABLE_HEAP_PROTECTION)
// When not protecting the heap, there is no difference between
// EXTERNAL and OTHER. As an optimization in that case, we will not
// perform EXTERNAL->OTHER transitions through the API. We thus
// compress the two states into one.
if (state == EXTERNAL) state = OTHER;
#endif
state_ = state;
previous_ = Logger::current_state_;
Logger::current_state_ = this;
if (FLAG_log_state_changes) {
LOG(UncheckedStringEvent("Entering", StateToString(state_)));
if (previous_ != NULL) {
LOG(UncheckedStringEvent("From", StateToString(previous_->state_)));
}
}
#ifdef ENABLE_HEAP_PROTECTION
if (FLAG_protect_heap && previous_ != NULL) {
if (state_ == EXTERNAL) {
// We are leaving V8.
ASSERT(previous_->state_ != EXTERNAL);
Heap::Protect();
} else if (previous_->state_ == EXTERNAL) {
// We are entering V8.
Heap::Unprotect();
}
}
#endif
}
VMState::~VMState() {
Logger::current_state_ = previous_;
if (FLAG_log_state_changes) {
LOG(UncheckedStringEvent("Leaving", StateToString(state_)));
if (previous_ != NULL) {
LOG(UncheckedStringEvent("To", StateToString(previous_->state_)));
}
}
#ifdef ENABLE_HEAP_PROTECTION
if (FLAG_protect_heap && previous_ != NULL) {
if (state_ == EXTERNAL) {
// We are reentering V8.
ASSERT(previous_->state_ != EXTERNAL);
Heap::Unprotect();
} else if (previous_->state_ == EXTERNAL) {
// We are leaving V8.
Heap::Protect();
}
}
#endif
}
#endif
} } // namespace v8::internal } } // namespace v8::internal

26
deps/v8/src/log.h

@ -28,6 +28,9 @@
#ifndef V8_LOG_H_ #ifndef V8_LOG_H_
#define V8_LOG_H_ #define V8_LOG_H_
#include "platform.h"
#include "log-utils.h"
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -77,7 +80,7 @@ class CompressionHelper;
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
#define LOG(Call) \ #define LOG(Call) \
do { \ do { \
if (v8::internal::Logger::IsEnabled()) \ if (v8::internal::Logger::is_logging()) \
v8::internal::Logger::Call; \ v8::internal::Logger::Call; \
} while (false) } while (false)
#else #else
@ -88,12 +91,13 @@ class CompressionHelper;
class VMState BASE_EMBEDDED { class VMState BASE_EMBEDDED {
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
public: public:
explicit VMState(StateTag state); inline explicit VMState(StateTag state);
~VMState(); inline ~VMState();
StateTag state() { return state_; } StateTag state() { return state_; }
private: private:
bool disabled_;
StateTag state_; StateTag state_;
VMState* previous_; VMState* previous_;
#else #else
@ -217,11 +221,11 @@ class Logger {
static void HeapSampleItemEvent(const char* type, int number, int bytes); static void HeapSampleItemEvent(const char* type, int number, int bytes);
static void SharedLibraryEvent(const char* library_path, static void SharedLibraryEvent(const char* library_path,
unsigned start, uintptr_t start,
unsigned end); uintptr_t end);
static void SharedLibraryEvent(const wchar_t* library_path, static void SharedLibraryEvent(const wchar_t* library_path,
unsigned start, uintptr_t start,
unsigned end); uintptr_t end);
// ==== Events logged by --log-regexp ==== // ==== Events logged by --log-regexp ====
// Regexp compilation and execution events. // Regexp compilation and execution events.
@ -236,7 +240,9 @@ class Logger {
return current_state_ ? current_state_->state() : OTHER; return current_state_ ? current_state_->state() : OTHER;
} }
static bool IsEnabled(); static bool is_logging() {
return is_logging_;
}
// Pause/Resume collection of profiling data. // Pause/Resume collection of profiling data.
// When data collection is paused, Tick events are discarded until // When data collection is paused, Tick events are discarded until
@ -317,8 +323,10 @@ class Logger {
friend class VMState; friend class VMState;
friend class LoggerTestHelper; friend class LoggerTestHelper;
static bool is_logging_;
#else #else
static bool is_enabled() { return false; } static bool is_logging() { return false; }
#endif #endif
}; };

17
deps/v8/src/mark-compact.cc

@ -947,13 +947,18 @@ void EncodeFreeRegion(Address free_start, int free_size) {
// Try to promote all objects in new space. Heap numbers and sequential // Try to promote all objects in new space. Heap numbers and sequential
// strings are promoted to the code space, all others to the old space. // strings are promoted to the code space, large objects to large object space,
// and all others to the old space.
inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) { inline Object* MCAllocateFromNewSpace(HeapObject* object, int object_size) {
OldSpace* target_space = Heap::TargetSpace(object); Object* forwarded;
ASSERT(target_space == Heap::old_pointer_space() || if (object_size > Heap::MaxObjectSizeInPagedSpace()) {
target_space == Heap::old_data_space()); forwarded = Failure::Exception();
Object* forwarded = target_space->MCAllocateRaw(object_size); } else {
OldSpace* target_space = Heap::TargetSpace(object);
ASSERT(target_space == Heap::old_pointer_space() ||
target_space == Heap::old_data_space());
forwarded = target_space->MCAllocateRaw(object_size);
}
if (forwarded->IsFailure()) { if (forwarded->IsFailure()) {
forwarded = Heap::new_space()->MCAllocateRaw(object_size); forwarded = Heap::new_space()->MCAllocateRaw(object_size);
} }

28
deps/v8/src/messages.js

@ -37,13 +37,13 @@ function GetInstanceName(cons) {
if (cons.length == 0) { if (cons.length == 0) {
return ""; return "";
} }
var first = cons.charAt(0).toLowerCase(); var first = %StringToLowerCase(StringCharAt.call(cons, 0));
var mapping = kVowelSounds; var mapping = kVowelSounds;
if (cons.length > 1 && (cons.charAt(0) != first)) { if (cons.length > 1 && (StringCharAt.call(cons, 0) != first)) {
// First char is upper case // First char is upper case
var second = cons.charAt(1).toLowerCase(); var second = %StringToLowerCase(StringCharAt.call(cons, 1));
// Second char is upper case // Second char is upper case
if (cons.charAt(1) != second) if (StringCharAt.call(cons, 1) != second)
mapping = kCapitalVowelSounds; mapping = kCapitalVowelSounds;
} }
var s = mapping[first] ? "an " : "a "; var s = mapping[first] ? "an " : "a ";
@ -126,7 +126,7 @@ function FormatString(format, args) {
var str; var str;
try { str = ToDetailString(args[i]); } try { str = ToDetailString(args[i]); }
catch (e) { str = "#<error>"; } catch (e) { str = "#<error>"; }
result = result.split("%" + i).join(str); result = ArrayJoin.call(StringSplit.call(result, "%" + i), str);
} }
return result; return result;
} }
@ -146,17 +146,9 @@ function ToDetailString(obj) {
function MakeGenericError(constructor, type, args) { function MakeGenericError(constructor, type, args) {
if (args instanceof $Array) { if (IS_UNDEFINED(args)) {
for (var i = 0; i < args.length; i++) {
var elem = args[i];
if (elem instanceof $Array && elem.length > 100) { // arbitrary limit, grab a reasonable slice to report
args[i] = elem.slice(0,20).concat("...");
}
}
} else if (IS_UNDEFINED(args)) {
args = []; args = [];
} }
var e = new constructor(kAddMessageAccessorsMarker); var e = new constructor(kAddMessageAccessorsMarker);
e.type = type; e.type = type;
e.arguments = args; e.arguments = args;
@ -281,7 +273,7 @@ Script.prototype.locationFromPosition = function (position,
// Determine start, end and column. // Determine start, end and column.
var start = line == 0 ? 0 : this.line_ends[line - 1] + 1; var start = line == 0 ? 0 : this.line_ends[line - 1] + 1;
var end = this.line_ends[line]; var end = this.line_ends[line];
if (end > 0 && this.source.charAt(end - 1) == '\r') end--; if (end > 0 && StringCharAt.call(this.source, end - 1) == '\r') end--;
var column = position - start; var column = position - start;
// Adjust according to the offset within the resource. // Adjust according to the offset within the resource.
@ -394,7 +386,7 @@ Script.prototype.sourceLine = function (opt_line) {
// Return the source line. // Return the source line.
var start = line == 0 ? 0 : this.line_ends[line - 1] + 1; var start = line == 0 ? 0 : this.line_ends[line - 1] + 1;
var end = this.line_ends[line]; var end = this.line_ends[line];
return this.source.substring(start, end); return StringSubstring.call(this.source, start, end);
} }
@ -498,7 +490,7 @@ SourceLocation.prototype.restrict = function (opt_limit, opt_before) {
* Source text for this location. * Source text for this location.
*/ */
SourceLocation.prototype.sourceText = function () { SourceLocation.prototype.sourceText = function () {
return this.script.source.substring(this.start, this.end); return StringSubstring.call(this.script.source, this.start, this.end);
}; };
@ -535,7 +527,7 @@ function SourceSlice(script, from_line, to_line, from_position, to_position) {
* the line terminating characters (if any) * the line terminating characters (if any)
*/ */
SourceSlice.prototype.sourceText = function () { SourceSlice.prototype.sourceText = function () {
return this.script.source.substring(this.from_position, this.to_position); return StringSubstring.call(this.script.source, this.from_position, this.to_position);
}; };

26
deps/v8/src/mirror-delay.js

@ -1895,8 +1895,8 @@ JSONProtocolSerializer.prototype.includeSource_ = function() {
} }
JSONProtocolSerializer.prototype.compactFormat_ = function() { JSONProtocolSerializer.prototype.inlineRefs_ = function() {
return this.options_ && this.options_.compactFormat; return this.options_ && this.options_.inlineRefs;
} }
@ -1960,7 +1960,7 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
// the mirror to the referenced mirrors. // the mirror to the referenced mirrors.
if (reference && if (reference &&
(mirror.isValue() || mirror.isScript() || mirror.isContext())) { (mirror.isValue() || mirror.isScript() || mirror.isContext())) {
if (this.compactFormat_() && mirror.isValue()) { if (this.inlineRefs_() && mirror.isValue()) {
return this.serializeReferenceWithDisplayData_(mirror); return this.serializeReferenceWithDisplayData_(mirror);
} else { } else {
this.add_(mirror); this.add_(mirror);
@ -2051,7 +2051,10 @@ JSONProtocolSerializer.prototype.serialize_ = function(mirror, reference,
content.sourceLength = mirror.source().length; content.sourceLength = mirror.source().length;
content.scriptType = mirror.scriptType(); content.scriptType = mirror.scriptType();
content.compilationType = mirror.compilationType(); content.compilationType = mirror.compilationType();
if (mirror.compilationType() == 1) { // Compilation type eval. // For compilation type eval emit information on the script from which
// eval was called if a script is present.
if (mirror.compilationType() == 1 &&
mirror.evalFromFunction().script()) {
content.evalFromScript = content.evalFromScript =
this.serializeReference(mirror.evalFromFunction().script()); this.serializeReference(mirror.evalFromFunction().script());
var evalFromLocation = mirror.evalFromLocation() var evalFromLocation = mirror.evalFromLocation()
@ -2172,7 +2175,7 @@ JSONProtocolSerializer.prototype.serializeProperty_ = function(propertyMirror) {
result.name = propertyMirror.name(); result.name = propertyMirror.name();
var propertyValue = propertyMirror.value(); var propertyValue = propertyMirror.value();
if (this.compactFormat_() && propertyValue.isValue()) { if (this.inlineRefs_() && propertyValue.isValue()) {
result.value = this.serializeReferenceWithDisplayData_(propertyValue); result.value = this.serializeReferenceWithDisplayData_(propertyValue);
} else { } else {
if (propertyMirror.attributes() != PropertyAttribute.None) { if (propertyMirror.attributes() != PropertyAttribute.None) {
@ -2229,6 +2232,15 @@ JSONProtocolSerializer.prototype.serializeFrame_ = function(mirror, content) {
if (!IS_UNDEFINED(source_line_text)) { if (!IS_UNDEFINED(source_line_text)) {
content.sourceLineText = source_line_text; content.sourceLineText = source_line_text;
} }
content.scopes = [];
for (var i = 0; i < mirror.scopeCount(); i++) {
var scope = mirror.scope(i);
content.scopes.push({
type: scope.scopeType(),
index: i
});
}
} }
@ -2236,7 +2248,9 @@ JSONProtocolSerializer.prototype.serializeScope_ = function(mirror, content) {
content.index = mirror.scopeIndex(); content.index = mirror.scopeIndex();
content.frameIndex = mirror.frameIndex(); content.frameIndex = mirror.frameIndex();
content.type = mirror.scopeType(); content.type = mirror.scopeType();
content.object = this.serializeReference(mirror.scopeObject()); content.object = this.inlineRefs_() ?
this.serializeValue(mirror.scopeObject()) :
this.serializeReference(mirror.scopeObject());
} }

35
deps/v8/src/objects-inl.h

@ -481,11 +481,6 @@ bool Object::IsMapCache() {
} }
bool Object::IsLookupCache() {
return IsHashTable();
}
bool Object::IsPrimitive() { bool Object::IsPrimitive() {
return IsOddball() || IsNumber() || IsString(); return IsOddball() || IsNumber() || IsString();
} }
@ -659,6 +654,12 @@ Object* Object::GetProperty(String* key, PropertyAttributes* attributes) {
#define WRITE_INT_FIELD(p, offset, value) \ #define WRITE_INT_FIELD(p, offset, value) \
(*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value) (*reinterpret_cast<int*>(FIELD_ADDR(p, offset)) = value)
#define READ_INTPTR_FIELD(p, offset) \
(*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)))
#define WRITE_INTPTR_FIELD(p, offset, value) \
(*reinterpret_cast<intptr_t*>(FIELD_ADDR(p, offset)) = value)
#define READ_UINT32_FIELD(p, offset) \ #define READ_UINT32_FIELD(p, offset) \
(*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset))) (*reinterpret_cast<uint32_t*>(FIELD_ADDR(p, offset)))
@ -1304,7 +1305,6 @@ int DescriptorArray::Search(String* name) {
} }
String* DescriptorArray::GetKey(int descriptor_number) { String* DescriptorArray::GetKey(int descriptor_number) {
ASSERT(descriptor_number < number_of_descriptors()); ASSERT(descriptor_number < number_of_descriptors());
return String::cast(get(ToKeyIndex(descriptor_number))); return String::cast(get(ToKeyIndex(descriptor_number)));
@ -1388,7 +1388,6 @@ CAST_ACCESSOR(Dictionary)
CAST_ACCESSOR(SymbolTable) CAST_ACCESSOR(SymbolTable)
CAST_ACCESSOR(CompilationCacheTable) CAST_ACCESSOR(CompilationCacheTable)
CAST_ACCESSOR(MapCache) CAST_ACCESSOR(MapCache)
CAST_ACCESSOR(LookupCache)
CAST_ACCESSOR(String) CAST_ACCESSOR(String)
CAST_ACCESSOR(SeqString) CAST_ACCESSOR(SeqString)
CAST_ACCESSOR(SeqAsciiString) CAST_ACCESSOR(SeqAsciiString)
@ -1786,11 +1785,17 @@ int Map::inobject_properties() {
int HeapObject::SizeFromMap(Map* map) { int HeapObject::SizeFromMap(Map* map) {
InstanceType instance_type = map->instance_type(); InstanceType instance_type = map->instance_type();
// Only inline the two most frequent cases. // Only inline the most frequent cases.
if (instance_type == JS_OBJECT_TYPE) return map->instance_size(); if (instance_type == JS_OBJECT_TYPE ||
(instance_type & (kIsNotStringMask | kStringRepresentationMask)) ==
(kStringTag | kConsStringTag) ||
instance_type == JS_ARRAY_TYPE) return map->instance_size();
if (instance_type == FIXED_ARRAY_TYPE) { if (instance_type == FIXED_ARRAY_TYPE) {
return reinterpret_cast<FixedArray*>(this)->FixedArraySize(); return reinterpret_cast<FixedArray*>(this)->FixedArraySize();
} }
if (instance_type == BYTE_ARRAY_TYPE) {
return reinterpret_cast<ByteArray*>(this)->ByteArraySize();
}
// Otherwise do the general size computation. // Otherwise do the general size computation.
return SlowSizeFromMap(map); return SlowSizeFromMap(map);
} }
@ -2130,6 +2135,7 @@ ACCESSORS(BreakPointInfo, statement_position, Smi, kStatementPositionIndex)
ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex) ACCESSORS(BreakPointInfo, break_point_objects, Object, kBreakPointObjectsIndex)
#endif #endif
ACCESSORS(SharedFunctionInfo, construct_stub, Code, kConstructStubOffset)
ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset) ACCESSORS(SharedFunctionInfo, name, Object, kNameOffset)
ACCESSORS(SharedFunctionInfo, instance_class_name, Object, ACCESSORS(SharedFunctionInfo, instance_class_name, Object,
kInstanceClassNameOffset) kInstanceClassNameOffset)
@ -2303,12 +2309,12 @@ void JSBuiltinsObject::set_javascript_builtin(Builtins::JavaScript id,
Address Proxy::proxy() { Address Proxy::proxy() {
return AddressFrom<Address>(READ_INT_FIELD(this, kProxyOffset)); return AddressFrom<Address>(READ_INTPTR_FIELD(this, kProxyOffset));
} }
void Proxy::set_proxy(Address value) { void Proxy::set_proxy(Address value) {
WRITE_INT_FIELD(this, kProxyOffset, OffsetFrom(value)); WRITE_INTPTR_FIELD(this, kProxyOffset, OffsetFrom(value));
} }
@ -2639,6 +2645,13 @@ void Map::ClearCodeCache() {
} }
void JSArray::EnsureSize(int required_size) {
ASSERT(HasFastElements());
if (elements()->length() >= required_size) return;
Expand(required_size);
}
void JSArray::SetContent(FixedArray* storage) { void JSArray::SetContent(FixedArray* storage) {
set_length(Smi::FromInt(storage->length()), SKIP_WRITE_BARRIER); set_length(Smi::FromInt(storage->length()), SKIP_WRITE_BARRIER);
set_elements(storage); set_elements(storage);

112
deps/v8/src/objects.cc

@ -1302,16 +1302,19 @@ Object* JSObject::ReplaceSlowProperty(String* name,
Object* value, Object* value,
PropertyAttributes attributes) { PropertyAttributes attributes) {
Dictionary* dictionary = property_dictionary(); Dictionary* dictionary = property_dictionary();
PropertyDetails old_details = int old_index = dictionary->FindStringEntry(name);
dictionary->DetailsAt(dictionary->FindStringEntry(name)); int new_enumeration_index = 0; // 0 means "Use the next available index."
int new_index = old_details.index(); if (old_index != -1) {
if (old_details.IsTransition()) new_index = 0; // All calls to ReplaceSlowProperty have had all transitions removed.
ASSERT(!dictionary->DetailsAt(old_index).IsTransition());
new_enumeration_index = dictionary->DetailsAt(old_index).index();
}
PropertyDetails new_details(attributes, NORMAL, old_details.index()); PropertyDetails new_details(attributes, NORMAL, new_enumeration_index);
Object* result = Object* result =
property_dictionary()->SetOrAddStringEntry(name, value, new_details); dictionary->SetOrAddStringEntry(name, value, new_details);
if (result->IsFailure()) return result; if (result->IsFailure()) return result;
if (property_dictionary() != result) { if (dictionary != result) {
set_properties(Dictionary::cast(result)); set_properties(Dictionary::cast(result));
} }
return value; return value;
@ -1562,7 +1565,11 @@ Object* JSObject::LookupCallbackSetterInPrototypes(uint32_t index) {
void JSObject::LookupInDescriptor(String* name, LookupResult* result) { void JSObject::LookupInDescriptor(String* name, LookupResult* result) {
DescriptorArray* descriptors = map()->instance_descriptors(); DescriptorArray* descriptors = map()->instance_descriptors();
int number = descriptors->Search(name); int number = DescriptorLookupCache::Lookup(descriptors, name);
if (number == DescriptorLookupCache::kAbsent) {
number = descriptors->Search(name);
DescriptorLookupCache::Update(descriptors, name, number);
}
if (number != DescriptorArray::kNotFound) { if (number != DescriptorArray::kNotFound) {
result->DescriptorResult(this, descriptors->GetDetails(number), number); result->DescriptorResult(this, descriptors->GetDetails(number), number);
} else { } else {
@ -4632,7 +4639,7 @@ void SharedFunctionInfo::SourceCodePrint(StringStream* accumulator,
void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) { void SharedFunctionInfo::SharedFunctionInfoIterateBody(ObjectVisitor* v) {
IteratePointers(v, kNameOffset, kCodeOffset + kPointerSize); IteratePointers(v, kNameOffset, kConstructStubOffset + kPointerSize);
IteratePointers(v, kInstanceClassNameOffset, kScriptOffset + kPointerSize); IteratePointers(v, kInstanceClassNameOffset, kScriptOffset + kPointerSize);
IteratePointers(v, kDebugInfoOffset, kInferredNameOffset + kPointerSize); IteratePointers(v, kDebugInfoOffset, kInferredNameOffset + kPointerSize);
} }
@ -4977,10 +4984,8 @@ Object* JSArray::Initialize(int capacity) {
} }
void JSArray::EnsureSize(int required_size) { void JSArray::Expand(int required_size) {
Handle<JSArray> self(this); Handle<JSArray> self(this);
ASSERT(HasFastElements());
if (elements()->length() >= required_size) return;
Handle<FixedArray> old_backing(elements()); Handle<FixedArray> old_backing(elements());
int old_size = old_backing->length(); int old_size = old_backing->length();
// Doubling in size would be overkill, but leave some slack to avoid // Doubling in size would be overkill, but leave some slack to avoid
@ -6352,8 +6357,8 @@ Object* HashTable<prefix_size, element_size>::EnsureCapacity(
int n, HashTableKey* key) { int n, HashTableKey* key) {
int capacity = Capacity(); int capacity = Capacity();
int nof = NumberOfElements() + n; int nof = NumberOfElements() + n;
// Make sure 25% is free // Make sure 50% is free
if (nof + (nof >> 2) <= capacity) return this; if (nof + (nof >> 1) <= capacity) return this;
Object* obj = Allocate(nof * 2); Object* obj = Allocate(nof * 2);
if (obj->IsFailure()) return obj; if (obj->IsFailure()) return obj;
@ -6756,60 +6761,6 @@ class SymbolsKey : public HashTableKey {
}; };
// MapNameKeys are used as keys in lookup caches.
class MapNameKey : public HashTableKey {
public:
MapNameKey(Map* map, String* name)
: map_(map), name_(name) { }
bool IsMatch(Object* other) {
if (!other->IsFixedArray()) return false;
FixedArray* pair = FixedArray::cast(other);
Map* map = Map::cast(pair->get(0));
if (map != map_) return false;
String* name = String::cast(pair->get(1));
return name->Equals(name_);
}
typedef uint32_t (*HashFunction)(Object* obj);
virtual HashFunction GetHashFunction() { return MapNameHash; }
static uint32_t MapNameHashHelper(Map* map, String* name) {
// Uses only lower 32 bits if pointers are larger.
uintptr_t addr_hash =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(map));
return addr_hash ^ name->Hash();
}
static uint32_t MapNameHash(Object* obj) {
FixedArray* pair = FixedArray::cast(obj);
Map* map = Map::cast(pair->get(0));
String* name = String::cast(pair->get(1));
return MapNameHashHelper(map, name);
}
virtual uint32_t Hash() {
return MapNameHashHelper(map_, name_);
}
virtual Object* GetObject() {
Object* obj = Heap::AllocateFixedArray(2);
if (obj->IsFailure()) return obj;
FixedArray* pair = FixedArray::cast(obj);
pair->set(0, map_);
pair->set(1, name_);
return pair;
}
virtual bool IsStringKey() { return false; }
private:
Map* map_;
String* name_;
};
Object* MapCache::Lookup(FixedArray* array) { Object* MapCache::Lookup(FixedArray* array) {
SymbolsKey key(array); SymbolsKey key(array);
int entry = FindEntry(&key); int entry = FindEntry(&key);
@ -6832,31 +6783,6 @@ Object* MapCache::Put(FixedArray* array, Map* value) {
} }
int LookupCache::Lookup(Map* map, String* name) {
MapNameKey key(map, name);
int entry = FindEntry(&key);
if (entry == -1) return kNotFound;
return Smi::cast(get(EntryToIndex(entry) + 1))->value();
}
Object* LookupCache::Put(Map* map, String* name, int value) {
MapNameKey key(map, name);
Object* obj = EnsureCapacity(1, &key);
if (obj->IsFailure()) return obj;
Object* k = key.GetObject();
if (k->IsFailure()) return k;
LookupCache* cache = reinterpret_cast<LookupCache*>(obj);
int entry = cache->FindInsertionEntry(k, key.Hash());
int index = EntryToIndex(entry);
cache->set(index, k);
cache->set(index + 1, Smi::FromInt(value), SKIP_WRITE_BARRIER);
cache->ElementAdded();
return cache;
}
Object* Dictionary::Allocate(int at_least_space_for) { Object* Dictionary::Allocate(int at_least_space_for) {
Object* obj = DictionaryBase::Allocate(at_least_space_for); Object* obj = DictionaryBase::Allocate(at_least_space_for);
// Initialize the next enumeration index. // Initialize the next enumeration index.

39
deps/v8/src/objects.h

@ -59,7 +59,6 @@
// - SymbolTable // - SymbolTable
// - CompilationCacheTable // - CompilationCacheTable
// - MapCache // - MapCache
// - LookupCache
// - Context // - Context
// - GlobalContext // - GlobalContext
// - String // - String
@ -678,7 +677,6 @@ class Object BASE_EMBEDDED {
inline bool IsSymbolTable(); inline bool IsSymbolTable();
inline bool IsCompilationCacheTable(); inline bool IsCompilationCacheTable();
inline bool IsMapCache(); inline bool IsMapCache();
inline bool IsLookupCache();
inline bool IsPrimitive(); inline bool IsPrimitive();
inline bool IsGlobalObject(); inline bool IsGlobalObject();
inline bool IsJSGlobalObject(); inline bool IsJSGlobalObject();
@ -1641,6 +1639,9 @@ class FixedArray: public Array {
// Garbage collection support. // Garbage collection support.
static int SizeFor(int length) { return kHeaderSize + length * kPointerSize; } static int SizeFor(int length) { return kHeaderSize + length * kPointerSize; }
// Code Generation support.
static int OffsetOfElementAt(int index) { return SizeFor(index); }
// Casting. // Casting.
static inline FixedArray* cast(Object* obj); static inline FixedArray* cast(Object* obj);
@ -2012,27 +2013,6 @@ class MapCache: public HashTable<0, 2> {
}; };
// LookupCache.
//
// Maps a key consisting of a map and a name to an index within a
// fast-case properties array.
//
// LookupCaches are used to avoid repeatedly searching instance
// descriptors.
class LookupCache: public HashTable<0, 2> {
public:
int Lookup(Map* map, String* name);
Object* Put(Map* map, String* name, int offset);
static inline LookupCache* cast(Object* obj);
// Constant returned by Lookup when the key was not found.
static const int kNotFound = -1;
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(LookupCache);
};
// Dictionary for keeping properties and elements in slow case. // Dictionary for keeping properties and elements in slow case.
// //
// One element in the prefix is used for storing non-element // One element in the prefix is used for storing non-element
@ -2056,6 +2036,7 @@ class Dictionary: public DictionaryBase {
// Returns the property details for the property at entry. // Returns the property details for the property at entry.
PropertyDetails DetailsAt(int entry) { PropertyDetails DetailsAt(int entry) {
ASSERT(entry >= 0); // Not found is -1, which is not caught by get().
return PropertyDetails(Smi::cast(get(EntryToIndex(entry) + 2))); return PropertyDetails(Smi::cast(get(EntryToIndex(entry) + 2)));
} }
@ -2766,6 +2747,9 @@ class SharedFunctionInfo: public HeapObject {
// [code]: Function code. // [code]: Function code.
DECL_ACCESSORS(code, Code) DECL_ACCESSORS(code, Code)
// [construct stub]: Code stub for constructing instances of this function.
DECL_ACCESSORS(construct_stub, Code)
// Returns if this function has been compiled to native code yet. // Returns if this function has been compiled to native code yet.
inline bool is_compiled(); inline bool is_compiled();
@ -2861,7 +2845,8 @@ class SharedFunctionInfo: public HeapObject {
// (An even number of integers has a size that is a multiple of a pointer.) // (An even number of integers has a size that is a multiple of a pointer.)
static const int kNameOffset = HeapObject::kHeaderSize; static const int kNameOffset = HeapObject::kHeaderSize;
static const int kCodeOffset = kNameOffset + kPointerSize; static const int kCodeOffset = kNameOffset + kPointerSize;
static const int kLengthOffset = kCodeOffset + kPointerSize; static const int kConstructStubOffset = kCodeOffset + kPointerSize;
static const int kLengthOffset = kConstructStubOffset + kPointerSize;
static const int kFormalParameterCountOffset = kLengthOffset + kIntSize; static const int kFormalParameterCountOffset = kLengthOffset + kIntSize;
static const int kExpectedNofPropertiesOffset = static const int kExpectedNofPropertiesOffset =
kFormalParameterCountOffset + kIntSize; kFormalParameterCountOffset + kIntSize;
@ -4005,7 +3990,7 @@ class JSArray: public JSObject {
// Uses handles. Ensures that the fixed array backing the JSArray has at // Uses handles. Ensures that the fixed array backing the JSArray has at
// least the stated size. // least the stated size.
void EnsureSize(int minimum_size_of_backing_fixed_array); inline void EnsureSize(int minimum_size_of_backing_fixed_array);
// Dispatched behavior. // Dispatched behavior.
#ifdef DEBUG #ifdef DEBUG
@ -4018,6 +4003,10 @@ class JSArray: public JSObject {
static const int kSize = kLengthOffset + kPointerSize; static const int kSize = kLengthOffset + kPointerSize;
private: private:
// Expand the fixed array backing of a fast-case JSArray to at least
// the requested size.
void Expand(int minimum_size_of_backing_fixed_array);
DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray); DISALLOW_IMPLICIT_CONSTRUCTORS(JSArray);
}; };

4
deps/v8/src/oprofile-agent.cc

@ -52,6 +52,10 @@ bool OProfileAgent::Initialize() {
return true; return true;
} }
#else #else
if (FLAG_oprofile) {
OS::Print("Warning: --oprofile specified but binary compiled without "
"oprofile support.\n");
}
return true; return true;
#endif #endif
} }

16
deps/v8/src/parser.cc

@ -1582,7 +1582,8 @@ VariableProxy* AstBuildingParser::Declare(Handle<String> name,
// For global const variables we bind the proxy to a variable. // For global const variables we bind the proxy to a variable.
if (mode == Variable::CONST && top_scope_->is_global_scope()) { if (mode == Variable::CONST && top_scope_->is_global_scope()) {
ASSERT(resolve); // should be set by all callers ASSERT(resolve); // should be set by all callers
var = NEW(Variable(top_scope_, name, Variable::CONST, true, false)); Variable::Kind kind = Variable::NORMAL;
var = NEW(Variable(top_scope_, name, Variable::CONST, true, kind));
} }
// If requested and we have a local variable, bind the proxy to the variable // If requested and we have a local variable, bind the proxy to the variable
@ -2653,10 +2654,15 @@ Expression* Parser::ParseBinaryExpression(int prec, bool accept_IN, bool* ok) {
double y_val = y->AsLiteral()->handle()->Number(); double y_val = y->AsLiteral()->handle()->Number();
int64_t y_int = static_cast<int64_t>(y_val); int64_t y_int = static_cast<int64_t>(y_val);
// There are rounding issues with this optimization, but they don't // There are rounding issues with this optimization, but they don't
// apply if the number to be divided with has a reciprocal that can // apply if the number to be divided with has a reciprocal that can be
// be precisely represented as a floating point number. This is // precisely represented as a floating point number. This is the case
// the case if the number is an integer power of 2. // if the number is an integer power of 2. Negative integer powers of
if (static_cast<double>(y_int) == y_val && IsPowerOf2(y_int)) { // 2 work too, but for -2, -1, 1 and 2 we don't do the strength
// reduction because the inlined optimistic idiv has a reasonable
// chance of succeeding by producing a Smi answer with no remainder.
if (static_cast<double>(y_int) == y_val &&
(IsPowerOf2(y_int) || IsPowerOf2(-y_int)) &&
(y_int > 2 || y_int < -2)) {
y = NewNumberLiteral(1 / y_val); y = NewNumberLiteral(1 / y_val);
op = Token::MUL; op = Token::MUL;
} }

27
deps/v8/src/platform-linux.cc

@ -224,8 +224,8 @@ PosixMemoryMappedFile::~PosixMemoryMappedFile() {
#ifdef ENABLE_LOGGING_AND_PROFILING #ifdef ENABLE_LOGGING_AND_PROFILING
static unsigned StringToLong(char* buffer) { static uintptr_t StringToULong(char* buffer) {
return static_cast<unsigned>(strtol(buffer, NULL, 16)); // NOLINT return strtoul(buffer, NULL, 16); // NOLINT
} }
#endif #endif
@ -242,13 +242,13 @@ void OS::LogSharedLibraryAddresses() {
addr_buffer[10] = 0; addr_buffer[10] = 0;
int result = read(fd, addr_buffer + 2, 8); int result = read(fd, addr_buffer + 2, 8);
if (result < 8) break; if (result < 8) break;
unsigned start = StringToLong(addr_buffer); uintptr_t start = StringToULong(addr_buffer);
result = read(fd, addr_buffer + 2, 1); result = read(fd, addr_buffer + 2, 1);
if (result < 1) break; if (result < 1) break;
if (addr_buffer[2] != '-') break; if (addr_buffer[2] != '-') break;
result = read(fd, addr_buffer + 2, 8); result = read(fd, addr_buffer + 2, 8);
if (result < 8) break; if (result < 8) break;
unsigned end = StringToLong(addr_buffer); uintptr_t end = StringToULong(addr_buffer);
char buffer[MAP_LENGTH]; char buffer[MAP_LENGTH];
int bytes_read = -1; int bytes_read = -1;
do { do {
@ -262,10 +262,21 @@ void OS::LogSharedLibraryAddresses() {
// Ignore mappings that are not executable. // Ignore mappings that are not executable.
if (buffer[3] != 'x') continue; if (buffer[3] != 'x') continue;
char* start_of_path = index(buffer, '/'); char* start_of_path = index(buffer, '/');
// There may be no filename in this line. Skip to next. // If there is no filename for this line then log it as an anonymous
if (start_of_path == NULL) continue; // mapping and use the address as its name.
buffer[bytes_read] = 0; if (start_of_path == NULL) {
LOG(SharedLibraryEvent(start_of_path, start, end)); // 40 is enough to print a 64 bit address range.
ASSERT(sizeof(buffer) > 40);
snprintf(buffer,
sizeof(buffer),
"%08" V8PRIxPTR "-%08" V8PRIxPTR,
start,
end);
LOG(SharedLibraryEvent(buffer, start, end));
} else {
buffer[bytes_read] = 0;
LOG(SharedLibraryEvent(start_of_path, start, end));
}
} }
close(fd); close(fd);
#endif #endif

23
deps/v8/src/platform-macos.cc

@ -35,10 +35,6 @@
#include <AvailabilityMacros.h> #include <AvailabilityMacros.h>
#ifdef MAC_OS_X_VERSION_10_5
# include <execinfo.h> // backtrace, backtrace_symbols
#endif
#include <pthread.h> #include <pthread.h>
#include <semaphore.h> #include <semaphore.h>
#include <signal.h> #include <signal.h>
@ -58,6 +54,17 @@
#include "platform.h" #include "platform.h"
// Manually define these here as weak imports, rather than including execinfo.h.
// This lets us launch on 10.4 which does not have these calls.
extern "C" {
extern int backtrace(void**, int) __attribute__((weak_import));
extern char** backtrace_symbols(void* const*, int)
__attribute__((weak_import));
extern void backtrace_symbols_fd(void* const*, int, int)
__attribute__((weak_import));
}
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -214,9 +221,10 @@ int OS::ActivationFrameAlignment() {
int OS::StackWalk(Vector<StackFrame> frames) { int OS::StackWalk(Vector<StackFrame> frames) {
#ifndef MAC_OS_X_VERSION_10_5 // If weak link to execinfo lib has failed, ie because we are on 10.4, abort.
return 0; if (backtrace == NULL)
#else return 0;
int frames_size = frames.length(); int frames_size = frames.length();
void** addresses = NewArray<void*>(frames_size); void** addresses = NewArray<void*>(frames_size);
int frames_count = backtrace(addresses, frames_size); int frames_count = backtrace(addresses, frames_size);
@ -244,7 +252,6 @@ int OS::StackWalk(Vector<StackFrame> frames) {
free(symbols); free(symbols);
return frames_count; return frames_count;
#endif
} }

23
deps/v8/src/platform.h

@ -44,6 +44,8 @@
#ifndef V8_PLATFORM_H_ #ifndef V8_PLATFORM_H_
#define V8_PLATFORM_H_ #define V8_PLATFORM_H_
#define V8_INFINITY INFINITY
// Windows specific stuff. // Windows specific stuff.
#ifdef WIN32 #ifdef WIN32
@ -58,7 +60,8 @@ enum {
FP_NORMAL FP_NORMAL
}; };
#define INFINITY HUGE_VAL #undef V8_INFINITY
#define V8_INFINITY HUGE_VAL
namespace v8 { namespace v8 {
namespace internal { namespace internal {
@ -75,14 +78,6 @@ int strncasecmp(const char* s1, const char* s2, int n);
#endif // _MSC_VER #endif // _MSC_VER
// MinGW specific stuff.
#ifdef __MINGW32__
// Needed for va_list.
#include <stdarg.h>
#endif // __MINGW32__
// Random is missing on both Visual Studio and MinGW. // Random is missing on both Visual Studio and MinGW.
int random(); int random();
@ -90,6 +85,10 @@ int random();
// GCC specific stuff // GCC specific stuff
#ifdef __GNUC__ #ifdef __GNUC__
// Needed for va_list on at least MinGW and Android.
#include <stdarg.h>
#define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100) #define __GNUC_VERSION__ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100)
// Unfortunately, the INFINITY macro cannot be used with the '-pedantic' // Unfortunately, the INFINITY macro cannot be used with the '-pedantic'
@ -100,8 +99,8 @@ int random();
// __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro // __GNUC_PREREQ is not defined in GCC for Mac OS X, so we define our own macro
#if __GNUC_VERSION__ >= 29600 && __GNUC_VERSION__ < 40100 #if __GNUC_VERSION__ >= 29600 && __GNUC_VERSION__ < 40100
#include <limits> #include <limits>
#undef INFINITY #undef V8_INFINITY
#define INFINITY std::numeric_limits<double>::infinity() #define V8_INFINITY std::numeric_limits<double>::infinity()
#endif #endif
#endif // __GNUC__ #endif // __GNUC__
@ -109,6 +108,8 @@ int random();
namespace v8 { namespace v8 {
namespace internal { namespace internal {
class Semaphore;
double ceiling(double x); double ceiling(double x);
// Forward declarations. // Forward declarations.

13
deps/v8/src/register-allocator.cc

@ -40,18 +40,7 @@ namespace internal {
Result::Result(Register reg) { Result::Result(Register reg) {
ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg)); ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
CodeGeneratorScope::Current()->allocator()->Use(reg); CodeGeneratorScope::Current()->allocator()->Use(reg);
value_ = StaticTypeField::encode(StaticType::UNKNOWN_TYPE) value_ = TypeField::encode(REGISTER) | DataField::encode(reg.code_);
| TypeField::encode(REGISTER)
| DataField::encode(reg.code_);
}
Result::Result(Register reg, StaticType type) {
ASSERT(reg.is_valid() && !RegisterAllocator::IsReserved(reg));
CodeGeneratorScope::Current()->allocator()->Use(reg);
value_ = StaticTypeField::encode(type.static_type_)
| TypeField::encode(REGISTER)
| DataField::encode(reg.code_);
} }

94
deps/v8/src/register-allocator.h

@ -44,80 +44,6 @@ namespace v8 {
namespace internal { namespace internal {
// -------------------------------------------------------------------------
// StaticType
//
// StaticType represent the type of an expression or a word at runtime.
// The types are ordered by knowledge, so that if a value can come about
// in more than one way, and there are different static types inferred
// for the different ways, the types can be combined to a type that we
// are still certain of (possibly just "unknown").
class StaticType BASE_EMBEDDED {
public:
StaticType() : static_type_(UNKNOWN_TYPE) {}
static StaticType unknown() { return StaticType(); }
static StaticType smi() { return StaticType(SMI_TYPE); }
static StaticType jsstring() { return StaticType(STRING_TYPE); }
static StaticType heap_object() { return StaticType(HEAP_OBJECT_TYPE); }
// Accessors
bool is_unknown() { return static_type_ == UNKNOWN_TYPE; }
bool is_smi() { return static_type_ == SMI_TYPE; }
bool is_heap_object() { return (static_type_ & HEAP_OBJECT_TYPE) != 0; }
bool is_jsstring() { return static_type_ == STRING_TYPE; }
bool operator==(StaticType other) const {
return static_type_ == other.static_type_;
}
// Find the best approximating type for a value.
// The argument must not be NULL.
static StaticType TypeOf(Object* object) {
// Remember to make the most specific tests first. A string is also a heap
// object, so test for string-ness first.
if (object->IsSmi()) return smi();
if (object->IsString()) return jsstring();
if (object->IsHeapObject()) return heap_object();
return unknown();
}
// Merges two static types to a type that combines the knowledge
// of both. If there is no way to combine (e.g., being a string *and*
// being a smi), the resulting type is unknown.
StaticType merge(StaticType other) {
StaticType x(
static_cast<StaticTypeEnum>(static_type_ & other.static_type_));
return x;
}
private:
enum StaticTypeEnum {
// Numbers are chosen so that least upper bound of the following
// partial order is implemented by bitwise "and":
//
// string
// |
// heap-object smi
// \ /
// unknown
//
UNKNOWN_TYPE = 0x00,
SMI_TYPE = 0x01,
HEAP_OBJECT_TYPE = 0x02,
STRING_TYPE = 0x04 | HEAP_OBJECT_TYPE
};
explicit StaticType(StaticTypeEnum static_type) : static_type_(static_type) {}
// StaticTypeEnum static_type_;
StaticTypeEnum static_type_;
friend class FrameElement;
friend class Result;
};
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
// Results // Results
// //
@ -138,13 +64,9 @@ class Result BASE_EMBEDDED {
// Construct a register Result. // Construct a register Result.
explicit Result(Register reg); explicit Result(Register reg);
// Construct a register Result with a known static type.
Result(Register reg, StaticType static_type);
// Construct a Result whose value is a compile-time constant. // Construct a Result whose value is a compile-time constant.
explicit Result(Handle<Object> value) { explicit Result(Handle<Object> value) {
value_ = StaticTypeField::encode(StaticType::TypeOf(*value).static_type_) value_ = TypeField::encode(CONSTANT)
| TypeField::encode(CONSTANT)
| DataField::encode(ConstantList()->length()); | DataField::encode(ConstantList()->length());
ConstantList()->Add(value); ConstantList()->Add(value);
} }
@ -182,15 +104,6 @@ class Result BASE_EMBEDDED {
inline void Unuse(); inline void Unuse();
StaticType static_type() const {
return StaticType(StaticTypeField::decode(value_));
}
void set_static_type(StaticType type) {
value_ = value_ & ~StaticTypeField::mask();
value_ = value_ | StaticTypeField::encode(type.static_type_);
}
Type type() const { return TypeField::decode(value_); } Type type() const { return TypeField::decode(value_); }
void invalidate() { value_ = TypeField::encode(INVALID); } void invalidate() { value_ = TypeField::encode(INVALID); }
@ -225,9 +138,8 @@ class Result BASE_EMBEDDED {
private: private:
uint32_t value_; uint32_t value_;
class StaticTypeField: public BitField<StaticType::StaticTypeEnum, 0, 3> {}; class TypeField: public BitField<Type, 0, 2> {};
class TypeField: public BitField<Type, 3, 2> {}; class DataField: public BitField<uint32_t, 2, 32 - 3> {};
class DataField: public BitField<uint32_t, 5, 32 - 6> {};
inline void CopyTo(Result* destination) const; inline void CopyTo(Result* destination) const;

5
deps/v8/src/rewriter.cc

@ -283,7 +283,10 @@ void AstOptimizer::VisitAssignment(Assignment* node) {
case Token::ASSIGN: case Token::ASSIGN:
// No type can be infered from the general assignment. // No type can be infered from the general assignment.
scoped_fni.Enter(); // Don't infer if it is "a = function(){...}();"-like expression.
if (node->value()->AsCall() == NULL) {
scoped_fni.Enter();
}
break; break;
case Token::ASSIGN_BIT_OR: case Token::ASSIGN_BIT_OR:
case Token::ASSIGN_BIT_XOR: case Token::ASSIGN_BIT_XOR:

190
deps/v8/src/runtime.cc

@ -50,9 +50,8 @@ namespace v8 {
namespace internal { namespace internal {
#define RUNTIME_ASSERT(value) do { \ #define RUNTIME_ASSERT(value) \
if (!(value)) return IllegalOperation(); \ if (!(value)) return Top::ThrowIllegalOperation();
} while (false)
// Cast the given object to a value of the specified type and store // Cast the given object to a value of the specified type and store
// it in a variable with the given name. If the object is not of the // it in a variable with the given name. If the object is not of the
@ -97,11 +96,6 @@ namespace internal {
static StaticResource<StringInputBuffer> runtime_string_input_buffer; static StaticResource<StringInputBuffer> runtime_string_input_buffer;
static Object* IllegalOperation() {
return Top::Throw(Heap::illegal_access_symbol());
}
static Object* DeepCopyBoilerplate(JSObject* boilerplate) { static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
StackLimitCheck check; StackLimitCheck check;
if (check.HasOverflowed()) return Top::StackOverflow(); if (check.HasOverflowed()) return Top::StackOverflow();
@ -124,7 +118,8 @@ static Object* DeepCopyBoilerplate(JSObject* boilerplate) {
} }
} }
mode = copy->GetWriteBarrierMode(); mode = copy->GetWriteBarrierMode();
for (int i = 0; i < copy->map()->inobject_properties(); i++) { int nof = copy->map()->inobject_properties();
for (int i = 0; i < nof; i++) {
Object* value = copy->InObjectPropertyAt(i); Object* value = copy->InObjectPropertyAt(i);
if (value->IsJSObject()) { if (value->IsJSObject()) {
JSObject* jsObject = JSObject::cast(value); JSObject* jsObject = JSObject::cast(value);
@ -522,12 +517,9 @@ static Object* Runtime_IsConstructCall(Arguments args) {
static Object* Runtime_RegExpCompile(Arguments args) { static Object* Runtime_RegExpCompile(Arguments args) {
HandleScope scope; HandleScope scope;
ASSERT(args.length() == 3); ASSERT(args.length() == 3);
CONVERT_CHECKED(JSRegExp, raw_re, args[0]); CONVERT_ARG_CHECKED(JSRegExp, re, 0);
Handle<JSRegExp> re(raw_re); CONVERT_ARG_CHECKED(String, pattern, 1);
CONVERT_CHECKED(String, raw_pattern, args[1]); CONVERT_ARG_CHECKED(String, flags, 2);
Handle<String> pattern(raw_pattern);
CONVERT_CHECKED(String, raw_flags, args[2]);
Handle<String> flags(raw_flags);
Handle<Object> result = RegExpImpl::Compile(re, pattern, flags); Handle<Object> result = RegExpImpl::Compile(re, pattern, flags);
if (result.is_null()) return Failure::Exception(); if (result.is_null()) return Failure::Exception();
return *result; return *result;
@ -537,8 +529,7 @@ static Object* Runtime_RegExpCompile(Arguments args) {
static Object* Runtime_CreateApiFunction(Arguments args) { static Object* Runtime_CreateApiFunction(Arguments args) {
HandleScope scope; HandleScope scope;
ASSERT(args.length() == 1); ASSERT(args.length() == 1);
CONVERT_CHECKED(FunctionTemplateInfo, raw_data, args[0]); CONVERT_ARG_CHECKED(FunctionTemplateInfo, data, 0);
Handle<FunctionTemplateInfo> data(raw_data);
return *Factory::CreateApiFunction(data); return *Factory::CreateApiFunction(data);
} }
@ -1066,15 +1057,12 @@ static Object* Runtime_InitializeConstContextSlot(Arguments args) {
static Object* Runtime_RegExpExec(Arguments args) { static Object* Runtime_RegExpExec(Arguments args) {
HandleScope scope; HandleScope scope;
ASSERT(args.length() == 4); ASSERT(args.length() == 4);
CONVERT_CHECKED(JSRegExp, raw_regexp, args[0]); CONVERT_ARG_CHECKED(JSRegExp, regexp, 0);
Handle<JSRegExp> regexp(raw_regexp); CONVERT_ARG_CHECKED(String, subject, 1);
CONVERT_CHECKED(String, raw_subject, args[1]);
Handle<String> subject(raw_subject);
// Due to the way the JS files are constructed this must be less than the // Due to the way the JS files are constructed this must be less than the
// length of a string, i.e. it is always a Smi. We check anyway for security. // length of a string, i.e. it is always a Smi. We check anyway for security.
CONVERT_CHECKED(Smi, index, args[2]); CONVERT_CHECKED(Smi, index, args[2]);
CONVERT_CHECKED(JSArray, raw_last_match_info, args[3]); CONVERT_ARG_CHECKED(JSArray, last_match_info, 3);
Handle<JSArray> last_match_info(raw_last_match_info);
RUNTIME_ASSERT(last_match_info->HasFastElements()); RUNTIME_ASSERT(last_match_info->HasFastElements());
RUNTIME_ASSERT(index->value() >= 0); RUNTIME_ASSERT(index->value() >= 0);
RUNTIME_ASSERT(index->value() <= subject->length()); RUNTIME_ASSERT(index->value() <= subject->length());
@ -1217,8 +1205,7 @@ static Object* Runtime_SetCode(Arguments args) {
HandleScope scope; HandleScope scope;
ASSERT(args.length() == 2); ASSERT(args.length() == 2);
CONVERT_CHECKED(JSFunction, raw_target, args[0]); CONVERT_ARG_CHECKED(JSFunction, target, 0);
Handle<JSFunction> target(raw_target);
Handle<Object> code = args.at<Object>(1); Handle<Object> code = args.at<Object>(1);
Handle<Context> context(target->context()); Handle<Context> context(target->context());
@ -2633,12 +2620,9 @@ static Object* Runtime_KeyedGetProperty(Arguments args) {
String* key = String::cast(args[1]); String* key = String::cast(args[1]);
if (receiver->HasFastProperties()) { if (receiver->HasFastProperties()) {
// Attempt to use lookup cache. // Attempt to use lookup cache.
Object* obj = Heap::GetKeyedLookupCache();
if (obj->IsFailure()) return obj;
LookupCache* cache = LookupCache::cast(obj);
Map* receiver_map = receiver->map(); Map* receiver_map = receiver->map();
int offset = cache->Lookup(receiver_map, key); int offset = KeyedLookupCache::Lookup(receiver_map, key);
if (offset != LookupCache::kNotFound) { if (offset != -1) {
Object* value = receiver->FastPropertyAt(offset); Object* value = receiver->FastPropertyAt(offset);
return value->IsTheHole() ? Heap::undefined_value() : value; return value->IsTheHole() ? Heap::undefined_value() : value;
} }
@ -2648,9 +2632,7 @@ static Object* Runtime_KeyedGetProperty(Arguments args) {
receiver->LocalLookup(key, &result); receiver->LocalLookup(key, &result);
if (result.IsProperty() && result.IsLoaded() && result.type() == FIELD) { if (result.IsProperty() && result.IsLoaded() && result.type() == FIELD) {
int offset = result.GetFieldIndex(); int offset = result.GetFieldIndex();
Object* obj = cache->Put(receiver_map, key, offset); KeyedLookupCache::Update(receiver_map, key, offset);
if (obj->IsFailure()) return obj;
Heap::SetKeyedLookupCache(LookupCache::cast(obj));
Object* value = receiver->FastPropertyAt(offset); Object* value = receiver->FastPropertyAt(offset);
return value->IsTheHole() ? Heap::undefined_value() : value; return value->IsTheHole() ? Heap::undefined_value() : value;
} }
@ -2977,9 +2959,7 @@ static Object* Runtime_IsPropertyEnumerable(Arguments args) {
static Object* Runtime_GetPropertyNames(Arguments args) { static Object* Runtime_GetPropertyNames(Arguments args) {
HandleScope scope; HandleScope scope;
ASSERT(args.length() == 1); ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSObject, object, 0);
CONVERT_CHECKED(JSObject, raw_object, args[0]);
Handle<JSObject> object(raw_object);
return *GetKeysFor(object); return *GetKeysFor(object);
} }
@ -3718,20 +3698,8 @@ static Object* Runtime_NumberMod(Arguments args) {
static Object* Runtime_StringAdd(Arguments args) { static Object* Runtime_StringAdd(Arguments args) {
NoHandleAllocation ha; NoHandleAllocation ha;
ASSERT(args.length() == 2); ASSERT(args.length() == 2);
CONVERT_CHECKED(String, str1, args[0]); CONVERT_CHECKED(String, str1, args[0]);
CONVERT_CHECKED(String, str2, args[1]); CONVERT_CHECKED(String, str2, args[1]);
int len1 = str1->length();
int len2 = str2->length();
if (len1 == 0) return str2;
if (len2 == 0) return str1;
int length_sum = len1 + len2;
// Make sure that an out of memory exception is thrown if the length
// of the new cons string is too large to fit in a Smi.
if (length_sum > Smi::kMaxValue || length_sum < 0) {
Top::context()->mark_out_of_memory();
return Failure::OutOfMemoryException();
}
return Heap::AllocateConsString(str1, str2); return Heap::AllocateConsString(str1, str2);
} }
@ -4166,16 +4134,64 @@ static Object* Runtime_Math_log(Arguments args) {
} }
// Helper function to compute x^y, where y is known to be an
// integer. Uses binary decomposition to limit the number of
// multiplications; see the discussion in "Hacker's Delight" by Henry
// S. Warren, Jr., figure 11-6, page 213.
static double powi(double x, int y) {
ASSERT(y != kMinInt);
unsigned n = (y < 0) ? -y : y;
double m = x;
double p = 1;
while (true) {
if ((n & 1) != 0) p *= m;
n >>= 1;
if (n == 0) {
if (y < 0) {
// Unfortunately, we have to be careful when p has reached
// infinity in the computation, because sometimes the higher
// internal precision in the pow() implementation would have
// given us a finite p. This happens very rarely.
double result = 1.0 / p;
return (result == 0 && isinf(p))
? pow(x, static_cast<double>(y)) // Avoid pow(double, int).
: result;
} else {
return p;
}
}
m *= m;
}
}
static Object* Runtime_Math_pow(Arguments args) { static Object* Runtime_Math_pow(Arguments args) {
NoHandleAllocation ha; NoHandleAllocation ha;
ASSERT(args.length() == 2); ASSERT(args.length() == 2);
CONVERT_DOUBLE_CHECKED(x, args[0]); CONVERT_DOUBLE_CHECKED(x, args[0]);
// If the second argument is a smi, it is much faster to call the
// custom powi() function than the generic pow().
if (args[1]->IsSmi()) {
int y = Smi::cast(args[1])->value();
return Heap::AllocateHeapNumber(powi(x, y));
}
CONVERT_DOUBLE_CHECKED(y, args[1]); CONVERT_DOUBLE_CHECKED(y, args[1]);
if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) { if (y == 0.5) {
return Heap::nan_value(); // It's not uncommon to use Math.pow(x, 0.5) to compute the square
// root of a number. To speed up such computations, we explictly
// check for this case and use the sqrt() function which is faster
// than pow().
return Heap::AllocateHeapNumber(sqrt(x));
} else if (y == -0.5) {
// Optimized using Math.pow(x, -0.5) == 1 / Math.pow(x, 0.5).
return Heap::AllocateHeapNumber(1.0 / sqrt(x));
} else if (y == 0) { } else if (y == 0) {
return Smi::FromInt(1); return Smi::FromInt(1);
} else if (isnan(y) || ((x == 1 || x == -1) && isinf(y))) {
return Heap::nan_value();
} else { } else {
return Heap::AllocateHeapNumber(pow(x, y)); return Heap::AllocateHeapNumber(pow(x, y));
} }
@ -4295,45 +4311,61 @@ static Object* Runtime_NewClosure(Arguments args) {
} }
static Handle<Code> ComputeConstructStub(Handle<Map> map) {
// TODO(385): Change this to create a construct stub specialized for
// the given map to make allocation of simple objects - and maybe
// arrays - much faster.
return Handle<Code>(Builtins::builtin(Builtins::JSConstructStubGeneric));
}
static Object* Runtime_NewObject(Arguments args) { static Object* Runtime_NewObject(Arguments args) {
NoHandleAllocation ha; HandleScope scope;
ASSERT(args.length() == 1); ASSERT(args.length() == 1);
Object* constructor = args[0]; Handle<Object> constructor = args.at<Object>(0);
if (constructor->IsJSFunction()) {
JSFunction* function = JSFunction::cast(constructor); // If the constructor isn't a proper function we throw a type error.
if (!constructor->IsJSFunction()) {
Vector< Handle<Object> > arguments = HandleVector(&constructor, 1);
Handle<Object> type_error =
Factory::NewTypeError("not_constructor", arguments);
return Top::Throw(*type_error);
}
// Handle stepping into constructors if step into is active. Handle<JSFunction> function = Handle<JSFunction>::cast(constructor);
#ifdef ENABLE_DEBUGGER_SUPPORT #ifdef ENABLE_DEBUGGER_SUPPORT
if (Debug::StepInActive()) { // Handle stepping into constructors if step into is active.
HandleScope scope; if (Debug::StepInActive()) {
Debug::HandleStepIn(Handle<JSFunction>(function), 0, true); Debug::HandleStepIn(function, 0, true);
} }
#endif #endif
if (function->has_initial_map() && if (function->has_initial_map()) {
function->initial_map()->instance_type() == JS_FUNCTION_TYPE) { if (function->initial_map()->instance_type() == JS_FUNCTION_TYPE) {
// The 'Function' function ignores the receiver object when // The 'Function' function ignores the receiver object when
// called using 'new' and creates a new JSFunction object that // called using 'new' and creates a new JSFunction object that
// is returned. The receiver object is only used for error // is returned. The receiver object is only used for error
// reporting if an error occurs when constructing the new // reporting if an error occurs when constructing the new
// JSFunction. AllocateJSObject should not be used to allocate // JSFunction. Factory::NewJSObject() should not be used to
// JSFunctions since it does not properly initialize the shared // allocate JSFunctions since it does not properly initialize
// part of the function. Since the receiver is ignored anyway, // the shared part of the function. Since the receiver is
// we use the global object as the receiver instead of a new // ignored anyway, we use the global object as the receiver
// JSFunction object. This way, errors are reported the same // instead of a new JSFunction object. This way, errors are
// way whether or not 'Function' is called using 'new'. // reported the same way whether or not 'Function' is called
// using 'new'.
return Top::context()->global(); return Top::context()->global();
} }
return Heap::AllocateJSObject(function);
} }
HandleScope scope; bool first_allocation = !function->has_initial_map();
Handle<Object> cons(constructor); Handle<JSObject> result = Factory::NewJSObject(function);
// The constructor is not a function; throw a type error. if (first_allocation) {
Handle<Object> type_error = Handle<Map> map = Handle<Map>(function->initial_map());
Factory::NewTypeError("not_constructor", HandleVector(&cons, 1)); Handle<Code> stub = ComputeConstructStub(map);
return Top::Throw(*type_error); function->shared()->set_construct_stub(*stub);
}
return *result;
} }
@ -4534,7 +4566,7 @@ static ObjectPair LoadContextSlotHelper(Arguments args, bool throw_error) {
ASSERT(args.length() == 2); ASSERT(args.length() == 2);
if (!args[0]->IsContext() || !args[1]->IsString()) { if (!args[0]->IsContext() || !args[1]->IsString()) {
return MakePair(IllegalOperation(), NULL); return MakePair(Top::ThrowIllegalOperation(), NULL);
} }
Handle<Context> context = args.at<Context>(0); Handle<Context> context = args.at<Context>(0);
Handle<String> name = args.at<String>(1); Handle<String> name = args.at<String>(1);
@ -6622,8 +6654,8 @@ static Object* Runtime_GetBreakLocations(Arguments args) {
HandleScope scope; HandleScope scope;
ASSERT(args.length() == 1); ASSERT(args.length() == 1);
CONVERT_ARG_CHECKED(JSFunction, raw_fun, 0); CONVERT_ARG_CHECKED(JSFunction, fun, 0);
Handle<SharedFunctionInfo> shared(raw_fun->shared()); Handle<SharedFunctionInfo> shared(fun->shared());
// Find the number of break points // Find the number of break points
Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared); Handle<Object> break_locations = Debug::GetSourceBreakLocations(shared);
if (break_locations->IsUndefined()) return Heap::undefined_value(); if (break_locations->IsUndefined()) return Heap::undefined_value();
@ -6640,8 +6672,8 @@ static Object* Runtime_GetBreakLocations(Arguments args) {
static Object* Runtime_SetFunctionBreakPoint(Arguments args) { static Object* Runtime_SetFunctionBreakPoint(Arguments args) {
HandleScope scope; HandleScope scope;
ASSERT(args.length() == 3); ASSERT(args.length() == 3);
CONVERT_ARG_CHECKED(JSFunction, raw_fun, 0); CONVERT_ARG_CHECKED(JSFunction, fun, 0);
Handle<SharedFunctionInfo> shared(raw_fun->shared()); Handle<SharedFunctionInfo> shared(fun->shared());
CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]); CONVERT_NUMBER_CHECKED(int32_t, source_position, Int32, args[1]);
RUNTIME_ASSERT(source_position >= 0); RUNTIME_ASSERT(source_position >= 0);
Handle<Object> break_point_object_arg = args.at<Object>(2); Handle<Object> break_point_object_arg = args.at<Object>(2);

5
deps/v8/src/runtime.js

@ -391,8 +391,9 @@ function CALL_NON_FUNCTION_AS_CONSTRUCTOR() {
function APPLY_PREPARE(args) { function APPLY_PREPARE(args) {
var length; var length;
// First check whether length is a positive Smi and args is an array. This is the // First check whether length is a positive Smi and args is an
// fast case. If this fails, we do the slow case that takes care of more eventualities // array. This is the fast case. If this fails, we do the slow case
// that takes care of more eventualities.
if (%_IsArray(args)) { if (%_IsArray(args)) {
length = args.length; length = args.length;
if (%_IsSmi(length) && length >= 0 && length < 0x800000 && IS_FUNCTION(this)) { if (%_IsSmi(length) && length >= 0 && length < 0x800000 && IS_FUNCTION(this)) {

86
deps/v8/src/scopeinfo.cc

@ -432,10 +432,13 @@ int ScopeInfo<Allocator>::ContextSlotIndex(Code* code,
String* name, String* name,
Variable::Mode* mode) { Variable::Mode* mode) {
ASSERT(name->IsSymbol()); ASSERT(name->IsSymbol());
int result = ContextSlotCache::Lookup(code, name, mode);
if (result != ContextSlotCache::kNotFound) return result;
if (code->sinfo_size() > 0) { if (code->sinfo_size() > 0) {
// Loop below depends on the NULL sentinel after the context slot names. // Loop below depends on the NULL sentinel after the context slot names.
ASSERT(NumberOfContextSlots(code) >= Context::MIN_CONTEXT_SLOTS || ASSERT(NumberOfContextSlots(code) >= Context::MIN_CONTEXT_SLOTS ||
*(ContextEntriesAddr(code) + 1) == NULL); *(ContextEntriesAddr(code) + 1) == NULL);
// slots start after length entry // slots start after length entry
Object** p0 = ContextEntriesAddr(code) + 1; Object** p0 = ContextEntriesAddr(code) + 1;
Object** p = p0; Object** p = p0;
@ -443,14 +446,18 @@ int ScopeInfo<Allocator>::ContextSlotIndex(Code* code,
while (*p != NULL) { while (*p != NULL) {
if (*p == name) { if (*p == name) {
ASSERT(((p - p0) & 1) == 0); ASSERT(((p - p0) & 1) == 0);
if (mode != NULL) { int v;
ReadInt(p + 1, reinterpret_cast<int*>(mode)); ReadInt(p + 1, &v);
} Variable::Mode mode_value = static_cast<Variable::Mode>(v);
return ((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS; if (mode != NULL) *mode = mode_value;
result = ((p - p0) >> 1) + Context::MIN_CONTEXT_SLOTS;
ContextSlotCache::Update(code, name, mode_value, result);
return result;
} }
p += 2; p += 2;
} }
} }
ContextSlotCache::Update(code, name, Variable::INTERNAL, -1);
return -1; return -1;
} }
@ -526,7 +533,78 @@ int ScopeInfo<Allocator>::NumberOfLocals() const {
} }
int ContextSlotCache::Hash(Code* code, String* name) {
// Uses only lower 32 bits if pointers are larger.
uintptr_t addr_hash =
static_cast<uint32_t>(reinterpret_cast<uintptr_t>(code)) >> 2;
return (addr_hash ^ name->Hash()) % kLength;
}
int ContextSlotCache::Lookup(Code* code,
String* name,
Variable::Mode* mode) {
int index = Hash(code, name);
Key& key = keys_[index];
if ((key.code == code) && key.name->Equals(name)) {
Value result(values_[index]);
if (mode != NULL) *mode = result.mode();
return result.index() + kNotFound;
}
return kNotFound;
}
void ContextSlotCache::Update(Code* code,
String* name,
Variable::Mode mode,
int slot_index) {
String* symbol;
ASSERT(slot_index > kNotFound);
if (Heap::LookupSymbolIfExists(name, &symbol)) {
int index = Hash(code, symbol);
Key& key = keys_[index];
key.code = code;
key.name = symbol;
// Please note value only takes a uint as index.
values_[index] = Value(mode, slot_index - kNotFound).raw();
#ifdef DEBUG
ValidateEntry(code, name, mode, slot_index);
#endif
}
}
void ContextSlotCache::Clear() {
for (int index = 0; index < kLength; index++) keys_[index].code = NULL;
}
ContextSlotCache::Key ContextSlotCache::keys_[ContextSlotCache::kLength];
uint32_t ContextSlotCache::values_[ContextSlotCache::kLength];
#ifdef DEBUG #ifdef DEBUG
void ContextSlotCache::ValidateEntry(Code* code,
String* name,
Variable::Mode mode,
int slot_index) {
String* symbol;
if (Heap::LookupSymbolIfExists(name, &symbol)) {
int index = Hash(code, name);
Key& key = keys_[index];
ASSERT(key.code == code);
ASSERT(key.name->Equals(name));
Value result(values_[index]);
ASSERT(result.mode() == mode);
ASSERT(result.index() + kNotFound == slot_index);
}
}
template <class Allocator> template <class Allocator>
static void PrintList(const char* list_name, static void PrintList(const char* list_name,
int nof_internal_slots, int nof_internal_slots,

68
deps/v8/src/scopeinfo.h

@ -163,6 +163,74 @@ class ZoneScopeInfo: public ScopeInfo<ZoneListAllocationPolicy> {
}; };
// Cache for mapping (code, property name) into context slot index.
// The cache contains both positive and negative results.
// Slot index equals -1 means the property is absent.
// Cleared at startup and prior to mark sweep collection.
class ContextSlotCache {
public:
// Lookup context slot index for (code, name).
// If absent, kNotFound is returned.
static int Lookup(Code* code,
String* name,
Variable::Mode* mode);
// Update an element in the cache.
static void Update(Code* code,
String* name,
Variable::Mode mode,
int slot_index);
// Clear the cache.
static void Clear();
static const int kNotFound = -2;
private:
inline static int Hash(Code* code, String* name);
#ifdef DEBUG
static void ValidateEntry(Code* code,
String* name,
Variable::Mode mode,
int slot_index);
#endif
static const int kLength = 256;
struct Key {
Code* code;
String* name;
};
struct Value {
Value(Variable::Mode mode, int index) {
ASSERT(ModeField::is_valid(mode));
ASSERT(IndexField::is_valid(index));
value_ = ModeField::encode(mode) | IndexField::encode(index);
ASSERT(mode == this->mode());
ASSERT(index == this->index());
}
inline Value(uint32_t value) : value_(value) {}
uint32_t raw() { return value_; }
Variable::Mode mode() { return ModeField::decode(value_); }
int index() { return IndexField::decode(value_); }
// Bit fields in value_ (type, shift, size). Must be public so the
// constants can be embedded in generated code.
class ModeField: public BitField<Variable::Mode, 0, 3> {};
class IndexField: public BitField<int, 3, 32-3> {};
private:
uint32_t value_;
};
static Key keys_[kLength];
static uint32_t values_[kLength];
};
} } // namespace v8::internal } } // namespace v8::internal
#endif // V8_SCOPEINFO_H_ #endif // V8_SCOPEINFO_H_

23
deps/v8/src/scopes.cc

@ -81,12 +81,12 @@ Variable* LocalsMap::Declare(Scope* scope,
Handle<String> name, Handle<String> name,
Variable::Mode mode, Variable::Mode mode,
bool is_valid_LHS, bool is_valid_LHS,
bool is_this) { Variable::Kind kind) {
HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true); HashMap::Entry* p = HashMap::Lookup(name.location(), name->Hash(), true);
if (p->value == NULL) { if (p->value == NULL) {
// The variable has not been declared yet -> insert it. // The variable has not been declared yet -> insert it.
ASSERT(p->key == name.location()); ASSERT(p->key == name.location());
p->value = new Variable(scope, name, mode, is_valid_LHS, is_this); p->value = new Variable(scope, name, mode, is_valid_LHS, kind);
} }
return reinterpret_cast<Variable*>(p->value); return reinterpret_cast<Variable*>(p->value);
} }
@ -169,7 +169,8 @@ void Scope::Initialize(bool inside_with) {
// such parameter is 'this' which is passed on the stack when // such parameter is 'this' which is passed on the stack when
// invoking scripts // invoking scripts
{ Variable* var = { Variable* var =
locals_.Declare(this, Factory::this_symbol(), Variable::VAR, false, true); locals_.Declare(this, Factory::this_symbol(), Variable::VAR,
false, Variable::THIS);
var->rewrite_ = new Slot(var, Slot::PARAMETER, -1); var->rewrite_ = new Slot(var, Slot::PARAMETER, -1);
receiver_ = new VariableProxy(Factory::this_symbol(), true, false); receiver_ = new VariableProxy(Factory::this_symbol(), true, false);
receiver_->BindTo(var); receiver_->BindTo(var);
@ -179,7 +180,8 @@ void Scope::Initialize(bool inside_with) {
// Declare 'arguments' variable which exists in all functions. // Declare 'arguments' variable which exists in all functions.
// Note that it may never be accessed, in which case it won't // Note that it may never be accessed, in which case it won't
// be allocated during variable allocation. // be allocated during variable allocation.
Declare(Factory::arguments_symbol(), Variable::VAR); locals_.Declare(this, Factory::arguments_symbol(), Variable::VAR,
true, Variable::ARGUMENTS);
} }
} }
@ -203,7 +205,7 @@ Variable* Scope::Lookup(Handle<String> name) {
Variable* Scope::DeclareFunctionVar(Handle<String> name) { Variable* Scope::DeclareFunctionVar(Handle<String> name) {
ASSERT(is_function_scope() && function_ == NULL); ASSERT(is_function_scope() && function_ == NULL);
function_ = new Variable(this, name, Variable::CONST, true, false); function_ = new Variable(this, name, Variable::CONST, true, Variable::NORMAL);
return function_; return function_;
} }
@ -213,7 +215,7 @@ Variable* Scope::Declare(Handle<String> name, Variable::Mode mode) {
// INTERNAL variables are allocated explicitly, and TEMPORARY // INTERNAL variables are allocated explicitly, and TEMPORARY
// variables are allocated via NewTemporary(). // variables are allocated via NewTemporary().
ASSERT(mode == Variable::VAR || mode == Variable::CONST); ASSERT(mode == Variable::VAR || mode == Variable::CONST);
return locals_.Declare(this, name, mode, true, false); return locals_.Declare(this, name, mode, true, Variable::NORMAL);
} }
@ -247,7 +249,8 @@ void Scope::RemoveUnresolved(VariableProxy* var) {
VariableProxy* Scope::NewTemporary(Handle<String> name) { VariableProxy* Scope::NewTemporary(Handle<String> name) {
Variable* var = new Variable(this, name, Variable::TEMPORARY, true, false); Variable* var = new Variable(this, name, Variable::TEMPORARY, true,
Variable::NORMAL);
VariableProxy* tmp = new VariableProxy(name, false, false); VariableProxy* tmp = new VariableProxy(name, false, false);
tmp->BindTo(var); tmp->BindTo(var);
temps_.Add(var); temps_.Add(var);
@ -503,7 +506,7 @@ Variable* Scope::NonLocal(Handle<String> name, Variable::Mode mode) {
Variable* var = map->Lookup(name); Variable* var = map->Lookup(name);
if (var == NULL) { if (var == NULL) {
// Declare a new non-local. // Declare a new non-local.
var = map->Declare(NULL, name, mode, true, false); var = map->Declare(NULL, name, mode, true, Variable::NORMAL);
// Allocate it by giving it a dynamic lookup. // Allocate it by giving it a dynamic lookup.
var->rewrite_ = new Slot(var, Slot::LOOKUP, -1); var->rewrite_ = new Slot(var, Slot::LOOKUP, -1);
} }
@ -619,7 +622,7 @@ void Scope::ResolveVariable(Scope* global_scope,
// We must have a global variable. // We must have a global variable.
ASSERT(global_scope != NULL); ASSERT(global_scope != NULL);
var = new Variable(global_scope, proxy->name(), var = new Variable(global_scope, proxy->name(),
Variable::DYNAMIC, true, false); Variable::DYNAMIC, true, Variable::NORMAL);
} else if (scope_inside_with_) { } else if (scope_inside_with_) {
// If we are inside a with statement we give up and look up // If we are inside a with statement we give up and look up
@ -797,7 +800,7 @@ void Scope::AllocateParameterLocals() {
// are never allocated in the context). // are never allocated in the context).
Variable* arguments_shadow = Variable* arguments_shadow =
new Variable(this, Factory::arguments_shadow_symbol(), new Variable(this, Factory::arguments_shadow_symbol(),
Variable::INTERNAL, true, false); Variable::INTERNAL, true, Variable::ARGUMENTS);
arguments_shadow_ = arguments_shadow_ =
new VariableProxy(Factory::arguments_shadow_symbol(), false, false); new VariableProxy(Factory::arguments_shadow_symbol(), false, false);
arguments_shadow_->BindTo(arguments_shadow); arguments_shadow_->BindTo(arguments_shadow);

2
deps/v8/src/scopes.h

@ -47,7 +47,7 @@ class LocalsMap: public HashMap {
virtual ~LocalsMap(); virtual ~LocalsMap();
Variable* Declare(Scope* scope, Handle<String> name, Variable::Mode mode, Variable* Declare(Scope* scope, Handle<String> name, Variable::Mode mode,
bool is_valid_LHS, bool is_this); bool is_valid_LHS, Variable::Kind kind);
Variable* Lookup(Handle<String> name); Variable* Lookup(Handle<String> name);
}; };

18
deps/v8/src/serialize.cc

@ -1261,15 +1261,19 @@ RelativeAddress Serializer::Allocate(HeapObject* obj) {
found = Heap::InSpace(obj, s); found = Heap::InSpace(obj, s);
} }
CHECK(found); CHECK(found);
int size = obj->Size();
if (s == NEW_SPACE) { if (s == NEW_SPACE) {
Space* space = Heap::TargetSpace(obj); if (size > Heap::MaxObjectSizeInPagedSpace()) {
ASSERT(space == Heap::old_pointer_space() || s = LO_SPACE;
space == Heap::old_data_space()); } else {
s = (space == Heap::old_pointer_space()) ? OldSpace* space = Heap::TargetSpace(obj);
OLD_POINTER_SPACE : ASSERT(space == Heap::old_pointer_space() ||
OLD_DATA_SPACE; space == Heap::old_data_space());
s = (space == Heap::old_pointer_space()) ?
OLD_POINTER_SPACE :
OLD_DATA_SPACE;
}
} }
int size = obj->Size();
GCTreatment gc_treatment = DataObject; GCTreatment gc_treatment = DataObject;
if (obj->IsFixedArray()) gc_treatment = PointerObject; if (obj->IsFixedArray()) gc_treatment = PointerObject;
else if (obj->IsCode()) gc_treatment = CodeObject; else if (obj->IsCode()) gc_treatment = CodeObject;

1
deps/v8/src/spaces.h

@ -1041,7 +1041,6 @@ class SemiSpaceIterator : public ObjectIterator {
HeapObject* object = HeapObject::FromAddress(current_); HeapObject* object = HeapObject::FromAddress(current_);
int size = (size_func_ == NULL) ? object->Size() : size_func_(object); int size = (size_func_ == NULL) ? object->Size() : size_func_(object);
ASSERT_OBJECT_SIZE(size);
current_ += size; current_ += size;
return object; return object;

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save