
uv: upgrade to d6a06b8

v0.7.4-release
Ben Noordhuis
commit dd0188ec08
Changed files:
  1. deps/uv/AUTHORS (1 changed line)
  2. deps/uv/include/uv-private/eio.h (119 changed lines)
  3. deps/uv/include/uv.h (5 changed lines)
  4. deps/uv/src/unix/core.c (3 changed lines)
  5. deps/uv/src/unix/dl.c (22 changed lines)
  6. deps/uv/src/unix/eio/eio.c (170 changed lines)
  7. deps/uv/src/unix/fs.c (20 changed lines)
  8. deps/uv/src/unix/uv-eio.c (26 changed lines)
  9. deps/uv/test/test-list.h (2 changed lines)

deps/uv/AUTHORS (1 changed line)

@@ -39,3 +39,4 @@ Bruce Mitchener <bruce.mitchener@gmail.com>
Maciej Małecki <maciej.malecki@notimplemented.org>
Yasuhiro Matsumoto <mattn.jp@gmail.com>
Daisuke Murase <typester@cpan.org>
Paddy Byers <paddy.byers@gmail.com>

deps/uv/include/uv-private/eio.h (119 changed lines)

@@ -206,6 +206,28 @@ enum {
EIO_PRI_DEFAULT = 0
};
#define ETP_PRI_MIN EIO_PRI_MIN
#define ETP_PRI_MAX EIO_PRI_MAX
#define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1)
#define ETP_REQ eio_req
/*
* a somewhat faster data structure might be nice, but
* with 8 priorities this actually needs <20 insns
* per shift, the most expensive operation.
*/
typedef struct {
ETP_REQ *qs[ETP_NUM_PRI], *qe[ETP_NUM_PRI]; /* qstart, qend */
int size;
} etp_reqq;
typedef struct {
etp_reqq res_queue; /* queue of outstanding responses for this channel */
void *data; /* use this for what you want */
} eio_channel;
/* eio request structure */
/* this structure is mostly read-only */
/* when initialising it, all members must be zero-initialised */
@@ -227,6 +249,8 @@ struct eio_req
long int3; /* chown, fchown: gid */
int errorno; /* errno value on syscall return */
eio_channel *channel; /* data used to direct poll callbacks arising from this req */
#if __i386 || __amd64
unsigned char cancelled;
#else
@@ -261,11 +285,14 @@ enum {
* and eio_poll_cb needs to be invoked (it MUST NOT call eio_poll_cb itself).
* done_poll is called when the need to poll is gone.
*/
int eio_init (void (*want_poll)(void), void (*done_poll)(void));
int eio_init (void (*want_poll)(eio_channel *), void (*done_poll)(eio_channel *));
/* initialises a channel */
void eio_channel_init(eio_channel *, void *data);
/* must be called regularly to handle pending requests */
/* returns 0 if all requests were handled, -1 if not, or the value of EIO_FINISH if != 0 */
int eio_poll (void);
int eio_poll (eio_channel *channel);
/* stop polling if poll took longer than duration seconds */
void eio_set_max_poll_time (eio_tstamp nseconds);
@@ -289,55 +316,55 @@ unsigned int eio_nthreads (void); /* number of worker threads in use currently */
/* convenience wrappers */
#ifndef EIO_NO_WRAPPERS
eio_req *eio_nop (int pri, eio_cb cb, void *data); /* does nothing except go through the whole process */
eio_req *eio_busy (eio_tstamp delay, int pri, eio_cb cb, void *data); /* ties a thread for this long, simulating busyness */
eio_req *eio_sync (int pri, eio_cb cb, void *data);
eio_req *eio_fsync (int fd, int pri, eio_cb cb, void *data);
eio_req *eio_fdatasync (int fd, int pri, eio_cb cb, void *data);
eio_req *eio_syncfs (int fd, int pri, eio_cb cb, void *data);
eio_req *eio_msync (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data);
eio_req *eio_mtouch (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data);
eio_req *eio_mlock (void *addr, size_t length, int pri, eio_cb cb, void *data);
eio_req *eio_mlockall (int flags, int pri, eio_cb cb, void *data);
eio_req *eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data);
eio_req *eio_fallocate (int fd, int mode, off_t offset, size_t len, int pri, eio_cb cb, void *data);
eio_req *eio_close (int fd, int pri, eio_cb cb, void *data);
eio_req *eio_readahead (int fd, off_t offset, size_t length, int pri, eio_cb cb, void *data);
eio_req *eio_read (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data);
eio_req *eio_write (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data);
eio_req *eio_fstat (int fd, int pri, eio_cb cb, void *data); /* stat buffer=ptr2 allocated dynamically */
eio_req *eio_fstatvfs (int fd, int pri, eio_cb cb, void *data); /* stat buffer=ptr2 allocated dynamically */
eio_req *eio_futime (int fd, eio_tstamp atime, eio_tstamp mtime, int pri, eio_cb cb, void *data);
eio_req *eio_ftruncate (int fd, off_t offset, int pri, eio_cb cb, void *data);
eio_req *eio_fchmod (int fd, eio_mode_t mode, int pri, eio_cb cb, void *data);
eio_req *eio_fchown (int fd, eio_uid_t uid, eio_gid_t gid, int pri, eio_cb cb, void *data);
eio_req *eio_dup2 (int fd, int fd2, int pri, eio_cb cb, void *data);
eio_req *eio_sendfile (int out_fd, int in_fd, off_t in_offset, size_t length, int pri, eio_cb cb, void *data);
eio_req *eio_open (const char *path, int flags, eio_mode_t mode, int pri, eio_cb cb, void *data);
eio_req *eio_utime (const char *path, eio_tstamp atime, eio_tstamp mtime, int pri, eio_cb cb, void *data);
eio_req *eio_truncate (const char *path, off_t offset, int pri, eio_cb cb, void *data);
eio_req *eio_chown (const char *path, eio_uid_t uid, eio_gid_t gid, int pri, eio_cb cb, void *data);
eio_req *eio_chmod (const char *path, eio_mode_t mode, int pri, eio_cb cb, void *data);
eio_req *eio_mkdir (const char *path, eio_mode_t mode, int pri, eio_cb cb, void *data);
eio_req *eio_readdir (const char *path, int flags, int pri, eio_cb cb, void *data); /* result=ptr2 allocated dynamically */
eio_req *eio_rmdir (const char *path, int pri, eio_cb cb, void *data);
eio_req *eio_unlink (const char *path, int pri, eio_cb cb, void *data);
eio_req *eio_readlink (const char *path, int pri, eio_cb cb, void *data); /* result=ptr2 allocated dynamically */
eio_req *eio_realpath (const char *path, int pri, eio_cb cb, void *data); /* result=ptr2 allocated dynamically */
eio_req *eio_stat (const char *path, int pri, eio_cb cb, void *data); /* stat buffer=ptr2 allocated dynamically */
eio_req *eio_lstat (const char *path, int pri, eio_cb cb, void *data); /* stat buffer=ptr2 allocated dynamically */
eio_req *eio_statvfs (const char *path, int pri, eio_cb cb, void *data); /* stat buffer=ptr2 allocated dynamically */
eio_req *eio_mknod (const char *path, eio_mode_t mode, dev_t dev, int pri, eio_cb cb, void *data);
eio_req *eio_link (const char *path, const char *new_path, int pri, eio_cb cb, void *data);
eio_req *eio_symlink (const char *path, const char *new_path, int pri, eio_cb cb, void *data);
eio_req *eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data);
eio_req *eio_custom (void (*execute)(eio_req *), int pri, eio_cb cb, void *data);
eio_req *eio_nop (int pri, eio_cb cb, void *data, eio_channel *channel); /* does nothing except go through the whole process */
eio_req *eio_busy (eio_tstamp delay, int pri, eio_cb cb, void *data, eio_channel *channel); /* ties a thread for this long, simulating busyness */
eio_req *eio_sync (int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_fsync (int fd, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_fdatasync (int fd, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_syncfs (int fd, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_msync (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_mtouch (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_mlock (void *addr, size_t length, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_mlockall (int flags, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_fallocate (int fd, int mode, off_t offset, size_t len, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_close (int fd, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_readahead (int fd, off_t offset, size_t length, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_read (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_write (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_fstat (int fd, int pri, eio_cb cb, void *data, eio_channel *channel); /* stat buffer=ptr2 allocated dynamically */
eio_req *eio_fstatvfs (int fd, int pri, eio_cb cb, void *data, eio_channel *channel); /* stat buffer=ptr2 allocated dynamically */
eio_req *eio_futime (int fd, eio_tstamp atime, eio_tstamp mtime, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_ftruncate (int fd, off_t offset, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_fchmod (int fd, eio_mode_t mode, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_fchown (int fd, eio_uid_t uid, eio_gid_t gid, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_dup2 (int fd, int fd2, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_sendfile (int out_fd, int in_fd, off_t in_offset, size_t length, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_open (const char *path, int flags, eio_mode_t mode, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_utime (const char *path, eio_tstamp atime, eio_tstamp mtime, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_truncate (const char *path, off_t offset, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_chown (const char *path, eio_uid_t uid, eio_gid_t gid, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_chmod (const char *path, eio_mode_t mode, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_mkdir (const char *path, eio_mode_t mode, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_readdir (const char *path, int flags, int pri, eio_cb cb, void *data, eio_channel *channel); /* result=ptr2 allocated dynamically */
eio_req *eio_rmdir (const char *path, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_unlink (const char *path, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_readlink (const char *path, int pri, eio_cb cb, void *data, eio_channel *channel); /* result=ptr2 allocated dynamically */
eio_req *eio_realpath (const char *path, int pri, eio_cb cb, void *data, eio_channel *channel); /* result=ptr2 allocated dynamically */
eio_req *eio_stat (const char *path, int pri, eio_cb cb, void *data, eio_channel *channel); /* stat buffer=ptr2 allocated dynamically */
eio_req *eio_lstat (const char *path, int pri, eio_cb cb, void *data, eio_channel *channel); /* stat buffer=ptr2 allocated dynamically */
eio_req *eio_statvfs (const char *path, int pri, eio_cb cb, void *data, eio_channel *channel); /* stat buffer=ptr2 allocated dynamically */
eio_req *eio_mknod (const char *path, eio_mode_t mode, dev_t dev, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_link (const char *path, const char *new_path, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_symlink (const char *path, const char *new_path, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data, eio_channel *channel);
eio_req *eio_custom (void (*execute)(eio_req *), int pri, eio_cb cb, void *data, eio_channel *channel);
#endif
/*****************************************************************************/
/* groups */
eio_req *eio_grp (eio_cb cb, void *data);
eio_req *eio_grp (eio_cb cb, void *data, eio_channel *channel);
void eio_grp_feed (eio_req *grp, void (*feed)(eio_req *req), int limit);
void eio_grp_limit (eio_req *grp, int limit);
void eio_grp_add (eio_req *grp, eio_req *req);
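
Taken together, these header changes thread an eio_channel through both request submission and completion delivery. A minimal sketch of the new calling convention for standalone use of the library (callback and variable names here are illustrative, not part of the diff):

    #include "eio.h"      /* deps/uv/include/uv-private/eio.h in this tree */
    #include <stdio.h>

    static void my_want_poll(eio_channel *channel) {
      /* called from a worker thread when this channel's response queue becomes
       * non-empty; a real embedder wakes the thread that owns the channel here */
    }

    static void my_done_poll(eio_channel *channel) {
      /* called when this channel's response queue has been drained */
    }

    static int on_done(eio_req *req) {
      printf("nop finished, errorno=%d\n", req->errorno);
      return 0;
    }

    int main(void) {
      eio_channel channel;

      if (eio_init(my_want_poll, my_done_poll))   /* callbacks now receive the channel */
        return 1;
      eio_channel_init(&channel, NULL);           /* second argument is opaque user data */

      /* every convenience wrapper gained a trailing eio_channel* parameter */
      eio_nop(EIO_PRI_DEFAULT, on_done, NULL, &channel);

      /* eio_poll() now drains one specific channel; spinning here is only for
       * the sketch, normally you poll in response to want_poll */
      while (eio_nreqs())
        eio_poll(&channel);

      return 0;
    }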

deps/uv/include/uv.h (5 changed lines)

@@ -1313,8 +1313,7 @@ UV_EXTERN uv_err_t uv_dlopen(const char* filename, uv_lib_t* library);
UV_EXTERN uv_err_t uv_dlclose(uv_lib_t library);
/*
* Retrieves a data pointer from a dynamic library. It is legal for a symbol to
* map to NULL.
* Retrieves a data pointer from a dynamic library.
*/
UV_EXTERN uv_err_t uv_dlsym(uv_lib_t library, const char* name, void** ptr);
@@ -1401,6 +1400,8 @@ struct uv_loop_s {
uv_async_t uv_eio_want_poll_notifier;
uv_async_t uv_eio_done_poll_notifier;
uv_idle_t uv_eio_poller;
/* Poll result queue */
eio_channel uv_eio_channel;
/* Diagnostic counters */
uv_counters_t counters;
/* The last error */

deps/uv/src/unix/core.c (3 changed lines)

@@ -167,6 +167,7 @@ static int uv__loop_init(uv_loop_t* loop,
loop->ev = ev_loop_new(EVFLAG_AUTO);
#endif
ev_set_userdata(loop->ev, loop);
eio_channel_init(&loop->uv_eio_channel, loop);
return 0;
}
@@ -709,7 +710,7 @@ int uv_getaddrinfo(uv_loop_t* loop,
uv_ref(loop);
req = eio_custom(getaddrinfo_thread_proc, EIO_PRI_DEFAULT,
uv_getaddrinfo_done, handle);
uv_getaddrinfo_done, handle, &loop->uv_eio_channel);
assert(req);
assert(req->data == handle);
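
uv__loop_init() now seeds the per-loop channel with the loop itself as its user data, and the getaddrinfo path passes that channel when queueing work. A hedged sketch of submitting custom work against a specific loop in the same style; the helper names are illustrative, and touching loop->uv_eio_channel mirrors libuv's internal usage rather than a public API:

    #include "uv.h"
    #include "eio.h"   /* for eio_custom and EIO_PRI_DEFAULT */

    static void work_proc(eio_req *req) {
      /* runs on an eio worker thread; req->data carries the caller's context */
    }

    static int after_work(eio_req *req) {
      /* runs on the loop's thread once eio_poll() drains loop->uv_eio_channel */
      return 0;
    }

    static int submit_work(uv_loop_t *loop, void *context) {
      eio_req *req = eio_custom(work_proc, EIO_PRI_DEFAULT, after_work, context,
                                &loop->uv_eio_channel);  /* channel is now the last argument */
      return req ? 0 : -1;
    }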

deps/uv/src/unix/dl.c (22 changed lines)

@@ -25,17 +25,11 @@
#include <dlfcn.h>
#include <errno.h>
/* The dl family of functions don't set errno. We need a good way to communicate
* errors to the caller but there is only dlerror() and that returns a string -
* a string that may or may not be safe to keep a reference to...
*/
static const uv_err_t uv_inval_ = { UV_EINVAL, EINVAL };
uv_err_t uv_dlopen(const char* filename, uv_lib_t* library) {
void* handle = dlopen(filename, RTLD_LAZY);
if (handle == NULL) {
return uv_inval_;
return uv__new_sys_error(errno);
}
*library = handle;
@@ -45,7 +39,7 @@ uv_err_t uv_dlopen(const char* filename, uv_lib_t* library) {
uv_err_t uv_dlclose(uv_lib_t library) {
if (dlclose(library) != 0) {
return uv_inval_;
return uv__new_sys_error(errno);
}
return uv_ok_;
@@ -53,15 +47,9 @@ uv_err_t uv_dlclose(uv_lib_t library) {
uv_err_t uv_dlsym(uv_lib_t library, const char* name, void** ptr) {
void* address;
/* Reset error status. */
dlerror();
address = dlsym(library, name);
if (dlerror()) {
return uv_inval_;
void* address = dlsym(library, name);
if (address == NULL) {
return uv__new_sys_error(errno);
}
*ptr = (void*) address;
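
With dlerror()-based detection gone, failures from the dl family are now reported through the regular sys-error path, and a symbol that resolves to NULL is treated as an error (hence the sentence dropped from uv.h above). A small caller-side sketch, assuming the uv_err_t layout and UV_OK code of this libuv era; the library and symbol names are illustrative:

    #include "uv.h"

    static int load_cos(double (**fn)(double)) {
      uv_lib_t lib;
      void *sym;
      uv_err_t err;

      err = uv_dlopen("libm.so", &lib);
      if (err.code != UV_OK)
        return -1;

      err = uv_dlsym(lib, "cos", &sym);
      if (err.code != UV_OK) {               /* also covers a symbol that maps to NULL */
        uv_dlclose(lib);
        return -1;
      }

      *fn = (double (*)(double)) sym;
      return 0;
    }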

deps/uv/src/unix/eio/eio.c (170 changed lines)

@@ -362,12 +362,8 @@ static int gettimeofday(struct timeval *tv, struct timezone *tz)
#define EIO_TICKS ((1000000 + 1023) >> 10)
#define ETP_PRI_MIN EIO_PRI_MIN
#define ETP_PRI_MAX EIO_PRI_MAX
struct etp_worker;
#define ETP_REQ eio_req
#define ETP_DESTROY(req) eio_destroy (req)
static int eio_finish (eio_req *req);
#define ETP_FINISH(req) eio_finish (req)
@@ -376,8 +372,6 @@ static void eio_execute (struct etp_worker *self, eio_req *req);
/*****************************************************************************/
#define ETP_NUM_PRI (ETP_PRI_MAX - ETP_PRI_MIN + 1)
/* calculate time difference in ~1/EIO_TICKS of a second */
ecb_inline int
tvdiff (struct timeval *tv1, struct timeval *tv2)
@@ -388,8 +382,8 @@ tvdiff (struct timeval *tv1, struct timeval *tv2)
static unsigned int started, idle, wanted = 4;
static void (*want_poll_cb) (void);
static void (*done_poll_cb) (void);
static void (*want_poll_cb) (eio_channel *);
static void (*done_poll_cb) (eio_channel *);
static unsigned int max_poll_time; /* reslock */
static unsigned int max_poll_reqs; /* reslock */
@@ -506,18 +500,8 @@ etp_nthreads (void)
return retval;
}
/*
* a somewhat faster data structure might be nice, but
* with 8 priorities this actually needs <20 insns
* per shift, the most expensive operation.
*/
typedef struct {
ETP_REQ *qs[ETP_NUM_PRI], *qe[ETP_NUM_PRI]; /* qstart, qend */
int size;
} etp_reqq;
static etp_reqq req_queue;
static etp_reqq res_queue;
static eio_channel default_channel;
static void ecb_noinline ecb_cold
reqq_init (etp_reqq *q)
@@ -574,7 +558,7 @@ reqq_shift (etp_reqq *q)
}
static int ecb_cold
etp_init (void (*want_poll)(void), void (*done_poll)(void))
etp_init (void (*want_poll)(eio_channel *), void (*done_poll)(eio_channel *))
{
X_MUTEX_CREATE (wrklock);
X_MUTEX_CREATE (reslock);
@@ -582,7 +566,7 @@ etp_init (void (*want_poll)(void), void (*done_poll)(void))
X_COND_CREATE (reqwait);
reqq_init (&req_queue);
reqq_init (&res_queue);
eio_channel_init (&default_channel, 0);
wrk_first.next =
wrk_first.prev = &wrk_first;
@@ -656,12 +640,19 @@ etp_end_thread (void)
X_UNLOCK (wrklock);
}
void
eio_channel_init(eio_channel *channel, void *data) {
reqq_init(&channel->res_queue);
channel->data = data;
}
static int
etp_poll (void)
etp_poll (eio_channel *channel)
{
unsigned int maxreqs;
unsigned int maxtime;
struct timeval tv_start, tv_now;
if(!channel) channel = &default_channel;
X_LOCK (reslock);
maxreqs = max_poll_reqs;
@@ -678,14 +669,14 @@ etp_poll (void)
etp_maybe_start_thread ();
X_LOCK (reslock);
req = reqq_shift (&res_queue);
req = reqq_shift (&channel->res_queue);
if (req)
{
--npending;
if (!res_queue.size && done_poll_cb)
done_poll_cb ();
if (!channel->res_queue.size && done_poll_cb)
done_poll_cb (channel);
}
X_UNLOCK (reslock);
@@ -752,8 +743,8 @@ etp_submit (ETP_REQ *req)
++npending;
if (!reqq_push (&res_queue, req) && want_poll_cb)
want_poll_cb ();
if (!reqq_push (&req->channel->res_queue, req) && want_poll_cb)
want_poll_cb (req->channel);
X_UNLOCK (reslock);
}
@@ -970,9 +961,9 @@ eio_set_max_parallel (unsigned int nthreads)
etp_set_max_parallel (nthreads);
}
int eio_poll (void)
int eio_poll (eio_channel *channel)
{
return etp_poll ();
return etp_poll (channel);
}
/*****************************************************************************/
@@ -2092,8 +2083,8 @@ X_THREAD_PROC (etp_proc)
++npending;
if (!reqq_push (&res_queue, req) && want_poll_cb)
want_poll_cb ();
if (!reqq_push (&req->channel->res_queue, req) && want_poll_cb)
want_poll_cb (req->channel);
self->req = 0;
etp_worker_clear (self);
@@ -2112,7 +2103,7 @@ quit:
/*****************************************************************************/
int ecb_cold
eio_init (void (*want_poll)(void), void (*done_poll)(void))
eio_init (void (*want_poll)(eio_channel *), void (*done_poll)(eio_channel *))
{
#if !HAVE_PREADWRITE
X_MUTEX_CREATE (preadwritelock);
@@ -2138,7 +2129,8 @@ eio_api_destroy (eio_req *req)
req->pri = pri; \
req->finish = cb; \
req->data = data; \
req->destroy = eio_api_destroy;
req->destroy = eio_api_destroy; \
req->channel = channel
#define SEND eio_submit (req); return req
@@ -2294,209 +2286,209 @@ eio_execute (etp_worker *self, eio_req *req)
#ifndef EIO_NO_WRAPPERS
eio_req *eio_nop (int pri, eio_cb cb, void *data)
eio_req *eio_nop (int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_NOP); SEND;
}
eio_req *eio_busy (double delay, int pri, eio_cb cb, void *data)
eio_req *eio_busy (double delay, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_BUSY); req->nv1 = delay; SEND;
}
eio_req *eio_sync (int pri, eio_cb cb, void *data)
eio_req *eio_sync (int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_SYNC); SEND;
}
eio_req *eio_fsync (int fd, int pri, eio_cb cb, void *data)
eio_req *eio_fsync (int fd, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_FSYNC); req->int1 = fd; SEND;
}
eio_req *eio_msync (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data)
eio_req *eio_msync (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_MSYNC); req->ptr2 = addr; req->size = length; req->int1 = flags; SEND;
}
eio_req *eio_fdatasync (int fd, int pri, eio_cb cb, void *data)
eio_req *eio_fdatasync (int fd, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_FDATASYNC); req->int1 = fd; SEND;
}
eio_req *eio_syncfs (int fd, int pri, eio_cb cb, void *data)
eio_req *eio_syncfs (int fd, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_SYNCFS); req->int1 = fd; SEND;
}
eio_req *eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data)
eio_req *eio_sync_file_range (int fd, off_t offset, size_t nbytes, unsigned int flags, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_SYNC_FILE_RANGE); req->int1 = fd; req->offs = offset; req->size = nbytes; req->int2 = flags; SEND;
}
eio_req *eio_mtouch (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data)
eio_req *eio_mtouch (void *addr, size_t length, int flags, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_MTOUCH); req->ptr2 = addr; req->size = length; req->int1 = flags; SEND;
}
eio_req *eio_mlock (void *addr, size_t length, int pri, eio_cb cb, void *data)
eio_req *eio_mlock (void *addr, size_t length, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_MLOCK); req->ptr2 = addr; req->size = length; SEND;
}
eio_req *eio_mlockall (int flags, int pri, eio_cb cb, void *data)
eio_req *eio_mlockall (int flags, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_MLOCKALL); req->int1 = flags; SEND;
}
eio_req *eio_fallocate (int fd, int mode, off_t offset, size_t len, int pri, eio_cb cb, void *data)
eio_req *eio_fallocate (int fd, int mode, off_t offset, size_t len, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_FALLOCATE); req->int1 = fd; req->int2 = mode; req->offs = offset; req->size = len; SEND;
}
eio_req *eio_close (int fd, int pri, eio_cb cb, void *data)
eio_req *eio_close (int fd, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_CLOSE); req->int1 = fd; SEND;
}
eio_req *eio_readahead (int fd, off_t offset, size_t length, int pri, eio_cb cb, void *data)
eio_req *eio_readahead (int fd, off_t offset, size_t length, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_READAHEAD); req->int1 = fd; req->offs = offset; req->size = length; SEND;
}
eio_req *eio_read (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data)
eio_req *eio_read (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_READ); req->int1 = fd; req->offs = offset; req->size = length; req->ptr2 = buf; SEND;
}
eio_req *eio_write (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data)
eio_req *eio_write (int fd, void *buf, size_t length, off_t offset, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_WRITE); req->int1 = fd; req->offs = offset; req->size = length; req->ptr2 = buf; SEND;
}
eio_req *eio_fstat (int fd, int pri, eio_cb cb, void *data)
eio_req *eio_fstat (int fd, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_FSTAT); req->int1 = fd; SEND;
}
eio_req *eio_fstatvfs (int fd, int pri, eio_cb cb, void *data)
eio_req *eio_fstatvfs (int fd, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_FSTATVFS); req->int1 = fd; SEND;
}
eio_req *eio_futime (int fd, double atime, double mtime, int pri, eio_cb cb, void *data)
eio_req *eio_futime (int fd, double atime, double mtime, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_FUTIME); req->int1 = fd; req->nv1 = atime; req->nv2 = mtime; SEND;
}
eio_req *eio_ftruncate (int fd, off_t offset, int pri, eio_cb cb, void *data)
eio_req *eio_ftruncate (int fd, off_t offset, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_FTRUNCATE); req->int1 = fd; req->offs = offset; SEND;
}
eio_req *eio_fchmod (int fd, eio_mode_t mode, int pri, eio_cb cb, void *data)
eio_req *eio_fchmod (int fd, eio_mode_t mode, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_FCHMOD); req->int1 = fd; req->int2 = (long)mode; SEND;
}
eio_req *eio_fchown (int fd, eio_uid_t uid, eio_gid_t gid, int pri, eio_cb cb, void *data)
eio_req *eio_fchown (int fd, eio_uid_t uid, eio_gid_t gid, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_FCHOWN); req->int1 = fd; req->int2 = (long)uid; req->int3 = (long)gid; SEND;
}
eio_req *eio_dup2 (int fd, int fd2, int pri, eio_cb cb, void *data)
eio_req *eio_dup2 (int fd, int fd2, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_DUP2); req->int1 = fd; req->int2 = fd2; SEND;
}
eio_req *eio_sendfile (int out_fd, int in_fd, off_t in_offset, size_t length, int pri, eio_cb cb, void *data)
eio_req *eio_sendfile (int out_fd, int in_fd, off_t in_offset, size_t length, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_SENDFILE); req->int1 = out_fd; req->int2 = in_fd; req->offs = in_offset; req->size = length; SEND;
}
eio_req *eio_open (const char *path, int flags, eio_mode_t mode, int pri, eio_cb cb, void *data)
eio_req *eio_open (const char *path, int flags, eio_mode_t mode, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_OPEN); PATH; req->int1 = flags; req->int2 = (long)mode; SEND;
}
eio_req *eio_utime (const char *path, double atime, double mtime, int pri, eio_cb cb, void *data)
eio_req *eio_utime (const char *path, double atime, double mtime, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_UTIME); PATH; req->nv1 = atime; req->nv2 = mtime; SEND;
}
eio_req *eio_truncate (const char *path, off_t offset, int pri, eio_cb cb, void *data)
eio_req *eio_truncate (const char *path, off_t offset, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_TRUNCATE); PATH; req->offs = offset; SEND;
}
eio_req *eio_chown (const char *path, eio_uid_t uid, eio_gid_t gid, int pri, eio_cb cb, void *data)
eio_req *eio_chown (const char *path, eio_uid_t uid, eio_gid_t gid, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_CHOWN); PATH; req->int2 = (long)uid; req->int3 = (long)gid; SEND;
}
eio_req *eio_chmod (const char *path, eio_mode_t mode, int pri, eio_cb cb, void *data)
eio_req *eio_chmod (const char *path, eio_mode_t mode, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_CHMOD); PATH; req->int2 = (long)mode; SEND;
}
eio_req *eio_mkdir (const char *path, eio_mode_t mode, int pri, eio_cb cb, void *data)
eio_req *eio_mkdir (const char *path, eio_mode_t mode, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_MKDIR); PATH; req->int2 = (long)mode; SEND;
}
static eio_req *
eio__1path (int type, const char *path, int pri, eio_cb cb, void *data)
eio__1path (int type, const char *path, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (type); PATH; SEND;
}
eio_req *eio_readlink (const char *path, int pri, eio_cb cb, void *data)
eio_req *eio_readlink (const char *path, int pri, eio_cb cb, void *data, eio_channel *channel)
{
return eio__1path (EIO_READLINK, path, pri, cb, data);
return eio__1path (EIO_READLINK, path, pri, cb, data, channel);
}
eio_req *eio_realpath (const char *path, int pri, eio_cb cb, void *data)
eio_req *eio_realpath (const char *path, int pri, eio_cb cb, void *data, eio_channel *channel)
{
return eio__1path (EIO_REALPATH, path, pri, cb, data);
return eio__1path (EIO_REALPATH, path, pri, cb, data, channel);
}
eio_req *eio_stat (const char *path, int pri, eio_cb cb, void *data)
eio_req *eio_stat (const char *path, int pri, eio_cb cb, void *data, eio_channel *channel)
{
return eio__1path (EIO_STAT, path, pri, cb, data);
return eio__1path (EIO_STAT, path, pri, cb, data, channel);
}
eio_req *eio_lstat (const char *path, int pri, eio_cb cb, void *data)
eio_req *eio_lstat (const char *path, int pri, eio_cb cb, void *data, eio_channel *channel)
{
return eio__1path (EIO_LSTAT, path, pri, cb, data);
return eio__1path (EIO_LSTAT, path, pri, cb, data, channel);
}
eio_req *eio_statvfs (const char *path, int pri, eio_cb cb, void *data)
eio_req *eio_statvfs (const char *path, int pri, eio_cb cb, void *data, eio_channel *channel)
{
return eio__1path (EIO_STATVFS, path, pri, cb, data);
return eio__1path (EIO_STATVFS, path, pri, cb, data, channel);
}
eio_req *eio_unlink (const char *path, int pri, eio_cb cb, void *data)
eio_req *eio_unlink (const char *path, int pri, eio_cb cb, void *data, eio_channel *channel)
{
return eio__1path (EIO_UNLINK, path, pri, cb, data);
return eio__1path (EIO_UNLINK, path, pri, cb, data, channel);
}
eio_req *eio_rmdir (const char *path, int pri, eio_cb cb, void *data)
eio_req *eio_rmdir (const char *path, int pri, eio_cb cb, void *data, eio_channel *channel)
{
return eio__1path (EIO_RMDIR, path, pri, cb, data);
return eio__1path (EIO_RMDIR, path, pri, cb, data, channel);
}
eio_req *eio_readdir (const char *path, int flags, int pri, eio_cb cb, void *data)
eio_req *eio_readdir (const char *path, int flags, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_READDIR); PATH; req->int1 = flags; SEND;
}
eio_req *eio_mknod (const char *path, eio_mode_t mode, dev_t dev, int pri, eio_cb cb, void *data)
eio_req *eio_mknod (const char *path, eio_mode_t mode, dev_t dev, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_MKNOD); PATH; req->int2 = (long)mode; req->offs = (off_t)dev; SEND;
}
static eio_req *
eio__2path (int type, const char *path, const char *new_path, int pri, eio_cb cb, void *data)
eio__2path (int type, const char *path, const char *new_path, int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (type); PATH;
@@ -2511,29 +2503,29 @@ eio__2path (int type, const char *path, const char *new_path, int pri, eio_cb cb
SEND;
}
eio_req *eio_link (const char *path, const char *new_path, int pri, eio_cb cb, void *data)
eio_req *eio_link (const char *path, const char *new_path, int pri, eio_cb cb, void *data, eio_channel *channel)
{
return eio__2path (EIO_LINK, path, new_path, pri, cb, data);
return eio__2path (EIO_LINK, path, new_path, pri, cb, data, channel);
}
eio_req *eio_symlink (const char *path, const char *new_path, int pri, eio_cb cb, void *data)
eio_req *eio_symlink (const char *path, const char *new_path, int pri, eio_cb cb, void *data, eio_channel *channel)
{
return eio__2path (EIO_SYMLINK, path, new_path, pri, cb, data);
return eio__2path (EIO_SYMLINK, path, new_path, pri, cb, data, channel);
}
eio_req *eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data)
eio_req *eio_rename (const char *path, const char *new_path, int pri, eio_cb cb, void *data, eio_channel *channel)
{
return eio__2path (EIO_RENAME, path, new_path, pri, cb, data);
return eio__2path (EIO_RENAME, path, new_path, pri, cb, data, channel);
}
eio_req *eio_custom (void (*execute)(eio_req *), int pri, eio_cb cb, void *data)
eio_req *eio_custom (void (*execute)(eio_req *), int pri, eio_cb cb, void *data, eio_channel *channel)
{
REQ (EIO_CUSTOM); req->feed = execute; SEND;
}
#endif
eio_req *eio_grp (eio_cb cb, void *data)
eio_req *eio_grp (eio_cb cb, void *data, eio_channel *channel)
{
const int pri = EIO_PRI_MAX;
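
With the REQ/SEND macro change, the channel is recorded on each request before submission, so a wrapper such as eio_nop is now roughly equivalent to the following hand expansion (approximate, written as if inside eio.c where eio_submit() and the static eio_api_destroy() are in scope; the real macro's allocation and bookkeeping may differ slightly):

    eio_req *eio_nop (int pri, eio_cb cb, void *data, eio_channel *channel)
    {
      eio_req *req = (eio_req *) calloc (1, sizeof (eio_req));
      if (!req)
        return 0;

      req->type    = EIO_NOP;
      req->pri     = pri;
      req->finish  = cb;
      req->data    = data;
      req->destroy = eio_api_destroy;
      req->channel = channel;   /* new: lets the worker push the finished request
                                   onto channel->res_queue and wake only that channel */

      eio_submit (req);
      return req;
    }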

deps/uv/src/unix/fs.c (20 changed lines)

@@ -44,7 +44,7 @@
uv_fs_req_init(loop, req, type, path, cb); \
if (cb) { \
/* async */ \
req->eio = eiofunc(args, EIO_PRI_DEFAULT, uv__fs_after, req); \
req->eio = eiofunc(args, EIO_PRI_DEFAULT, uv__fs_after, req, &loop->uv_eio_channel); \
if (!req->eio) { \
uv__set_sys_error(loop, ENOMEM); \
return -1; \
@@ -191,7 +191,7 @@ int uv_fs_open(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags,
if (cb) {
/* async */
uv_ref(loop);
req->eio = eio_open(path, flags, mode, EIO_PRI_DEFAULT, uv__fs_after, req);
req->eio = eio_open(path, flags, mode, EIO_PRI_DEFAULT, uv__fs_after, req, &loop->uv_eio_channel);
if (!req->eio) {
uv__set_sys_error(loop, ENOMEM);
return -1;
@@ -222,7 +222,7 @@ int uv_fs_read(uv_loop_t* loop, uv_fs_t* req, uv_file fd, void* buf,
/* async */
uv_ref(loop);
req->eio = eio_read(fd, buf, length, offset, EIO_PRI_DEFAULT,
uv__fs_after, req);
uv__fs_after, req, &loop->uv_eio_channel);
if (!req->eio) {
uv__set_sys_error(loop, ENOMEM);
@@ -260,7 +260,7 @@ int uv_fs_write(uv_loop_t* loop, uv_fs_t* req, uv_file file, void* buf,
/* async */
uv_ref(loop);
req->eio = eio_write(file, buf, length, offset, EIO_PRI_DEFAULT,
uv__fs_after, req);
uv__fs_after, req, &loop->uv_eio_channel);
if (!req->eio) {
uv__set_sys_error(loop, ENOMEM);
return -1;
@@ -307,7 +307,7 @@ int uv_fs_readdir(uv_loop_t* loop, uv_fs_t* req, const char* path, int flags,
if (cb) {
/* async */
uv_ref(loop);
req->eio = eio_readdir(path, flags, EIO_PRI_DEFAULT, uv__fs_after, req);
req->eio = eio_readdir(path, flags, EIO_PRI_DEFAULT, uv__fs_after, req, &loop->uv_eio_channel);
if (!req->eio) {
uv__set_sys_error(loop, ENOMEM);
return -1;
@@ -377,7 +377,7 @@ int uv_fs_stat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
if (cb) {
/* async */
uv_ref(loop);
req->eio = eio_stat(pathdup, EIO_PRI_DEFAULT, uv__fs_after, req);
req->eio = eio_stat(pathdup, EIO_PRI_DEFAULT, uv__fs_after, req, &loop->uv_eio_channel);
free(pathdup);
@@ -411,7 +411,7 @@ int uv_fs_fstat(uv_loop_t* loop, uv_fs_t* req, uv_file file, uv_fs_cb cb) {
if (cb) {
/* async */
uv_ref(loop);
req->eio = eio_fstat(file, EIO_PRI_DEFAULT, uv__fs_after, req);
req->eio = eio_fstat(file, EIO_PRI_DEFAULT, uv__fs_after, req, &loop->uv_eio_channel);
if (!req->eio) {
uv__set_sys_error(loop, ENOMEM);
@@ -550,7 +550,7 @@ int uv_fs_lstat(uv_loop_t* loop, uv_fs_t* req, const char* path, uv_fs_cb cb) {
if (cb) {
/* async */
uv_ref(loop);
req->eio = eio_lstat(pathdup, EIO_PRI_DEFAULT, uv__fs_after, req);
req->eio = eio_lstat(pathdup, EIO_PRI_DEFAULT, uv__fs_after, req, &loop->uv_eio_channel);
free(pathdup);
@@ -598,7 +598,7 @@ int uv_fs_readlink(uv_loop_t* loop, uv_fs_t* req, const char* path,
uv_fs_req_init(loop, req, UV_FS_READLINK, path, cb);
if (cb) {
if ((req->eio = eio_readlink(path, EIO_PRI_DEFAULT, uv__fs_after, req))) {
if ((req->eio = eio_readlink(path, EIO_PRI_DEFAULT, uv__fs_after, req, &loop->uv_eio_channel))) {
uv_ref(loop);
return 0;
} else {
@@ -692,7 +692,7 @@ int uv_queue_work(uv_loop_t* loop, uv_work_t* req, uv_work_cb work_cb,
req->work_cb = work_cb;
req->after_work_cb = after_work_cb;
req->eio = eio_custom(uv__work, EIO_PRI_DEFAULT, uv__after_work, req);
req->eio = eio_custom(uv__work, EIO_PRI_DEFAULT, uv__after_work, req, &loop->uv_eio_channel);
if (!req->eio) {
uv__set_sys_error(loop, ENOMEM);
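
None of the eio signatures touched here are public; callers keep using the uv_fs_* API unchanged, and the completion is simply routed through the submitting loop's channel. A short usage sketch of the era's async open; the path and callback names are illustrative:

    #include <fcntl.h>
    #include "uv.h"

    static uv_fs_t open_req;    /* must outlive the asynchronous call */

    static void on_open(uv_fs_t *req) {
      if (req->result >= 0) {
        /* req->result is the new file descriptor */
      }
      uv_fs_req_cleanup(req);
    }

    static int start_open(uv_loop_t *loop) {
      /* with a callback, this goes through eio_open(..., &loop->uv_eio_channel) above */
      return uv_fs_open(loop, &open_req, "/tmp/example.txt", O_RDONLY, 0, on_open);
    }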

deps/uv/src/unix/uv-eio.c (26 changed lines)

@@ -27,16 +27,12 @@
#include <stdio.h>
/* TODO remove me! */
static uv_loop_t* main_loop;
static void uv_eio_do_poll(uv_idle_t* watcher, int status) {
assert(watcher == &(watcher->loop->uv_eio_poller));
/* printf("uv_eio_poller\n"); */
if (eio_poll() != -1 && uv_is_active((uv_handle_t*) watcher)) {
if (eio_poll(&watcher->loop->uv_eio_channel) != -1 && uv_is_active((uv_handle_t*) watcher)) {
/* printf("uv_eio_poller stop\n"); */
uv_idle_stop(watcher);
uv_unref(watcher->loop);
@@ -52,7 +48,7 @@ static void uv_eio_want_poll_notifier_cb(uv_async_t* watcher, int status) {
/* printf("want poll notifier\n"); */
if (eio_poll() == -1 && !uv_is_active((uv_handle_t*) &loop->uv_eio_poller)) {
if (eio_poll(&watcher->loop->uv_eio_channel) == -1 && !uv_is_active((uv_handle_t*) &loop->uv_eio_poller)) {
/* printf("uv_eio_poller start\n"); */
uv_idle_start(&loop->uv_eio_poller, uv_eio_do_poll);
uv_ref(loop);
@@ -67,7 +63,7 @@ static void uv_eio_done_poll_notifier_cb(uv_async_t* watcher, int revents) {
/* printf("done poll notifier\n"); */
if (eio_poll() != -1 && uv_is_active((uv_handle_t*) &loop->uv_eio_poller)) {
if (eio_poll(&watcher->loop->uv_eio_channel) != -1 && uv_is_active((uv_handle_t*) &loop->uv_eio_poller)) {
/* printf("uv_eio_poller stop\n"); */
uv_idle_stop(&loop->uv_eio_poller);
uv_unref(loop);
@@ -79,7 +75,7 @@ static void uv_eio_done_poll_notifier_cb(uv_async_t* watcher, int revents) {
* uv_eio_want_poll() is called from the EIO thread pool each time an EIO
* request (that is, one of the node.fs.* functions) has completed.
*/
static void uv_eio_want_poll(void) {
static void uv_eio_want_poll(eio_channel *channel) {
/* Signal the main thread that eio_poll need to be processed. */
/*
@@ -87,16 +83,16 @@ static void uv_eio_want_poll(void) {
* uv_eio_want_poll_notifier.
*/
uv_async_send(&main_loop->uv_eio_want_poll_notifier);
uv_async_send(&((uv_loop_t *)channel->data)->uv_eio_want_poll_notifier);
}
static void uv_eio_done_poll(void) {
static void uv_eio_done_poll(eio_channel *channel) {
/*
* Signal the main thread that we should stop calling eio_poll().
* from the idle watcher.
*/
uv_async_send(&main_loop->uv_eio_done_poll_notifier);
uv_async_send(&((uv_loop_t *)channel->data)->uv_eio_done_poll_notifier);
}
@@ -104,8 +100,6 @@ void uv_eio_init(uv_loop_t* loop) {
if (loop->counters.eio_init == 0) {
loop->counters.eio_init++;
main_loop = loop;
uv_idle_init(loop, &loop->uv_eio_poller);
uv_idle_start(&loop->uv_eio_poller, uv_eio_do_poll);
@@ -124,11 +118,5 @@ void uv_eio_init(uv_loop_t* loop) {
* race conditions. See Node's test/simple/test-eio-race.js
*/
eio_set_max_poll_reqs(10);
} else {
/*
* If this assertion breaks then Ryan hasn't implemented support for
* receiving thread pool requests back to multiple threads.
*/
assert(main_loop == loop);
}
}
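
With the main_loop global and its assert gone, each loop can drain its own eio completions: the channel's data pointer carries the owning loop back to uv_eio_want_poll()/uv_eio_done_poll(), which signal that loop's notifiers instead of the single main loop. A hedged sketch of two loops issuing eio-backed requests side by side, assuming the per-loop eio bootstrapping this commit enables; uv_loop_new(), uv_default_loop() and the one-argument uv_run() are the public loop APIs of this era, and error handling is omitted:

    #include "uv.h"

    static uv_fs_t req_a, req_b;

    static void on_stat(uv_fs_t *req) {
      uv_fs_req_cleanup(req);
    }

    int run_two_loops(void) {
      uv_loop_t *a = uv_default_loop();
      uv_loop_t *b = uv_loop_new();

      uv_fs_stat(a, &req_a, "/etc/hostname", on_stat);  /* completion arrives via a->uv_eio_channel */
      uv_fs_stat(b, &req_b, "/etc/hostname", on_stat);  /* completion arrives via b->uv_eio_channel */

      uv_run(b);   /* each loop polls only its own channel */
      uv_run(a);
      return 0;
    }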

deps/uv/test/test-list.h (2 changed lines)

@@ -126,7 +126,6 @@ TEST_DECLARE (thread_self)
TEST_DECLARE (strlcpy)
TEST_DECLARE (strlcat)
TEST_DECLARE (counters_init)
#ifdef _WIN32
TEST_DECLARE (spawn_detect_pipe_name_collisions_on_windows)
TEST_DECLARE (argument_escaping)
@@ -293,7 +292,6 @@ TASK_LIST_START
TEST_ENTRY (strlcpy)
TEST_ENTRY (strlcat)
TEST_ENTRY (counters_init)
#if 0
/* These are for testing the test runner. */
TEST_ENTRY (fail_always)
