Browse Source

upgrade libuv to ab8c3b85c1

v0.9.1-release
Igor Zinkovsky 13 years ago
parent
commit
6aa7f6f732
  1. 6
      deps/uv/config-unix.mk
  2. 6
      deps/uv/include/uv-private/ngx-queue.h
  3. 12
      deps/uv/include/uv-private/tree.h
  4. 4
      deps/uv/include/uv-private/uv-unix.h
  5. 5
      deps/uv/include/uv-private/uv-win.h
  6. 23
      deps/uv/include/uv.h
  7. 5
      deps/uv/src/unix/cares.c
  8. 11
      deps/uv/src/unix/core.c
  9. 2
      deps/uv/src/unix/error.c
  10. 7
      deps/uv/src/unix/internal.h
  11. 2
      deps/uv/src/unix/kqueue.c
  12. 38
      deps/uv/src/unix/linux/core.c
  13. 5
      deps/uv/src/unix/linux/inotify.c
  14. 3
      deps/uv/src/unix/openbsd.c
  15. 4
      deps/uv/src/unix/pipe.c
  16. 34
      deps/uv/src/unix/process.c
  17. 7
      deps/uv/src/unix/stream.c
  18. 2
      deps/uv/src/unix/sunos.c
  19. 4
      deps/uv/src/unix/tcp.c
  20. 4
      deps/uv/src/unix/udp.c
  21. 59
      deps/uv/src/uv-common.c
  22. 4
      deps/uv/src/uv-common.h
  23. 1
      deps/uv/src/win/cares.c
  24. 2
      deps/uv/src/win/core.c
  25. 5
      deps/uv/src/win/error.c
  26. 13
      deps/uv/src/win/fs-event.c
  27. 15
      deps/uv/src/win/handle.c
  28. 6
      deps/uv/src/win/internal.h
  29. 61
      deps/uv/src/win/pipe.c
  30. 177
      deps/uv/src/win/tcp.c
  31. 2
      deps/uv/src/win/winapi.h
  32. 14
      deps/uv/test/benchmark-ares.c
  33. 7
      deps/uv/test/dns-server.c
  34. 216
      deps/uv/test/run-tests.c
  35. 3
      deps/uv/test/runner.c
  36. 106
      deps/uv/test/test-fs-event.c
  37. 2
      deps/uv/test/test-get-memory.c
  38. 345
      deps/uv/test/test-ipc.c
  39. 6
      deps/uv/test/test-list.h
  40. 12
      deps/uv/test/test-platform-output.c
  41. 99
      deps/uv/test/test-stdio-over-pipes.c
  42. 1
      deps/uv/test/test-tcp-writealot.c
  43. 3
      deps/uv/test/test-udp-multicast-ttl.c
  44. 1
      deps/uv/uv.gyp

6
deps/uv/config-unix.mk

@ -152,14 +152,16 @@ src/unix/uv-eio.o: src/unix/uv-eio.c
clean-platform:
-rm -f src/ares/*.o
-rm -f src/unix/*.o
-rm -f src/unix/ev/*.o
-rm -f src/unix/eio/*.o
-rm -f src/unix/*.o
-rm -f src/unix/linux/*.o
-rm -rf test/run-tests.dSYM run-benchmarks.dSYM
distclean-platform:
-rm -f src/ares/*.o
-rm -f src/unix/ev/*.o
-rm -f src/unix/*.o
-rm -f src/unix/ev/*.o
-rm -f src/unix/eio/*.o
-rm -f src/unix/linux/*.o
-rm -rf test/run-tests.dSYM run-benchmarks.dSYM

6
deps/uv/include/uv-private/ngx-queue.h

@ -4,8 +4,8 @@
*/
#ifndef _NGX_QUEUE_H_INCLUDED_
#define _NGX_QUEUE_H_INCLUDED_
#ifndef NGX_QUEUE_H_INCLUDED_
#define NGX_QUEUE_H_INCLUDED_
typedef struct ngx_queue_s ngx_queue_t;
@ -103,4 +103,4 @@ struct ngx_queue_s {
for ((q) = ngx_queue_head(h); (q) != (h); (q) = ngx_queue_next(q))
#endif /* _NGX_QUEUE_H_INCLUDED_ */
#endif /* NGX_QUEUE_H_INCLUDED_ */

12
deps/uv/include/uv-private/tree.h

@ -23,10 +23,14 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _UV_TREE_H_
#define _UV_TREE_H_
#ifndef UV_TREE_H_
#define UV_TREE_H_
#define __unused
#if __GNUC__
# define __unused __attribute__((unused))
#else
# define __unused
#endif
/*
* This file defines data structures for different types of trees:
@ -759,4 +763,4 @@ name##_RB_MINMAX(struct name *head, int val) \
((x) != NULL) && ((y) = name##_RB_PREV(x), (x) != NULL); \
(x) = (y))
#endif /* _UV_TREE_H_ */
#endif /* UV_TREE_H_ */

4
deps/uv/include/uv-private/uv-unix.h

@ -78,6 +78,10 @@ typedef void* uv_lib_t;
/* Poll result queue */ \
eio_channel uv_eio_channel; \
struct ev_loop* ev; \
/* Various thing for libeio. */ \
uv_async_t uv_eio_want_poll_notifier; \
uv_async_t uv_eio_done_poll_notifier; \
uv_idle_t uv_eio_poller; \
UV_LOOP_PRIVATE_PLATFORM_FIELDS
#define UV_REQ_BUFSML_SIZE (4)

5
deps/uv/include/uv-private/uv-win.h

@ -318,7 +318,10 @@ RB_HEAD(uv_timer_tree_s, uv_timer_s);
uv_write_t ipc_header_write_req; \
int ipc_pid; \
uint64_t remaining_ipc_rawdata_bytes; \
WSAPROTOCOL_INFOW* pending_socket_info; \
struct { \
WSAPROTOCOL_INFOW* socket_info; \
int tcp_connection; \
} pending_ipc_info; \
uv_write_t* non_overlapped_writes_tail;
#define UV_PIPE_PRIVATE_FIELDS \

23
deps/uv/include/uv.h

@ -121,7 +121,8 @@ typedef intptr_t ssize_t;
XX( 50, EPERM, "operation not permitted") \
XX( 51, ELOOP, "too many symbolic links encountered") \
XX( 52, EXDEV, "cross-device link not permitted") \
XX( 53, ENOTEMPTY, "directory not empty")
XX( 53, ENOTEMPTY, "directory not empty") \
XX( 54, ENOSPC, "no space left on device")
#define UV_ERRNO_GEN(val, name, s) UV_##name = val,
@ -494,6 +495,13 @@ UV_EXTERN int uv_read2_start(uv_stream_t*, uv_alloc_cb alloc_cb,
UV_EXTERN int uv_write(uv_write_t* req, uv_stream_t* handle,
uv_buf_t bufs[], int bufcnt, uv_write_cb cb);
/*
* Extended write function for sending handles over a pipe. The pipe must be
* initialized with ipc == 1.
* send_handle must be a TCP socket or pipe, which is a server or a connection
* (listening or connected state). Bound sockets or pipes will be assumed to
* be servers.
*/
UV_EXTERN int uv_write2(uv_write_t* req, uv_stream_t* handle, uv_buf_t bufs[],
int bufcnt, uv_stream_t* send_handle, uv_write_cb cb);
@ -509,10 +517,9 @@ struct uv_write_s {
/*
* Used to determine whether a stream is readable or writable.
* TODO: export in v0.8.
*/
/* UV_EXTERN */ int uv_is_readable(uv_stream_t* handle);
/* UV_EXTERN */ int uv_is_writable(uv_stream_t* handle);
UV_EXTERN int uv_is_readable(uv_stream_t* handle);
UV_EXTERN int uv_is_writable(uv_stream_t* handle);
/*
@ -1482,12 +1489,8 @@ struct uv_counters_s {
struct uv_loop_s {
UV_LOOP_PRIVATE_FIELDS
/* list used for ares task handles */
uv_ares_task_t* uv_ares_handles_;
/* Various thing for libeio. */
uv_async_t uv_eio_want_poll_notifier;
uv_async_t uv_eio_done_poll_notifier;
uv_idle_t uv_eio_poller;
/* RB_HEAD(uv__ares_tasks, uv_ares_task_t) */
struct uv__ares_tasks { uv_ares_task_t* rbh_root; } uv_ares_handles_;
/* Diagnostic counters */
uv_counters_t counters;
/* The last error */

5
deps/uv/src/unix/cares.c

@ -62,7 +62,7 @@ static void uv__ares_io(struct ev_loop* ev, struct ev_io* watcher,
/* Allocates and returns a new uv_ares_task_t */
static uv_ares_task_t* uv__ares_task_create(int fd) {
static uv_ares_task_t* uv__ares_task_create(uv_loop_t* loop, int fd) {
uv_ares_task_t* h = malloc(sizeof(uv_ares_task_t));
if (h == NULL) {
@ -70,6 +70,7 @@ static uv_ares_task_t* uv__ares_task_create(int fd) {
return NULL;
}
h->loop = loop;
h->sock = fd;
ev_io_init(&h->read_watcher, uv__ares_io, fd, EV_READ);
@ -102,7 +103,7 @@ static void uv__ares_sockstate_cb(void* data, ares_socket_t sock,
ev_timer_again(loop->ev, &loop->timer);
}
h = uv__ares_task_create(sock);
h = uv__ares_task_create(loop, sock);
uv_add_ares_handle(loop, h);
}

11
deps/uv/src/unix/core.c

@ -82,11 +82,11 @@ void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
uv_read_stop(stream);
ev_io_stop(stream->loop->ev, &stream->write_watcher);
uv__close(stream->fd);
close(stream->fd);
stream->fd = -1;
if (stream->accepted_fd >= 0) {
uv__close(stream->accepted_fd);
close(stream->accepted_fd);
stream->accepted_fd = -1;
}
@ -145,6 +145,7 @@ void uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
static int uv__loop_init(uv_loop_t* loop,
struct ev_loop *(ev_loop_new)(unsigned int flags)) {
memset(loop, 0, sizeof(*loop));
RB_INIT(&loop->uv_ares_handles_);
#if HAVE_KQUEUE
loop->ev = ev_loop_new(EVBACKEND_KQUEUE);
#else
@ -751,7 +752,7 @@ int uv__socket(int domain, int type, int protocol) {
goto out;
if (uv__nonblock(sockfd, 1) || uv__cloexec(sockfd, 1)) {
uv__close(sockfd);
close(sockfd);
sockfd = -1;
}
@ -787,7 +788,7 @@ int uv__accept(int sockfd, struct sockaddr* saddr, socklen_t slen) {
}
if (uv__cloexec(peerfd, 1) || uv__nonblock(peerfd, 1)) {
uv__close(peerfd);
close(peerfd);
peerfd = -1;
}
@ -861,7 +862,7 @@ int uv__dup(int fd) {
return -1;
if (uv__cloexec(fd, 1)) {
SAVE_ERRNO(uv__close(fd));
SAVE_ERRNO(close(fd));
return -1;
}

2
deps/uv/src/unix/error.c

@ -74,6 +74,7 @@ uv_err_code uv_translate_sys_error(int sys_errno) {
case EMSGSIZE: return UV_EMSGSIZE;
case ENAMETOOLONG: return UV_ENAMETOOLONG;
case EINVAL: return UV_EINVAL;
case ENETUNREACH: return UV_ENETUNREACH;
case ECONNABORTED: return UV_ECONNABORTED;
case ELOOP: return UV_ELOOP;
case ECONNREFUSED: return UV_ECONNREFUSED;
@ -90,6 +91,7 @@ uv_err_code uv_translate_sys_error(int sys_errno) {
case EXDEV: return UV_EXDEV;
case EBUSY: return UV_EBUSY;
case ENOTEMPTY: return UV_ENOTEMPTY;
case ENOSPC: return UV_ENOSPC;
default: return UV_UNKNOWN;
}
UNREACHABLE();

7
deps/uv/src/unix/internal.h

@ -172,13 +172,6 @@ int uv__cloexec(int fd, int set) __attribute__((unused));
int uv__socket(int domain, int type, int protocol);
int uv__dup(int fd);
/* We used to handle EINTR in uv__close() but linux 2.6 will have closed the
* file descriptor anyway, even on EINTR. Retrying in that case isn't merely
* useless, it's actively harmful - the file descriptor may have been acquired
* by another thread.
*/
#define uv__close(fd) close(fd)
/* error */
uv_err_code uv_translate_sys_error(int sys_errno);
void uv_fatal_error(const int errorno, const char* syscall);

2
deps/uv/src/unix/kqueue.c

@ -124,7 +124,7 @@ int uv_fs_event_init(uv_loop_t* loop,
void uv__fs_event_destroy(uv_fs_event_t* handle) {
uv__fs_event_stop(handle);
free(handle->filename);
uv__close(handle->fd);
close(handle->fd);
handle->fd = -1;
}

38
deps/uv/src/unix/linux/core.c

@ -48,6 +48,14 @@
#undef NANOSEC
#define NANOSEC 1000000000
/* This is rather annoying: CLOCK_BOOTTIME lives in <linux/time.h> but we can't
* include that file because it conflicts with <time.h>. We'll just have to
* define it ourselves.
*/
#ifndef CLOCK_BOOTTIME
# define CLOCK_BOOTTIME 7
#endif
static char buf[MAXPATHLEN + 1];
static struct {
@ -267,22 +275,28 @@ error:
uv_err_t uv_uptime(double* uptime) {
#ifdef CLOCK_MONOTONIC
static volatile int no_clock_boottime;
struct timespec now;
if (0 == clock_gettime(CLOCK_MONOTONIC, &now)) {
*uptime = now.tv_sec;
*uptime += (double)now.tv_nsec / 1000000000.0;
return uv_ok_;
int r;
/* Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available
* (pre-2.6.39 kernels). CLOCK_MONOTONIC doesn't increase when the system
* is suspended.
*/
if (no_clock_boottime) {
retry: r = clock_gettime(CLOCK_MONOTONIC, &now);
}
return uv__new_sys_error(errno);
#else
struct sysinfo info;
if (sysinfo(&info) < 0) {
return uv__new_sys_error(errno);
else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) {
no_clock_boottime = 1;
goto retry;
}
*uptime = (double)info.uptime;
if (r)
return uv__new_sys_error(errno);
*uptime = now.tv_sec;
*uptime += (double)now.tv_nsec / 1000000000.0;
return uv_ok_;
#endif
}

5
deps/uv/src/unix/linux/inotify.c

@ -132,8 +132,7 @@ static int compare_watchers(const uv_fs_event_t* a, const uv_fs_event_t* b) {
}
RB_GENERATE_INTERNAL(uv__inotify_watchers, uv_fs_event_s, node, compare_watchers,
inline static __attribute__((unused)))
RB_GENERATE_STATIC(uv__inotify_watchers, uv_fs_event_s, node, compare_watchers)
void uv__inotify_loop_init(uv_loop_t* loop) {
@ -170,7 +169,7 @@ static int new_inotify_fd(void) {
return -1;
if (uv__cloexec(fd, 1) || uv__nonblock(fd, 1)) {
SAVE_ERRNO(uv__close(fd));
SAVE_ERRNO(close(fd));
return -1;
}

3
deps/uv/src/unix/openbsd.c

@ -216,6 +216,7 @@ uv_err_t uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
int numcpus = 1;
static int which[] = {CTL_HW,HW_MODEL,0};
size_t size;
int i;
uv_cpu_info_t* cpu_info;
size = sizeof(model);
@ -245,7 +246,7 @@ uv_err_t uv_cpu_info(uv_cpu_info_t** cpu_infos, int* count) {
size = sizeof(info);
which[0] = CTL_KERN;
which[1] = KERN_CPTIME2;
for (int i = 0; i < numcpus; i++) {
for (i = 0; i < numcpus; i++) {
which[2] = i;
size = sizeof(info);
if (sysctl(which, 3, &info, &size, NULL, 0) < 0) {

4
deps/uv/src/unix/pipe.c

@ -109,7 +109,7 @@ out:
assert(pipe_fname != NULL);
unlink(pipe_fname);
}
uv__close(sockfd);
close(sockfd);
free((void*)pipe_fname);
}
@ -210,7 +210,7 @@ void uv_pipe_connect(uv_connect_t* req,
if (r == -1) {
status = errno;
uv__close(sockfd);
close(sockfd);
goto out;
}

34
deps/uv/src/unix/process.c

@ -34,7 +34,7 @@
# include <TargetConditionals.h>
#endif
#if defined(__APPLE__) && !defined(TARGET_OS_IPHONE)
#if defined(__APPLE__) && !TARGET_OS_IPHONE
# include <crt_externs.h>
# define environ (*_NSGetEnviron())
#else
@ -229,8 +229,8 @@ int uv_spawn(uv_loop_t* loop, uv_process_t* process,
if (pid == -1) {
#if SPAWN_WAIT_EXEC
uv__close(signal_pipe[0]);
uv__close(signal_pipe[1]);
close(signal_pipe[0]);
close(signal_pipe[1]);
#endif
environ = save_our_env;
goto error;
@ -238,7 +238,7 @@ int uv_spawn(uv_loop_t* loop, uv_process_t* process,
if (pid == 0) {
if (stdin_pipe[0] >= 0) {
uv__close(stdin_pipe[1]);
close(stdin_pipe[1]);
dup2(stdin_pipe[0], STDIN_FILENO);
} else {
/* Reset flags that might be set by Node */
@ -247,7 +247,7 @@ int uv_spawn(uv_loop_t* loop, uv_process_t* process,
}
if (stdout_pipe[1] >= 0) {
uv__close(stdout_pipe[0]);
close(stdout_pipe[0]);
dup2(stdout_pipe[1], STDOUT_FILENO);
} else {
/* Reset flags that might be set by Node */
@ -256,7 +256,7 @@ int uv_spawn(uv_loop_t* loop, uv_process_t* process,
}
if (stderr_pipe[1] >= 0) {
uv__close(stderr_pipe[0]);
close(stderr_pipe[0]);
dup2(stderr_pipe[1], STDERR_FILENO);
} else {
/* Reset flags that might be set by Node */
@ -284,7 +284,7 @@ int uv_spawn(uv_loop_t* loop, uv_process_t* process,
#if SPAWN_WAIT_EXEC
/* POLLHUP signals child has exited or execve()'d. */
uv__close(signal_pipe[1]);
close(signal_pipe[1]);
do {
pfd.fd = signal_pipe[0];
pfd.events = POLLIN|POLLHUP;
@ -294,7 +294,7 @@ int uv_spawn(uv_loop_t* loop, uv_process_t* process,
while (status == -1 && (errno == EINTR || errno == ENOMEM));
assert((status == 1) && "poll() on pipe read end failed");
uv__close(signal_pipe[0]);
close(signal_pipe[0]);
#endif
process->pid = pid;
@ -306,7 +306,7 @@ int uv_spawn(uv_loop_t* loop, uv_process_t* process,
if (stdin_pipe[1] >= 0) {
assert(options.stdin_stream);
assert(stdin_pipe[0] >= 0);
uv__close(stdin_pipe[0]);
close(stdin_pipe[0]);
uv__nonblock(stdin_pipe[1], 1);
flags = UV_WRITABLE | (options.stdin_stream->ipc ? UV_READABLE : 0);
uv__stream_open((uv_stream_t*)options.stdin_stream, stdin_pipe[1],
@ -316,7 +316,7 @@ int uv_spawn(uv_loop_t* loop, uv_process_t* process,
if (stdout_pipe[0] >= 0) {
assert(options.stdout_stream);
assert(stdout_pipe[1] >= 0);
uv__close(stdout_pipe[1]);
close(stdout_pipe[1]);
uv__nonblock(stdout_pipe[0], 1);
flags = UV_READABLE | (options.stdout_stream->ipc ? UV_WRITABLE : 0);
uv__stream_open((uv_stream_t*)options.stdout_stream, stdout_pipe[0],
@ -326,7 +326,7 @@ int uv_spawn(uv_loop_t* loop, uv_process_t* process,
if (stderr_pipe[0] >= 0) {
assert(options.stderr_stream);
assert(stderr_pipe[1] >= 0);
uv__close(stderr_pipe[1]);
close(stderr_pipe[1]);
uv__nonblock(stderr_pipe[0], 1);
flags = UV_READABLE | (options.stderr_stream->ipc ? UV_WRITABLE : 0);
uv__stream_open((uv_stream_t*)options.stderr_stream, stderr_pipe[0],
@ -337,12 +337,12 @@ int uv_spawn(uv_loop_t* loop, uv_process_t* process,
error:
uv__set_sys_error(process->loop, errno);
uv__close(stdin_pipe[0]);
uv__close(stdin_pipe[1]);
uv__close(stdout_pipe[0]);
uv__close(stdout_pipe[1]);
uv__close(stderr_pipe[0]);
uv__close(stderr_pipe[1]);
close(stdin_pipe[0]);
close(stdin_pipe[1]);
close(stdout_pipe[0]);
close(stdout_pipe[1]);
close(stderr_pipe[0]);
close(stderr_pipe[1]);
return -1;
}

7
deps/uv/src/unix/stream.c

@ -185,6 +185,9 @@ void uv__server_io(EV_P_ ev_io* watcher, int revents) {
} else if (errno == EMFILE) {
/* TODO special trick. unlock reserved socket, accept, close. */
return;
} else if (errno == ECONNABORTED) {
/* ignore */
continue;
} else {
uv__set_sys_error(stream->loop, errno);
stream->connection_cb((uv_stream_t*)stream, -1);
@ -225,7 +228,7 @@ int uv_accept(uv_stream_t* server, uv_stream_t* client) {
if (uv__stream_open(streamClient, streamServer->accepted_fd,
UV_READABLE | UV_WRITABLE)) {
/* TODO handle error */
uv__close(streamServer->accepted_fd);
close(streamServer->accepted_fd);
streamServer->accepted_fd = -1;
goto out;
}
@ -793,7 +796,7 @@ int uv__connect(uv_connect_t* req, uv_stream_t* stream, struct sockaddr* addr,
}
if (uv__stream_open(stream, sockfd, UV_READABLE | UV_WRITABLE)) {
uv__close(sockfd);
close(sockfd);
return -2;
}
}

2
deps/uv/src/unix/sunos.c

@ -193,7 +193,7 @@ int uv_fs_event_init(uv_loop_t* loop,
void uv__fs_event_destroy(uv_fs_event_t* handle) {
ev_ref(handle->loop->ev);
ev_io_stop(handle->loop->ev, &handle->event_watcher);
uv__close(handle->fd);
close(handle->fd);
handle->fd = -1;
free(handle->filename);
handle->filename = NULL;

4
deps/uv/src/unix/tcp.c

@ -51,7 +51,7 @@ static int uv__bind(uv_tcp_t* tcp,
}
if (uv__stream_open((uv_stream_t*)tcp, tcp->fd, UV_READABLE | UV_WRITABLE)) {
uv__close(tcp->fd);
close(tcp->fd);
tcp->fd = -1;
status = -2;
goto out;
@ -182,7 +182,7 @@ int uv_tcp_listen(uv_tcp_t* tcp, int backlog, uv_connection_cb cb) {
}
if (uv__stream_open((uv_stream_t*)tcp, tcp->fd, UV_READABLE)) {
uv__close(tcp->fd);
close(tcp->fd);
tcp->fd = -1;
return -1;
}

4
deps/uv/src/unix/udp.c

@ -88,7 +88,7 @@ static void uv__udp_stop_write_watcher(uv_udp_t* handle) {
void uv__udp_start_close(uv_udp_t* handle) {
uv__udp_stop_write_watcher(handle);
uv__udp_stop_read_watcher(handle);
uv__close(handle->fd);
close(handle->fd);
handle->fd = -1;
}
@ -383,7 +383,7 @@ static int uv__bind(uv_udp_t* handle,
out:
if (status)
uv__close(fd);
close(fd);
errno = saved_errno;
return status;

59
deps/uv/src/uv-common.c

@ -180,57 +180,43 @@ int uv_ip6_name(struct sockaddr_in6* src, char* dst, size_t size) {
}
/* find matching ares handle in list */
void uv_add_ares_handle(uv_loop_t* loop, uv_ares_task_t* handle) {
handle->loop = loop;
handle->ares_next = loop->uv_ares_handles_;
handle->ares_prev = NULL;
static int cmp_ares_tasks(const uv_ares_task_t* a, const uv_ares_task_t* b) {
if (a->sock < b->sock) return -1;
if (a->sock > b->sock) return 1;
return 0;
}
RB_GENERATE_STATIC(uv__ares_tasks, uv_ares_task_s, node, cmp_ares_tasks)
if (loop->uv_ares_handles_) {
loop->uv_ares_handles_->ares_prev = handle;
}
loop->uv_ares_handles_ = handle;
/* add ares handle to list */
void uv_add_ares_handle(uv_loop_t* loop, uv_ares_task_t* handle) {
assert(loop == handle->loop);
RB_INSERT(uv__ares_tasks, &loop->uv_ares_handles_, handle);
}
/* find matching ares handle in list */
/* TODO: faster lookup */
uv_ares_task_t* uv_find_ares_handle(uv_loop_t* loop, ares_socket_t sock) {
uv_ares_task_t* handle = loop->uv_ares_handles_;
while (handle != NULL) {
if (handle->sock == sock) {
break;
}
handle = handle->ares_next;
}
return handle;
uv_ares_task_t handle;
handle.sock = sock;
return RB_FIND(uv__ares_tasks, &loop->uv_ares_handles_, &handle);
}
/* remove ares handle in list */
void uv_remove_ares_handle(uv_ares_task_t* handle) {
uv_loop_t* loop = handle->loop;
if (handle == loop->uv_ares_handles_) {
loop->uv_ares_handles_ = handle->ares_next;
}
if (handle->ares_next) {
handle->ares_next->ares_prev = handle->ares_prev;
}
if (handle->ares_prev) {
handle->ares_prev->ares_next = handle->ares_next;
}
RB_REMOVE(uv__ares_tasks, &handle->loop->uv_ares_handles_, handle);
}
/* Returns 1 if the uv_ares_handles_ list is empty. 0 otherwise. */
int uv_ares_handles_empty(uv_loop_t* loop) {
return loop->uv_ares_handles_ ? 0 : 1;
return RB_EMPTY(&loop->uv_ares_handles_);
}
int uv_tcp_bind(uv_tcp_t* handle, struct sockaddr_in addr) {
if (handle->type != UV_TCP || addr.sin_family != AF_INET) {
uv__set_artificial_error(handle->loop, UV_EFAULT);
@ -240,6 +226,7 @@ int uv_tcp_bind(uv_tcp_t* handle, struct sockaddr_in addr) {
return uv__tcp_bind(handle, addr);
}
int uv_tcp_bind6(uv_tcp_t* handle, struct sockaddr_in6 addr) {
if (handle->type != UV_TCP || addr.sin6_family != AF_INET6) {
uv__set_artificial_error(handle->loop, UV_EFAULT);
@ -249,6 +236,7 @@ int uv_tcp_bind6(uv_tcp_t* handle, struct sockaddr_in6 addr) {
return uv__tcp_bind6(handle, addr);
}
int uv_udp_bind(uv_udp_t* handle, struct sockaddr_in addr,
unsigned int flags) {
if (handle->type != UV_UDP || addr.sin_family != AF_INET) {
@ -259,6 +247,7 @@ int uv_udp_bind(uv_udp_t* handle, struct sockaddr_in addr,
return uv__udp_bind(handle, addr, flags);
}
int uv_udp_bind6(uv_udp_t* handle, struct sockaddr_in6 addr,
unsigned int flags) {
if (handle->type != UV_UDP || addr.sin6_family != AF_INET6) {
@ -269,6 +258,7 @@ int uv_udp_bind6(uv_udp_t* handle, struct sockaddr_in6 addr,
return uv__udp_bind6(handle, addr, flags);
}
int uv_tcp_connect(uv_connect_t* req,
uv_tcp_t* handle,
struct sockaddr_in address,
@ -281,6 +271,7 @@ int uv_tcp_connect(uv_connect_t* req,
return uv__tcp_connect(req, handle, address, cb);
}
int uv_tcp_connect6(uv_connect_t* req,
uv_tcp_t* handle,
struct sockaddr_in6 address,

4
deps/uv/src/uv-common.h

@ -28,6 +28,7 @@
#define UV_COMMON_H_
#include "uv.h"
#include "tree.h"
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
@ -35,8 +36,7 @@
struct uv_ares_task_s {
UV_HANDLE_FIELDS
UV_ARES_TASK_PRIVATE_FIELDS
uv_ares_task_t* ares_prev;
uv_ares_task_t* ares_next;
RB_ENTRY(uv_ares_task_s) node;
};

1
deps/uv/src/win/cares.c

@ -159,6 +159,7 @@ static void uv_ares_sockstate_cb(void *data, ares_socket_t sock, int read,
}
uv_handle_ares->type = UV_ARES_TASK;
uv_handle_ares->close_cb = NULL;
uv_handle_ares->loop = loop;
uv_handle_ares->data = loop;
uv_handle_ares->sock = sock;
uv_handle_ares->h_wait = NULL;

2
deps/uv/src/win/core.c

@ -59,7 +59,6 @@ static void uv_init(void) {
static void uv_loop_init(uv_loop_t* loop) {
loop->uv_ares_handles_ = NULL;
/* Create an I/O completion port */
loop->iocp = CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 1);
if (loop->iocp == NULL) {
@ -75,6 +74,7 @@ static void uv_loop_init(uv_loop_t* loop) {
loop->endgame_handles = NULL;
RB_INIT(&loop->timers);
RB_INIT(&loop->uv_ares_handles_);
loop->check_handles = NULL;
loop->prepare_handles = NULL;

5
deps/uv/src/win/error.c

@ -104,6 +104,11 @@ uv_err_code uv_translate_sys_error(int sys_errno) {
case WSAENETUNREACH: return UV_ENETUNREACH;
case WSAENOBUFS: return UV_ENOBUFS;
case ERROR_OUTOFMEMORY: return UV_ENOMEM;
case ERROR_CANNOT_MAKE: return UV_ENOSPC;
case ERROR_DISK_FULL: return UV_ENOSPC;
case ERROR_EA_TABLE_FULL: return UV_ENOSPC;
case ERROR_END_OF_MEDIA: return UV_ENOSPC;
case ERROR_HANDLE_DISK_FULL: return UV_ENOSPC;
case ERROR_NOT_CONNECTED: return UV_ENOTCONN;
case WSAENOTCONN: return UV_ENOTCONN;
case ERROR_DIR_NOT_EMPTY: return UV_ENOTEMPTY;

13
deps/uv/src/win/fs-event.c

@ -301,6 +301,13 @@ void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
assert(handle->req_pending);
handle->req_pending = 0;
/* If we're closing, don't report any callbacks, and just push the handle */
/* onto the endgame queue. */
if (handle->flags & UV_HANDLE_CLOSING) {
uv_want_endgame(loop, (uv_handle_t*) handle);
return;
};
file_info = (FILE_NOTIFY_INFORMATION*)(handle->buffer + offset);
if (REQ_SUCCESS(req)) {
@ -438,11 +445,9 @@ void uv_process_fs_event_req(uv_loop_t* loop, uv_req_t* req,
}
offset = file_info->NextEntryOffset;
} while(offset);
} while (offset && !(handle->flags & UV_HANDLE_CLOSING));
} else {
if (!(handle->flags & UV_HANDLE_CLOSING)) {
handle->cb(handle, NULL, UV_CHANGE, 0);
}
handle->cb(handle, NULL, UV_CHANGE, 0);
}
} else {
uv__set_sys_error(loop, GET_REQ_ERROR(req));

15
deps/uv/src/win/handle.c

@ -71,7 +71,6 @@ int uv_is_active(uv_handle_t* handle) {
void uv_close(uv_handle_t* handle, uv_close_cb cb) {
uv_tcp_t* tcp;
uv_pipe_t* pipe;
uv_udp_t* udp;
uv_process_t* process;
@ -88,19 +87,7 @@ void uv_close(uv_handle_t* handle, uv_close_cb cb) {
/* Handle-specific close actions */
switch (handle->type) {
case UV_TCP:
tcp = (uv_tcp_t*)handle;
/* If we don't shutdown before calling closesocket, windows will */
/* silently discard the kernel send buffer and reset the connection. */
if ((tcp->flags & UV_HANDLE_CONNECTION) &&
!(tcp->flags & UV_HANDLE_SHUT)) {
shutdown(tcp->socket, SD_SEND);
tcp->flags |= UV_HANDLE_SHUTTING | UV_HANDLE_SHUT;
}
tcp->flags &= ~(UV_HANDLE_READING | UV_HANDLE_LISTENING);
closesocket(tcp->socket);
if (tcp->reqs_pending == 0) {
uv_want_endgame(loop, handle);
}
uv_tcp_close((uv_tcp_t*)handle);
return;
case UV_NAMED_PIPE:

6
deps/uv/src/win/internal.h

@ -68,11 +68,12 @@ void uv_process_timers(uv_loop_t* loop);
#define UV_HANDLE_NON_OVERLAPPED_PIPE 0x00200000
#define UV_HANDLE_TTY_SAVED_POSITION 0x00400000
#define UV_HANDLE_TTY_SAVED_ATTRIBUTES 0x00800000
#define UV_HANDLE_SHARED_TCP_SERVER 0x01000000
#define UV_HANDLE_SHARED_TCP_SOCKET 0x01000000
#define UV_HANDLE_TCP_NODELAY 0x02000000
#define UV_HANDLE_TCP_KEEPALIVE 0x04000000
#define UV_HANDLE_TCP_SINGLE_ACCEPT 0x08000000
#define UV_HANDLE_TCP_ACCEPT_STATE_CHANGING 0x10000000
#define UV_HANDLE_TCP_SOCKET_CLOSED 0x20000000
void uv_want_endgame(uv_loop_t* loop, uv_handle_t* handle);
void uv_process_endgames(uv_loop_t* loop);
@ -143,7 +144,8 @@ void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle);
int uv_tcp_import(uv_tcp_t* tcp, WSAPROTOCOL_INFOW* socket_protocol_info);
int uv_tcp_import(uv_tcp_t* tcp, WSAPROTOCOL_INFOW* socket_protocol_info,
int tcp_connection);
int uv_tcp_duplicate_socket(uv_tcp_t* handle, int pid,
LPWSAPROTOCOL_INFOW protocol_info);

61
deps/uv/src/win/pipe.c

@ -42,8 +42,9 @@ static const int64_t eof_timeout = 50; /* ms */
static const int default_pending_pipe_instances = 4;
/* IPC protocol flags. */
#define UV_IPC_RAW_DATA 0x0001
#define UV_IPC_UV_STREAM 0x0002
#define UV_IPC_RAW_DATA 0x0001
#define UV_IPC_TCP_SERVER 0x0002
#define UV_IPC_TCP_CONNECTION 0x0004
/* IPC frame header. */
typedef struct {
@ -79,7 +80,8 @@ int uv_pipe_init(uv_loop_t* loop, uv_pipe_t* handle, int ipc) {
handle->name = NULL;
handle->ipc_pid = 0;
handle->remaining_ipc_rawdata_bytes = 0;
handle->pending_socket_info = NULL;
handle->pending_ipc_info.socket_info = NULL;
handle->pending_ipc_info.tcp_connection = 0;
handle->ipc = ipc;
handle->non_overlapped_writes_tail = NULL;
@ -356,9 +358,9 @@ void uv_pipe_endgame(uv_loop_t* loop, uv_pipe_t* handle) {
handle->flags |= UV_HANDLE_CLOSED;
if (handle->flags & UV_HANDLE_CONNECTION) {
if (handle->pending_socket_info) {
free(handle->pending_socket_info);
handle->pending_socket_info = NULL;
if (handle->pending_ipc_info.socket_info) {
free(handle->pending_ipc_info.socket_info);
handle->pending_ipc_info.socket_info = NULL;
}
if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
@ -711,13 +713,14 @@ int uv_pipe_accept(uv_pipe_t* server, uv_stream_t* client) {
uv_pipe_accept_t* req;
if (server->ipc) {
if (!server->pending_socket_info) {
if (!server->pending_ipc_info.socket_info) {
/* No valid pending sockets. */
uv__set_sys_error(loop, WSAEWOULDBLOCK);
return -1;
}
return uv_tcp_import((uv_tcp_t*)client, server->pending_socket_info);
return uv_tcp_import((uv_tcp_t*)client, server->pending_ipc_info.socket_info,
server->pending_ipc_info.tcp_connection);
} else {
pipe_client = (uv_pipe_t*)client;
@ -1051,9 +1054,10 @@ static int uv_pipe_write_impl(uv_loop_t* loop, uv_write_t* req,
return -1;
}
/* Only TCP server handles are supported for sharing. */
if (send_handle && (send_handle->type != UV_TCP ||
send_handle->flags & UV_HANDLE_CONNECTION)) {
/* Only TCP handles are supported for sharing. */
if (send_handle && ((send_handle->type != UV_TCP) ||
(!(send_handle->flags & UV_HANDLE_BOUND) &&
!(send_handle->flags & UV_HANDLE_CONNECTION)))) {
uv__set_artificial_error(loop, UV_ENOTSUP);
return -1;
}
@ -1091,7 +1095,11 @@ static int uv_pipe_write_impl(uv_loop_t* loop, uv_write_t* req,
&ipc_frame.socket_info)) {
return -1;
}
ipc_frame.header.flags |= UV_IPC_UV_STREAM;
ipc_frame.header.flags |= UV_IPC_TCP_SERVER;
if (tcp_send_handle->flags & UV_HANDLE_CONNECTION) {
ipc_frame.header.flags |= UV_IPC_TCP_CONNECTION;
}
}
if (bufcnt == 1) {
@ -1132,7 +1140,7 @@ static int uv_pipe_write_impl(uv_loop_t* loop, uv_write_t* req,
result = WriteFile(handle->handle,
&ipc_frame,
ipc_frame.header.flags & UV_IPC_UV_STREAM ?
ipc_frame.header.flags & UV_IPC_TCP_SERVER ?
sizeof(ipc_frame) : sizeof(ipc_frame.header),
NULL,
&ipc_header_req->overlapped);
@ -1146,7 +1154,7 @@ static int uv_pipe_write_impl(uv_loop_t* loop, uv_write_t* req,
ipc_header_req->queued_bytes = 0;
} else {
/* Request queued by the kernel. */
ipc_header_req->queued_bytes = ipc_frame.header.flags & UV_IPC_UV_STREAM ?
ipc_header_req->queued_bytes = ipc_frame.header.flags & UV_IPC_TCP_SERVER ?
sizeof(ipc_frame) : sizeof(ipc_frame.header);
handle->write_queue_size += req->queued_bytes;
}
@ -1330,9 +1338,10 @@ void uv_process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
}
assert(bytes == sizeof(ipc_frame.header));
assert(ipc_frame.header.flags <= (UV_IPC_UV_STREAM | UV_IPC_RAW_DATA));
assert(ipc_frame.header.flags <= (UV_IPC_TCP_SERVER | UV_IPC_RAW_DATA |
UV_IPC_TCP_CONNECTION));
if (ipc_frame.header.flags & UV_IPC_UV_STREAM) {
if (ipc_frame.header.flags & UV_IPC_TCP_SERVER) {
assert(avail - sizeof(ipc_frame.header) >=
sizeof(ipc_frame.socket_info));
@ -1350,14 +1359,16 @@ void uv_process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
assert(bytes == sizeof(ipc_frame) - sizeof(ipc_frame.header));
/* Store the pending socket info. */
assert(!handle->pending_socket_info);
handle->pending_socket_info =
(WSAPROTOCOL_INFOW*)malloc(sizeof(*(handle->pending_socket_info)));
if (!handle->pending_socket_info) {
assert(!handle->pending_ipc_info.socket_info);
handle->pending_ipc_info.socket_info =
(WSAPROTOCOL_INFOW*)malloc(sizeof(*(handle->pending_ipc_info.socket_info)));
if (!handle->pending_ipc_info.socket_info) {
uv_fatal_error(ERROR_OUTOFMEMORY, "malloc");
}
*(handle->pending_socket_info) = ipc_frame.socket_info;
*(handle->pending_ipc_info.socket_info) = ipc_frame.socket_info;
handle->pending_ipc_info.tcp_connection =
ipc_frame.header.flags & UV_IPC_TCP_CONNECTION;
}
if (ipc_frame.header.flags & UV_IPC_RAW_DATA) {
@ -1385,14 +1396,14 @@ void uv_process_pipe_read_req(uv_loop_t* loop, uv_pipe_t* handle,
handle->remaining_ipc_rawdata_bytes - bytes;
if (handle->read2_cb) {
handle->read2_cb(handle, bytes, buf,
handle->pending_socket_info ? UV_TCP : UV_UNKNOWN_HANDLE);
handle->pending_ipc_info.socket_info ? UV_TCP : UV_UNKNOWN_HANDLE);
} else if (handle->read_cb) {
handle->read_cb((uv_stream_t*)handle, bytes, buf);
}
if (handle->pending_socket_info) {
free(handle->pending_socket_info);
handle->pending_socket_info = NULL;
if (handle->pending_ipc_info.socket_info) {
free(handle->pending_ipc_info.socket_info);
handle->pending_ipc_info.socket_info = NULL;
}
} else {
handle->read_cb((uv_stream_t*)handle, bytes, buf);

177
deps/uv/src/win/tcp.c

@ -199,6 +199,11 @@ void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
assert(!(handle->flags & UV_HANDLE_CLOSED));
handle->flags |= UV_HANDLE_CLOSED;
if (!(handle->flags & UV_HANDLE_TCP_SOCKET_CLOSED)) {
closesocket(handle->socket);
handle->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
}
if (!(handle->flags & UV_HANDLE_CONNECTION) && handle->accept_reqs) {
if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
for (i = 0; i < uv_simultaneous_server_accepts; i++) {
@ -218,6 +223,18 @@ void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
handle->accept_reqs = NULL;
}
if (handle->flags & UV_HANDLE_CONNECTION &&
handle->flags & UV_HANDLE_EMULATE_IOCP) {
if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
UnregisterWait(handle->read_req.wait_handle);
handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
}
if (handle->read_req.event_handle) {
CloseHandle(handle->read_req.event_handle);
handle->read_req.event_handle = NULL;
}
}
if (handle->close_cb) {
handle->close_cb((uv_handle_t*)handle);
}
@ -341,7 +358,7 @@ static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
/* Prepare the overlapped structure. */
memset(&(req->overlapped), 0, sizeof(req->overlapped));
if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
req->overlapped.hEvent = (HANDLE) ((DWORD) req->event_handle | 1);
req->overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
}
success = handle->func_acceptex(handle->socket,
@ -415,6 +432,13 @@ static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
buf.len = 0;
}
/* Prepare the overlapped structure. */
memset(&(req->overlapped), 0, sizeof(req->overlapped));
if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
assert(req->event_handle);
req->overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
}
flags = 0;
result = WSARecv(handle->socket,
(WSABUF*)&buf,
@ -434,6 +458,14 @@ static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
/* The req will be processed with IOCP. */
handle->flags |= UV_HANDLE_READ_PENDING;
handle->reqs_pending++;
if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
req->wait_handle == INVALID_HANDLE_VALUE &&
!RegisterWaitForSingleObject(&req->wait_handle,
req->overlapped.hEvent, post_completion, (void*) req,
INFINITE, WT_EXECUTEINWAITTHREAD)) {
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
}
} else {
/* Make this req pending reporting an error. */
SET_REQ_ERROR(req, WSAGetLastError());
@ -466,7 +498,7 @@ int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
}
}
if (!(handle->flags & UV_HANDLE_SHARED_TCP_SERVER) &&
if (!(handle->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
listen(handle->socket, backlog) == SOCKET_ERROR) {
uv__set_sys_error(loop, WSAGetLastError());
return -1;
@ -593,10 +625,18 @@ int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
handle->read_cb = read_cb;
handle->alloc_cb = alloc_cb;
/* If reading was stopped and then started again, there could stell be a */
/* If reading was stopped and then started again, there could still be a */
/* read request pending. */
if (!(handle->flags & UV_HANDLE_READ_PENDING))
if (!(handle->flags & UV_HANDLE_READ_PENDING)) {
if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
!handle->read_req.event_handle) {
handle->read_req.event_handle = CreateEvent(NULL, 0, 0, NULL);
if (!handle->read_req.event_handle) {
uv_fatal_error(GetLastError(), "CreateEvent");
}
}
uv_tcp_queue_read(loop, handle);
}
return 0;
}
@ -790,6 +830,16 @@ int uv_tcp_write(uv_loop_t* loop, uv_write_t* req, uv_tcp_t* handle,
req->cb = cb;
memset(&req->overlapped, 0, sizeof(req->overlapped));
/* Prepare the overlapped structure. */
memset(&(req->overlapped), 0, sizeof(req->overlapped));
if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
req->event_handle = CreateEvent(NULL, 0, 0, NULL);
if (!req->event_handle) {
uv_fatal_error(GetLastError(), "CreateEvent");
}
req->overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
}
result = WSASend(handle->socket,
(WSABUF*)bufs,
bufcnt,
@ -812,6 +862,14 @@ int uv_tcp_write(uv_loop_t* loop, uv_write_t* req, uv_tcp_t* handle,
handle->write_reqs_pending++;
handle->write_queue_size += req->queued_bytes;
uv_ref(loop);
if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
req->wait_handle == INVALID_HANDLE_VALUE &&
!RegisterWaitForSingleObject(&req->wait_handle,
req->overlapped.hEvent, post_completion, (void*) req,
INFINITE, WT_EXECUTEINWAITTHREAD)) {
SET_REQ_ERROR(req, GetLastError());
uv_insert_pending_req(loop, (uv_req_t*)req);
}
} else {
/* Send failed due to an error. */
uv__set_sys_error(loop, WSAGetLastError());
@ -945,6 +1003,17 @@ void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
assert(handle->write_queue_size >= req->queued_bytes);
handle->write_queue_size -= req->queued_bytes;
if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
if (req->wait_handle != INVALID_HANDLE_VALUE) {
UnregisterWait(req->wait_handle);
req->wait_handle = INVALID_HANDLE_VALUE;
}
if (req->event_handle) {
CloseHandle(req->event_handle);
req->event_handle = NULL;
}
}
if (req->cb) {
uv__set_sys_error(loop, GET_REQ_SOCK_ERROR(req));
((uv_write_cb)req->cb)(req, loop->last_err.code == UV_OK ? 0 : -1);
@ -1036,7 +1105,8 @@ void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
}
int uv_tcp_import(uv_tcp_t* tcp, WSAPROTOCOL_INFOW* socket_protocol_info) {
int uv_tcp_import(uv_tcp_t* tcp, WSAPROTOCOL_INFOW* socket_protocol_info,
int tcp_connection) {
SOCKET socket = WSASocketW(AF_INET,
SOCK_STREAM,
IPPROTO_IP,
@ -1050,13 +1120,22 @@ int uv_tcp_import(uv_tcp_t* tcp, WSAPROTOCOL_INFOW* socket_protocol_info) {
}
tcp->flags |= UV_HANDLE_BOUND;
tcp->flags |= UV_HANDLE_SHARED_TCP_SERVER;
tcp->flags |= UV_HANDLE_SHARED_TCP_SOCKET;
if (tcp_connection) {
uv_connection_init((uv_stream_t*)tcp);
}
if (socket_protocol_info->iAddressFamily == AF_INET6) {
tcp->flags |= UV_HANDLE_IPV6;
}
return uv_tcp_set_socket(tcp->loop, tcp, socket, 1);
if (uv_tcp_set_socket(tcp->loop, tcp, socket, 1) != 0) {
return -1;
}
tcp->loop->active_tcp_streams++;
return 0;
}
@ -1097,28 +1176,24 @@ int uv_tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay) {
int uv_tcp_duplicate_socket(uv_tcp_t* handle, int pid,
LPWSAPROTOCOL_INFOW protocol_info) {
assert(!(handle->flags & UV_HANDLE_CONNECTION));
/*
* We're about to share the socket with another process. Because
* this is a listening socket, we assume that the other process will
* be accepting connections on it. So, before sharing the socket
* with another process, we call listen here in the parent process.
* This needs to be modified if the socket is shared with
* another process for anything other than accepting connections.
*/
if (!(handle->flags & UV_HANDLE_LISTENING)) {
if (!(handle->flags & UV_HANDLE_BOUND)) {
uv__set_artificial_error(handle->loop, UV_EINVAL);
return -1;
}
if (listen(handle->socket, SOMAXCONN) == SOCKET_ERROR) {
uv__set_sys_error(handle->loop, WSAGetLastError());
return -1;
if (!(handle->flags & UV_HANDLE_CONNECTION)) {
/*
* We're about to share the socket with another process. Because
* this is a listening socket, we assume that the other process will
* be accepting connections on it. So, before sharing the socket
* with another process, we call listen here in the parent process.
*/
if (!(handle->flags & UV_HANDLE_LISTENING)) {
if (!(handle->flags & UV_HANDLE_BOUND)) {
uv__set_artificial_error(handle->loop, UV_EINVAL);
return -1;
}
if (listen(handle->socket, SOMAXCONN) == SOCKET_ERROR) {
uv__set_sys_error(handle->loop, WSAGetLastError());
return -1;
}
}
handle->flags |= UV_HANDLE_SHARED_TCP_SERVER;
}
if (WSADuplicateSocketW(handle->socket, pid, protocol_info)) {
@ -1126,6 +1201,8 @@ int uv_tcp_duplicate_socket(uv_tcp_t* handle, int pid,
return -1;
}
handle->flags |= UV_HANDLE_SHARED_TCP_SOCKET;
return 0;
}
@ -1161,4 +1238,46 @@ int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
}
return 0;
}
}
/*
 * Close a TCP handle's socket, preferring a graceful close when possible.
 * Winsock only closes gracefully when no reads are pending, so a pending
 * read is either shut down (non-shared sockets) or its I/O is cancelled
 * (shared sockets with no non-IFS LSPs stacked on top).
 */
void uv_tcp_close(uv_tcp_t* tcp) {
  int non_ifs_lsp;
  int close_socket = 1;  /* Default: close the socket right here, below. */

  /*
   * In order for winsock to do a graceful close there must not be
   * any pending reads.
   */
  if (tcp->flags & UV_HANDLE_READ_PENDING) {
    /* Just do shutdown on non-shared sockets, which ensures graceful close. */
    if (!(tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET)) {
      shutdown(tcp->socket, SD_SEND);
      tcp->flags |= UV_HANDLE_SHUT;
    } else {
      /* Check if we have any non-IFS LSPs stacked on top of TCP */
      non_ifs_lsp = (tcp->flags & UV_HANDLE_IPV6) ? uv_tcp_non_ifs_lsp_ipv6 :
                                                    uv_tcp_non_ifs_lsp_ipv4;
      if (!non_ifs_lsp) {
        /*
         * Shared socket with no non-IFS LSPs, request to cancel pending I/O.
         * The socket will be closed inside endgame.
         */
        CancelIo((HANDLE)tcp->socket);
        close_socket = 0;
      }
    }
  }

  /* The handle is going away: stop reading and listening. */
  tcp->flags &= ~(UV_HANDLE_READING | UV_HANDLE_LISTENING);

  if (close_socket) {
    closesocket(tcp->socket);
    tcp->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
  }

  /* With no requests in flight, the endgame can be queued immediately. */
  if (tcp->reqs_pending == 0) {
    uv_want_endgame(tcp->loop, (uv_handle_t*)tcp);
  }
}

2
deps/uv/src/win/winapi.h

@ -4327,7 +4327,7 @@ typedef NTSTATUS (NTAPI *sNtSetInformationFile)
# define FILE_SKIP_COMPLETION_PORT_ON_SUCCESS 0x1
#endif
#ifdef FILE_SKIP_SET_EVENT_ON_HANDLE
#ifndef FILE_SKIP_SET_EVENT_ON_HANDLE
# define FILE_SKIP_SET_EVENT_ON_HANDLE 0x2
#endif

14
deps/uv/test/benchmark-ares.c

@ -28,15 +28,13 @@
static uv_loop_t* loop;
ares_channel channel;
struct ares_options options;
int optmask;
static ares_channel channel;
static struct ares_options options;
static int optmask;
struct in_addr testsrv;
int ares_callbacks;
int ares_errors;
int argument;
static int ares_callbacks;
static int ares_errors;
static int argument;
#define NUM_CALLS_TO_START 1000

7
deps/uv/test/dns-server.c

@ -50,14 +50,12 @@ typedef struct {
static uv_loop_t* loop;
static int server_closed;
static uv_tcp_t server;
static void after_write(uv_write_t* req, int status);
static void after_read(uv_stream_t*, ssize_t nread, uv_buf_t buf);
static void on_close(uv_handle_t* peer);
static void on_server_close(uv_handle_t* handle);
static void on_connection(uv_stream_t*, int status);
#define WRITE_BUF_LEN (64*1024)
@ -283,11 +281,6 @@ static void on_connection(uv_stream_t* server, int status) {
}
static void on_server_close(uv_handle_t* handle) {
ASSERT(handle == (uv_handle_t*)&server);
}
static int dns_start(int port) {
struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", port);
int r;

216
deps/uv/test/run-tests.c

@ -32,6 +32,11 @@
/* The time in milliseconds after which a single test times out. */
#define TEST_TIMEOUT 5000
int ipc_helper(int listen_after_write);
int ipc_helper_tcp_connection(void);
int ipc_send_recv_helper(void);
int stdio_over_pipes_helper(void);
static int maybe_run_test(int argc, char **argv);
@ -51,212 +56,6 @@ int main(int argc, char **argv) {
}
static uv_pipe_t channel;
static uv_tcp_t tcp_server;
static uv_write_t conn_notify_req;
static int close_cb_called;
static int connection_accepted;
static uv_pipe_t stdin_pipe;
static uv_pipe_t stdout_pipe;
static int on_pipe_read_called;
static int after_write_called;
static void close_cb(uv_handle_t* handle) {
close_cb_called++;
}
static void close_conn_cb(uv_handle_t* handle) {
free(handle);
close_cb_called++;
}
void conn_notify_write_cb(uv_write_t* req, int status) {
uv_close((uv_handle_t*)&tcp_server, close_cb);
uv_close((uv_handle_t*)&channel, close_cb);
}
static void ipc_on_connection(uv_stream_t* server, int status) {
int r;
uv_buf_t buf;
uv_tcp_t* conn;
if (!connection_accepted) {
/*
* Accept the connection and close it. Also let the other
* side know.
*/
ASSERT(status == 0);
ASSERT((uv_stream_t*)&tcp_server == server);
conn = malloc(sizeof(*conn));
ASSERT(conn);
r = uv_tcp_init(server->loop, conn);
ASSERT(r == 0);
r = uv_accept(server, (uv_stream_t*)conn);
ASSERT(r == 0);
uv_close((uv_handle_t*)conn, close_conn_cb);
buf = uv_buf_init("accepted_connection\n", 20);
r = uv_write2(&conn_notify_req, (uv_stream_t*)&channel, &buf, 1,
NULL, conn_notify_write_cb);
ASSERT(r == 0);
connection_accepted = 1;
}
}
static int ipc_helper(int listen_after_write) {
/*
* This is launched from test-ipc.c. stdin is a duplex channel that we
* over which a handle will be transmitted. In this initial version only
* data is transfered over the channel. XXX edit this comment after handle
* transfer is added.
*/
uv_write_t write_req;
int r;
uv_buf_t buf;
r = uv_pipe_init(uv_default_loop(), &channel, 1);
ASSERT(r == 0);
uv_pipe_open(&channel, 0);
ASSERT(uv_is_readable((uv_stream_t*) &channel));
ASSERT(uv_is_writable((uv_stream_t*) &channel));
r = uv_tcp_init(uv_default_loop(), &tcp_server);
ASSERT(r == 0);
r = uv_tcp_bind(&tcp_server, uv_ip4_addr("0.0.0.0", TEST_PORT));
ASSERT(r == 0);
if (!listen_after_write) {
r = uv_listen((uv_stream_t*)&tcp_server, 12, ipc_on_connection);
ASSERT(r == 0);
}
buf = uv_buf_init("hello\n", 6);
r = uv_write2(&write_req, (uv_stream_t*)&channel, &buf, 1,
(uv_stream_t*)&tcp_server, NULL);
ASSERT(r == 0);
if (listen_after_write) {
r = uv_listen((uv_stream_t*)&tcp_server, 12, ipc_on_connection);
ASSERT(r == 0);
}
r = uv_run(uv_default_loop());
ASSERT(r == 0);
ASSERT(connection_accepted == 1);
ASSERT(close_cb_called == 3);
return 0;
}
void on_pipe_read(uv_stream_t* tcp, ssize_t nread, uv_buf_t buf) {
ASSERT(nread > 0);
ASSERT(memcmp("hello world\n", buf.base, nread) == 0);
on_pipe_read_called++;
free(buf.base);
uv_close((uv_handle_t*)&stdin_pipe, close_cb);
uv_close((uv_handle_t*)&stdout_pipe, close_cb);
}
static uv_buf_t on_pipe_read_alloc(uv_handle_t* handle,
size_t suggested_size) {
uv_buf_t buf;
buf.base = (char*)malloc(suggested_size);
buf.len = suggested_size;
return buf;
}
static void after_pipe_write(uv_write_t* req, int status) {
ASSERT(status == 0);
after_write_called++;
}
static int stdio_over_pipes_helper() {
/* Write several buffers to test that the write order is preserved. */
char* buffers[] = {
"he",
"ll",
"o ",
"wo",
"rl",
"d",
"\n"
};
uv_write_t write_req[ARRAY_SIZE(buffers)];
uv_buf_t buf[ARRAY_SIZE(buffers)];
int r, i;
uv_loop_t* loop = uv_default_loop();
ASSERT(UV_NAMED_PIPE == uv_guess_handle(0));
ASSERT(UV_NAMED_PIPE == uv_guess_handle(1));
r = uv_pipe_init(loop, &stdin_pipe, 0);
ASSERT(r == 0);
r = uv_pipe_init(loop, &stdout_pipe, 0);
ASSERT(r == 0);
uv_pipe_open(&stdin_pipe, 0);
uv_pipe_open(&stdout_pipe, 1);
/* Unref both stdio handles to make sure that all writes complete. */
uv_unref(loop);
uv_unref(loop);
for (i = 0; i < ARRAY_SIZE(buffers); i++) {
buf[i] = uv_buf_init((char*)buffers[i], strlen(buffers[i]));
}
for (i = 0; i < ARRAY_SIZE(buffers); i++) {
r = uv_write(&write_req[i], (uv_stream_t*)&stdout_pipe, &buf[i], 1,
after_pipe_write);
ASSERT(r == 0);
}
uv_run(loop);
ASSERT(after_write_called == 7);
ASSERT(on_pipe_read_called == 0);
ASSERT(close_cb_called == 0);
uv_ref(loop);
uv_ref(loop);
r = uv_read_start((uv_stream_t*)&stdin_pipe, on_pipe_read_alloc,
on_pipe_read);
ASSERT(r == 0);
uv_run(loop);
ASSERT(after_write_called == 7);
ASSERT(on_pipe_read_called == 1);
ASSERT(close_cb_called == 2);
return 0;
}
static int maybe_run_test(int argc, char **argv) {
if (strcmp(argv[1], "--list") == 0) {
print_tests(stdout);
@ -272,10 +71,13 @@ static int maybe_run_test(int argc, char **argv) {
}
if (strcmp(argv[1], "ipc_send_recv_helper") == 0) {
int ipc_send_recv_helper(void); /* See test-ipc-send-recv.c */
return ipc_send_recv_helper();
}
if (strcmp(argv[1], "ipc_helper_tcp_connection") == 0) {
return ipc_helper_tcp_connection();
}
if (strcmp(argv[1], "stdio_over_pipes_helper") == 0) {
return stdio_over_pipes_helper();
}

3
deps/uv/test/runner.c

@ -86,6 +86,7 @@ int run_test(const char* test, int timeout, int benchmark_output) {
int i;
status = 255;
main_proc = NULL;
process_count = 0;
/* If it's a helper the user asks for, start it directly. */
@ -123,7 +124,7 @@ int run_test(const char* test, int timeout, int benchmark_output) {
uv_sleep(250);
/* Now start the test itself. */
for (main_proc = NULL, task = TASKS; task->main; task++) {
for (task = TASKS; task->main; task++) {
if (strcmp(test, task->task_name) != 0) {
continue;
}

106
deps/uv/test/test-fs-event.c

@ -27,16 +27,16 @@
static uv_fs_event_t fs_event;
static uv_timer_t timer;
static int timer_cb_called;
static int close_cb_called;
static int fs_event_cb_called;
static int timer_cb_touch_called;
static int timer_cb_called = 0;
static int close_cb_called = 0;
static int fs_event_cb_called = 0;
static int timer_cb_touch_called = 0;
static void create_dir(uv_loop_t* loop, const char* name) {
int r;
uv_fs_t req;
r = uv_fs_mkdir(loop, &req, name, 0755, NULL);
ASSERT(r == 0);
ASSERT(r == 0 || uv_last_error(loop).code == UV_EEXIST);
uv_fs_req_cleanup(&req);
}
@ -137,7 +137,7 @@ static void timer_cb_dir(uv_timer_t* handle, int status) {
static void timer_cb_file(uv_timer_t* handle, int status) {
++timer_cb_called;
if (timer_cb_called == 1) {
touch_file(handle->loop, "watch_dir/file1");
} else {
@ -291,7 +291,7 @@ TEST_IMPL(fs_event_no_callback_on_close) {
static void fs_event_fail(uv_fs_event_t* handle, const char* filename,
int events, int status) {
int events, int status) {
ASSERT(0 && "should never be called");
}
@ -328,3 +328,95 @@ TEST_IMPL(fs_event_immediate_close) {
return 0;
}
/*
 * Regression test: closing an fs_event handle while an event is already
 * pending must invoke only the close callback, never the (failing) event
 * callback.
 */
TEST_IMPL(fs_event_close_with_pending_event) {
  uv_loop_t* loop;
  uv_fs_t fs_req;
  int r;

  loop = uv_default_loop();

  create_dir(loop, "watch_dir");
  create_file(loop, "watch_dir/file");

  /* fs_event_fail aborts if it is ever called. */
  r = uv_fs_event_init(loop, &fs_event, "watch_dir", fs_event_fail, 0);
  ASSERT(r == 0);

  /* Generate an fs event. */
  touch_file(loop, "watch_dir/file");

  uv_close((uv_handle_t*)&fs_event, close_cb);

  uv_run(loop);

  ASSERT(close_cb_called == 1);

  /* Clean up */
  r = uv_fs_unlink(loop, &fs_req, "watch_dir/file", NULL);
  ASSERT(r == 0);
  r = uv_fs_rmdir(loop, &fs_req, "watch_dir", NULL);
  ASSERT(r == 0);

  return 0;
}
/*
 * Event callback that closes its own handle from within the third
 * invocation; the enclosing test checks no further events are delivered.
 */
static void fs_event_cb_close(uv_fs_event_t* handle, const char* filename,
    int events, int status) {
  ASSERT(status == 0);

  /* Never expect more than three events before the handle is closed. */
  ASSERT(fs_event_cb_called < 3);
  ++fs_event_cb_called;

  if (fs_event_cb_called == 3) {
    uv_close((uv_handle_t*) handle, close_cb);
  }
}
/*
 * Tests closing an fs_event handle from inside its own event callback:
 * five events are generated, but the callback closes the handle after the
 * third, so at most three callbacks may fire.
 */
TEST_IMPL(fs_event_close_in_callback) {
  uv_loop_t* loop;
  uv_fs_t fs_req;
  int r;

  loop = uv_default_loop();

  create_dir(loop, "watch_dir");
  create_file(loop, "watch_dir/file1");
  create_file(loop, "watch_dir/file2");
  create_file(loop, "watch_dir/file3");
  create_file(loop, "watch_dir/file4");
  create_file(loop, "watch_dir/file5");

  r = uv_fs_event_init(loop, &fs_event, "watch_dir", fs_event_cb_close, 0);
  ASSERT(r == 0);

  /* Generate a couple of fs events. */
  touch_file(loop, "watch_dir/file1");
  touch_file(loop, "watch_dir/file2");
  touch_file(loop, "watch_dir/file3");
  touch_file(loop, "watch_dir/file4");
  touch_file(loop, "watch_dir/file5");

  uv_run(loop);

  ASSERT(close_cb_called == 1);
  ASSERT(fs_event_cb_called == 3);

  /* Clean up */
  r = uv_fs_unlink(loop, &fs_req, "watch_dir/file1", NULL);
  ASSERT(r == 0);
  r = uv_fs_unlink(loop, &fs_req, "watch_dir/file2", NULL);
  ASSERT(r == 0);
  r = uv_fs_unlink(loop, &fs_req, "watch_dir/file3", NULL);
  ASSERT(r == 0);
  r = uv_fs_unlink(loop, &fs_req, "watch_dir/file4", NULL);
  ASSERT(r == 0);
  r = uv_fs_unlink(loop, &fs_req, "watch_dir/file5", NULL);
  ASSERT(r == 0);
  r = uv_fs_rmdir(loop, &fs_req, "watch_dir", NULL);
  ASSERT(r == 0);

  return 0;
}

2
deps/uv/test/test-get-memory.c

@ -26,7 +26,7 @@ TEST_IMPL(get_memory) {
uint64_t free_mem = uv_get_free_memory();
uint64_t total_mem = uv_get_total_memory();
printf("free_mem=%llu, total_mem=%llu\n", free_mem, total_mem);
printf("free_mem=%zu, total_mem=%zu\n", (size_t)free_mem, (size_t)total_mem);
ASSERT(free_mem > 0);
ASSERT(total_mem > 0);

345
deps/uv/test/test-ipc.c

@ -27,17 +27,28 @@
static uv_pipe_t channel;
static uv_tcp_t tcp_server;
static uv_tcp_t tcp_connection;
static int exit_cb_called;
static int read2_cb_called;
static int tcp_write_cb_called;
static int tcp_read_cb_called;
static int on_pipe_read_called;
static int local_conn_accepted;
static int remote_conn_accepted;
static int tcp_server_listening;
static uv_write_t write_req;
static uv_pipe_t channel;
static uv_tcp_t tcp_server;
static uv_write_t conn_notify_req;
static int close_cb_called;
static int connection_accepted;
static int tcp_conn_read_cb_called;
static int tcp_conn_write_cb_called;
typedef struct {
uv_connect_t conn_req;
uv_write_t tcp_write_req;
uv_tcp_t conn;
} tcp_conn;
@ -49,7 +60,7 @@ static void close_server_conn_cb(uv_handle_t* handle) {
}
static void ipc_on_connection(uv_stream_t* server, int status) {
static void on_connection(uv_stream_t* server, int status) {
uv_tcp_t* conn;
int r;
@ -156,7 +167,7 @@ static void on_read(uv_pipe_t* pipe, ssize_t nread, uv_buf_t buf,
r = uv_accept((uv_stream_t*)pipe, (uv_stream_t*)&tcp_server);
ASSERT(r == 0);
r = uv_listen((uv_stream_t*)&tcp_server, 12, ipc_on_connection);
r = uv_listen((uv_stream_t*)&tcp_server, 12, on_connection);
ASSERT(r == 0);
tcp_server_listening = 1;
@ -214,32 +225,127 @@ void spawn_helper(uv_pipe_t* channel,
}
static int run_ipc_test(const char* helper) {
/* Write-completion callback for the reply sent on the accepted TCP
 * connection; verifies the request belongs to tcp_connection and counts. */
static void on_tcp_write(uv_write_t* req, int status) {
  ASSERT(status == 0);
  ASSERT(req->handle == (uv_stream_t*)&tcp_connection);
  tcp_write_cb_called++;
}
/* Allocation callback: hands libuv a freshly malloc()ed buffer of the
 * suggested size.  The matching read callback frees buf.base. */
static uv_buf_t on_read_alloc(uv_handle_t* handle, size_t suggested_size) {
  uv_buf_t result;
  result.len = suggested_size;
  result.base = (char*) malloc(suggested_size);
  return result;
}
/*
 * Reads the child's "hello again\n" payload from the accepted TCP
 * connection, then tears down both the connection and the IPC channel,
 * which ends the test loop.
 */
static void on_tcp_read(uv_stream_t* tcp, ssize_t nread, uv_buf_t buf) {
  ASSERT(nread > 0);
  ASSERT(memcmp("hello again\n", buf.base, nread) == 0);
  ASSERT(tcp == (uv_stream_t*)&tcp_connection);
  free(buf.base);

  tcp_read_cb_called++;

  /* NULL close callbacks: nothing more needs to run after these close. */
  uv_close((uv_handle_t*)tcp, NULL);
  uv_close((uv_handle_t*)&channel, NULL);
}
/*
 * read2 callback for the ipc_tcp_connection test: receives "hello\n" over
 * the IPC pipe together with a pending TCP handle, accepts that handle
 * into tcp_connection, then writes "world\n" and starts reading replies.
 */
static void on_read_connection(uv_pipe_t* pipe, ssize_t nread, uv_buf_t buf,
    uv_handle_type pending) {
  int r;
  uv_buf_t outbuf;
  uv_err_t err;

  if (nread == 0) {
    /* Everything OK, but nothing read. */
    free(buf.base);
    return;
  }

  if (nread < 0) {
    /* EOF is a normal end of channel; anything else is fatal. */
    err = uv_last_error(pipe->loop);
    if (err.code == UV_EOF) {
      free(buf.base);
      return;
    }
    printf("error recving on channel: %s\n", uv_strerror(err));
    abort();
  }

  fprintf(stderr, "got %d bytes\n", (int)nread);

  ASSERT(nread > 0 && buf.base && pending != UV_UNKNOWN_HANDLE);
  read2_cb_called++;

  /* Accept the pending TCP connection */
  ASSERT(pending == UV_TCP);
  r = uv_tcp_init(uv_default_loop(), &tcp_connection);
  ASSERT(r == 0);
  r = uv_accept((uv_stream_t*)pipe, (uv_stream_t*)&tcp_connection);
  ASSERT(r == 0);

  /* Make sure that the expected data is correctly multiplexed. */
  ASSERT(memcmp("hello\n", buf.base, nread) == 0);

  /* Write/read to/from the connection */
  outbuf = uv_buf_init("world\n", 6);
  r = uv_write(&write_req, (uv_stream_t*)&tcp_connection, &outbuf, 1,
    on_tcp_write);
  ASSERT(r == 0);

  r = uv_read_start((uv_stream_t*)&tcp_connection, on_read_alloc, on_tcp_read);
  ASSERT(r == 0);

  free(buf.base);
}
/*
 * Shared driver for the IPC tests: spawns `helper` as a child process with
 * an IPC pipe in `channel`, starts reading with `read_cb` (a uv_read2_cb so
 * pending handles can be received), and runs the loop to completion.
 */
static int run_ipc_test(const char* helper, uv_read2_cb read_cb) {
  uv_process_t process;
  int r;

  spawn_helper(&channel, &process, helper);
  uv_read2_start((uv_stream_t*)&channel, on_alloc, read_cb);

  r = uv_run(uv_default_loop());
  ASSERT(r == 0);

  return 0;
}
TEST_IMPL(ipc_listen_before_write) {
int r = run_ipc_test("ipc_helper_listen_before_write", on_read);
ASSERT(local_conn_accepted == 1);
ASSERT(remote_conn_accepted == 1);
ASSERT(read2_cb_called == 1);
ASSERT(exit_cb_called == 1);
return 0;
return r;
}
TEST_IMPL(ipc_listen_before_write) {
return run_ipc_test("ipc_helper_listen_before_write");
TEST_IMPL(ipc_listen_after_write) {
int r = run_ipc_test("ipc_helper_listen_after_write", on_read);
ASSERT(local_conn_accepted == 1);
ASSERT(remote_conn_accepted == 1);
ASSERT(read2_cb_called == 1);
ASSERT(exit_cb_called == 1);
return r;
}
TEST_IMPL(ipc_listen_after_write) {
return run_ipc_test("ipc_helper_listen_after_write");
TEST_IMPL(ipc_tcp_connection) {
int r = run_ipc_test("ipc_helper_tcp_connection", on_read_connection);
ASSERT(read2_cb_called == 1);
ASSERT(tcp_write_cb_called == 1);
ASSERT(tcp_read_cb_called == 1);
ASSERT(exit_cb_called == 1);
return r;
}
@ -287,3 +393,220 @@ TEST_IMPL(listen_no_simultaneous_accepts) {
return 0;
}
#endif
/* Everything here runs in a child process. */
tcp_conn conn;
/* Generic close callback: tallies how many handles have finished closing. */
static void close_cb(uv_handle_t* handle) {
  close_cb_called += 1;
}
/* Once the "accepted_connection" notification has been written to the
 * parent, the helper is done: close the server and the IPC channel. */
static void conn_notify_write_cb(uv_write_t* req, int status) {
  uv_close((uv_handle_t*)&tcp_server, close_cb);
  uv_close((uv_handle_t*)&channel, close_cb);
}
/* The final "hello again\n" write completed: close everything the child
 * still holds (connection, IPC channel, server) and count the callback. */
static void tcp_connection_write_cb(uv_write_t* req, int status) {
  ASSERT((uv_handle_t*)&conn.conn == (uv_handle_t*)req->handle);
  uv_close((uv_handle_t*)req->handle, close_cb);
  uv_close((uv_handle_t*)&channel, close_cb);
  uv_close((uv_handle_t*)&tcp_server, close_cb);
  tcp_conn_write_cb_called++;
}
/*
 * Child-side read callback: expects the parent's "world\n" on the TCP
 * connection and answers with "hello again\n".  EOF is tolerated silently;
 * any other read error aborts the helper.
 */
static void on_tcp_child_process_read(uv_stream_t* tcp, ssize_t nread, uv_buf_t buf) {
  uv_buf_t outbuf;
  int r;

  if (nread < 0) {
    if (uv_last_error(tcp->loop).code == UV_EOF) {
      free(buf.base);
      return;
    }
    printf("error recving on tcp connection: %s\n",
      uv_strerror(uv_last_error(tcp->loop)));
    abort();
  }

  ASSERT(nread > 0);
  ASSERT(memcmp("world\n", buf.base, nread) == 0);
  on_pipe_read_called++;
  free(buf.base);

  /* Write to the socket */
  outbuf = uv_buf_init("hello again\n", 12);
  r = uv_write(&conn.tcp_write_req, tcp, &outbuf, 1, tcp_connection_write_cb);
  ASSERT(r == 0);

  tcp_conn_read_cb_called++;
}
/* Connection to the in-process TCP server succeeded; start reading on it. */
static void connect_child_process_cb(uv_connect_t* req, int status) {
  int r;

  ASSERT(status == 0);
  r = uv_read_start(req->handle, on_read_alloc, on_tcp_child_process_read);
  ASSERT(r == 0);
}
/*
 * Connection callback for ipc_helper: accepts exactly one connection,
 * closes it immediately, and notifies the parent over the IPC channel.
 * The connection_accepted flag makes any further callbacks no-ops.
 */
static void ipc_on_connection(uv_stream_t* server, int status) {
  int r;
  uv_buf_t buf;

  if (!connection_accepted) {
    /*
     * Accept the connection and close it. Also let the other
     * side know.
     */
    ASSERT(status == 0);
    ASSERT((uv_stream_t*)&tcp_server == server);

    r = uv_tcp_init(server->loop, &conn.conn);
    ASSERT(r == 0);

    r = uv_accept(server, (uv_stream_t*)&conn.conn);
    ASSERT(r == 0);

    uv_close((uv_handle_t*)&conn.conn, close_cb);

    buf = uv_buf_init("accepted_connection\n", 20);
    r = uv_write2(&conn_notify_req, (uv_stream_t*)&channel, &buf, 1,
      NULL, conn_notify_write_cb);
    ASSERT(r == 0);

    connection_accepted = 1;
  }
}
/*
 * Connection callback for ipc_helper_tcp_connection: accepts the incoming
 * connection, ships the accepted handle to the parent over the IPC channel
 * (uv_write2 with a stream argument), and keeps reading on it locally.
 */
static void ipc_on_connection_tcp_conn(uv_stream_t* server, int status) {
  int r;
  uv_buf_t buf;
  /* NOTE(review): heap-allocated and closed via close_cb, which does not
   * free() it — leaked at helper-process exit.  Harmless for a test. */
  uv_tcp_t* conn;

  ASSERT(status == 0);
  ASSERT((uv_stream_t*)&tcp_server == server);

  conn = malloc(sizeof(*conn));
  ASSERT(conn);

  r = uv_tcp_init(server->loop, conn);
  ASSERT(r == 0);

  r = uv_accept(server, (uv_stream_t*)conn);
  ASSERT(r == 0);

  /* Send the accepted connection to the other process */
  buf = uv_buf_init("hello\n", 6);
  r = uv_write2(&conn_notify_req, (uv_stream_t*)&channel, &buf, 1,
    (uv_stream_t*)conn, NULL);
  ASSERT(r == 0);

  r = uv_read_start((uv_stream_t*)conn, on_read_alloc, on_tcp_child_process_read);
  ASSERT(r == 0);

  uv_close((uv_handle_t*)conn, close_cb);
}
/*
 * Child-process entry point launched from test-ipc.c.  stdin (fd 0) is a
 * duplex IPC pipe over which the tcp_server handle is transmitted to the
 * parent with uv_write2.  `listen_after_write` controls whether listen()
 * happens before or after the handle is sent, exercising both orderings.
 */
int ipc_helper(int listen_after_write) {
  uv_write_t write_req;
  int r;
  uv_buf_t buf;

  /* The last argument (1) enables IPC (handle passing) on the pipe. */
  r = uv_pipe_init(uv_default_loop(), &channel, 1);
  ASSERT(r == 0);

  uv_pipe_open(&channel, 0);

  ASSERT(uv_is_readable((uv_stream_t*) &channel));
  ASSERT(uv_is_writable((uv_stream_t*) &channel));

  r = uv_tcp_init(uv_default_loop(), &tcp_server);
  ASSERT(r == 0);

  r = uv_tcp_bind(&tcp_server, uv_ip4_addr("0.0.0.0", TEST_PORT));
  ASSERT(r == 0);

  if (!listen_after_write) {
    r = uv_listen((uv_stream_t*)&tcp_server, 12, ipc_on_connection);
    ASSERT(r == 0);
  }

  /* Ship the server handle to the parent alongside the "hello\n" payload. */
  buf = uv_buf_init("hello\n", 6);
  r = uv_write2(&write_req, (uv_stream_t*)&channel, &buf, 1,
    (uv_stream_t*)&tcp_server, NULL);
  ASSERT(r == 0);

  if (listen_after_write) {
    r = uv_listen((uv_stream_t*)&tcp_server, 12, ipc_on_connection);
    ASSERT(r == 0);
  }

  r = uv_run(uv_default_loop());
  ASSERT(r == 0);

  ASSERT(connection_accepted == 1);
  ASSERT(close_cb_called == 3);

  return 0;
}
/*
 * Child-process entry point launched from test-ipc.c.  stdin (fd 0) is a
 * duplex IPC pipe over which an accepted, connected TCP handle is sent to
 * the parent.  The child connects to its own server so there is a live
 * connection to transmit.
 */
int ipc_helper_tcp_connection() {
  int r;
  struct sockaddr_in addr;

  /* The last argument (1) enables IPC (handle passing) on the pipe. */
  r = uv_pipe_init(uv_default_loop(), &channel, 1);
  ASSERT(r == 0);

  uv_pipe_open(&channel, 0);

  ASSERT(uv_is_readable((uv_stream_t*)&channel));
  ASSERT(uv_is_writable((uv_stream_t*)&channel));

  r = uv_tcp_init(uv_default_loop(), &tcp_server);
  ASSERT(r == 0);

  r = uv_tcp_bind(&tcp_server, uv_ip4_addr("0.0.0.0", TEST_PORT));
  ASSERT(r == 0);

  r = uv_listen((uv_stream_t*)&tcp_server, 12, ipc_on_connection_tcp_conn);
  ASSERT(r == 0);

  /* Make a connection to the server */
  r = uv_tcp_init(uv_default_loop(), &conn.conn);
  ASSERT(r == 0);

  addr = uv_ip4_addr("127.0.0.1", TEST_PORT);

  r = uv_tcp_connect(&conn.conn_req, (uv_tcp_t*)&conn.conn, addr, connect_child_process_cb);
  ASSERT(r == 0);

  r = uv_run(uv_default_loop());
  ASSERT(r == 0);

  ASSERT(tcp_conn_read_cb_called == 1);
  ASSERT(tcp_conn_write_cb_called == 1);
  ASSERT(close_cb_called == 4);

  return 0;
}

6
deps/uv/test/test-list.h

@ -26,6 +26,7 @@ TEST_DECLARE (ipc_listen_before_write)
TEST_DECLARE (ipc_listen_after_write)
TEST_DECLARE (ipc_send_recv_pipe)
TEST_DECLARE (ipc_send_recv_tcp)
TEST_DECLARE (ipc_tcp_connection)
TEST_DECLARE (tcp_ping_pong)
TEST_DECLARE (tcp_ping_pong_v6)
TEST_DECLARE (pipe_ping_pong)
@ -138,6 +139,8 @@ TEST_DECLARE (fs_event_watch_file)
TEST_DECLARE (fs_event_watch_file_current_dir)
TEST_DECLARE (fs_event_no_callback_on_close)
TEST_DECLARE (fs_event_immediate_close)
TEST_DECLARE (fs_event_close_with_pending_event)
TEST_DECLARE (fs_event_close_in_callback);
TEST_DECLARE (fs_readdir_empty_dir)
TEST_DECLARE (fs_readdir_file)
TEST_DECLARE (fs_open_dir)
@ -178,6 +181,7 @@ TASK_LIST_START
TEST_ENTRY (ipc_listen_after_write)
TEST_ENTRY (ipc_send_recv_pipe)
TEST_ENTRY (ipc_send_recv_tcp)
TEST_ENTRY (ipc_tcp_connection)
TEST_ENTRY (tcp_ping_pong)
TEST_HELPER (tcp_ping_pong, tcp4_echo_server)
@ -339,6 +343,8 @@ TASK_LIST_START
TEST_ENTRY (fs_event_watch_file_current_dir)
TEST_ENTRY (fs_event_no_callback_on_close)
TEST_ENTRY (fs_event_immediate_close)
TEST_ENTRY (fs_event_close_with_pending_event)
TEST_ENTRY (fs_event_close_in_callback)
TEST_ENTRY (fs_readdir_empty_dir)
TEST_ENTRY (fs_readdir_file)
TEST_ENTRY (fs_open_dir)

12
deps/uv/test/test-platform-output.c

@ -40,7 +40,7 @@ TEST_IMPL(platform_output) {
err = uv_resident_set_memory(&rss);
ASSERT(UV_OK == err.code);
fprintf(stderr, "uv_resident_set_memory: %d\n", rss);
fprintf(stderr, "uv_resident_set_memory: %zu\n", rss);
err = uv_uptime(&uptime);
ASSERT(UV_OK == err.code);
@ -53,11 +53,11 @@ TEST_IMPL(platform_output) {
for (i = 0; i < count; i++) {
fprintf(stderr, " model: %s\n", cpus[i].model);
fprintf(stderr, " speed: %d\n", cpus[i].speed);
fprintf(stderr, " times.sys: %llu\n", cpus[i].cpu_times.sys);
fprintf(stderr, " times.user: %llu\n", cpus[i].cpu_times.user);
fprintf(stderr, " times.idle: %llu\n", cpus[i].cpu_times.idle);
fprintf(stderr, " times.irq: %llu\n", cpus[i].cpu_times.irq);
fprintf(stderr, " times.nice: %llu\n", cpus[i].cpu_times.nice);
fprintf(stderr, " times.sys: %zu\n", (size_t)cpus[i].cpu_times.sys);
fprintf(stderr, " times.user: %zu\n", (size_t)cpus[i].cpu_times.user);
fprintf(stderr, " times.idle: %zu\n", (size_t)cpus[i].cpu_times.idle);
fprintf(stderr, " times.irq: %zu\n", (size_t)cpus[i].cpu_times.irq);
fprintf(stderr, " times.nice: %zu\n", (size_t)cpus[i].cpu_times.nice);
}
uv_free_cpu_info(cpus, count);

99
deps/uv/test/test-stdio-over-pipes.c

@ -40,6 +40,7 @@ static uv_loop_t* loop;
static char output[OUTPUT_SIZE];
static int output_used;
typedef struct {
uv_write_t req;
uv_buf_t buf;
@ -155,3 +156,101 @@ TEST_IMPL(stdio_over_pipes) {
return 0;
}
/* Everything here runs in a child process. */
static int on_pipe_read_called;
static int after_write_called;
static uv_pipe_t stdin_pipe;
static uv_pipe_t stdout_pipe;
/* Child helper: verifies the parent echoed "hello world\n" back over
 * stdin, then closes both stdio pipes so the helper can exit. */
static void on_pipe_read(uv_stream_t* tcp, ssize_t nread, uv_buf_t buf) {
  ASSERT(nread > 0);
  ASSERT(memcmp("hello world\n", buf.base, nread) == 0);
  on_pipe_read_called++;

  free(buf.base);

  uv_close((uv_handle_t*)&stdin_pipe, close_cb);
  uv_close((uv_handle_t*)&stdout_pipe, close_cb);
}
/* Counts successful completions of the staggered stdout writes. */
static void after_pipe_write(uv_write_t* req, int status) {
  ASSERT(status == 0);
  after_write_called++;
}
/* Allocation callback: malloc()s a buffer of the suggested size; the
 * read callback owns and frees buf.base. */
static uv_buf_t on_read_alloc(uv_handle_t* handle,
    size_t suggested_size) {
  uv_buf_t buf;
  buf.base = (char*)malloc(suggested_size);
  buf.len = suggested_size;
  return buf;
}
/*
 * Child-process entry point for the stdio_over_pipes test.  Writes the
 * words of "hello world\n" as seven separate buffers to stdout (checking
 * that write order is preserved), then reads the parent's echo back on
 * stdin and verifies it.
 */
int stdio_over_pipes_helper() {
  /* Write several buffers to test that the write order is preserved. */
  char* buffers[] = {
    "he",
    "ll",
    "o ",
    "wo",
    "rl",
    "d",
    "\n"
  };

  uv_write_t write_req[ARRAY_SIZE(buffers)];
  uv_buf_t buf[ARRAY_SIZE(buffers)];
  int r, i;
  uv_loop_t* loop = uv_default_loop();

  /* The spawning parent wired both fds to pipes. */
  ASSERT(UV_NAMED_PIPE == uv_guess_handle(0));
  ASSERT(UV_NAMED_PIPE == uv_guess_handle(1));

  r = uv_pipe_init(loop, &stdin_pipe, 0);
  ASSERT(r == 0);
  r = uv_pipe_init(loop, &stdout_pipe, 0);
  ASSERT(r == 0);

  uv_pipe_open(&stdin_pipe, 0);
  uv_pipe_open(&stdout_pipe, 1);

  /* Unref both stdio handles to make sure that all writes complete. */
  uv_unref(loop);
  uv_unref(loop);

  for (i = 0; i < ARRAY_SIZE(buffers); i++) {
    buf[i] = uv_buf_init((char*)buffers[i], strlen(buffers[i]));
  }

  for (i = 0; i < ARRAY_SIZE(buffers); i++) {
    r = uv_write(&write_req[i], (uv_stream_t*)&stdout_pipe, &buf[i], 1,
      after_pipe_write);
    ASSERT(r == 0);
  }

  /* First loop run: drain the writes only (pipes are unref'd above). */
  uv_run(loop);

  ASSERT(after_write_called == 7);
  ASSERT(on_pipe_read_called == 0);
  ASSERT(close_cb_called == 0);

  /* Re-ref the handles, then read the parent's echo until close. */
  uv_ref(loop);
  uv_ref(loop);

  r = uv_read_start((uv_stream_t*)&stdin_pipe, on_read_alloc,
    on_pipe_read);
  ASSERT(r == 0);

  uv_run(loop);

  ASSERT(after_write_called == 7);
  ASSERT(on_pipe_read_called == 1);
  ASSERT(close_cb_called == 2);

  return 0;
}

1
deps/uv/test/test-tcp-writealot.c

@ -41,7 +41,6 @@ static int write_cb_called = 0;
static int close_cb_called = 0;
static int bytes_sent = 0;
static int bytes_sent_done = 0;
static int bytes_received = 0;
static int bytes_received_done = 0;

3
deps/uv/test/test-udp-multicast-ttl.c

@ -32,10 +32,7 @@
static uv_udp_t server;
static uv_udp_t client;
static int cl_recv_cb_called;
static int sv_send_cb_called;
static int close_cb_called;

1
deps/uv/uv.gyp

@ -211,7 +211,6 @@
'sources': [ 'src/unix/darwin.c' ],
'direct_dependent_settings': {
'libraries': [
'$(SDKROOT)/System/Library/Frameworks/Carbon.framework',
'$(SDKROOT)/System/Library/Frameworks/CoreServices.framework',
],
},

Loading…
Cancel
Save