Browse Source

Upgrade libuv to e58a1abff02d7bacf89a56de9050e27690a97bc5

v0.7.4-release
Ryan Dahl 14 years ago
parent
commit
7a5977b5d6
  1. 1
      deps/uv/AUTHORS
  2. 15
      deps/uv/README
  3. 2
      deps/uv/config-mingw.mk
  4. 2
      deps/uv/config-unix.mk
  5. 171
      deps/uv/desired-api.md
  6. 2
      deps/uv/msvs/libuv-test.vcxproj
  7. 10
      deps/uv/test/benchmark-list.h
  8. 38
      deps/uv/test/benchmark-ping-pongs.c
  9. 229
      deps/uv/test/benchmark-pump.c
  10. 7
      deps/uv/test/benchmark-sizes.c
  11. 73
      deps/uv/test/echo-server.c
  12. 6
      deps/uv/test/runner-unix.c
  13. 2
      deps/uv/test/runner-win.c
  14. 9
      deps/uv/test/runner.c
  15. 2
      deps/uv/test/runner.h
  16. 17
      deps/uv/test/test-async.c
  17. 56
      deps/uv/test/test-bind-error.c
  18. 37
      deps/uv/test/test-callback-stack.c
  19. 93
      deps/uv/test/test-connection-fail.c
  20. 64
      deps/uv/test/test-delayed-accept.c
  21. 53
      deps/uv/test/test-get-currentexe.c
  22. 9
      deps/uv/test/test-list.h
  23. 79
      deps/uv/test/test-loop-handles.c
  24. 39
      deps/uv/test/test-ping-pong.c
  25. 176
      deps/uv/test/test-shutdown-eof.c
  26. 48
      deps/uv/test/test-tcp-writealot.c
  27. 24
      deps/uv/test/test-timer-again.c
  28. 15
      deps/uv/test/test-timer.c
  29. 755
      deps/uv/uv-unix.c
  30. 47
      deps/uv/uv-unix.h
  31. 591
      deps/uv/uv-win.c
  32. 40
      deps/uv/uv-win.h
  33. 271
      deps/uv/uv.h
  34. 71
      src/node.cc
  35. 6
      src/timer_wrap.cc

1
deps/uv/AUTHORS

@ -3,3 +3,4 @@ Ryan Dahl <ryan@joyent.com>
Bert Belder <bertbelder@gmail.com> Bert Belder <bertbelder@gmail.com>
Josh Roesslein <jroesslein@gmail.com> Josh Roesslein <jroesslein@gmail.com>
Alan Gutierrez <alan@prettyrobots.com> Alan Gutierrez <alan@prettyrobots.com>
Vanilla Hsu <vanilla@fatpipi.com>

15
deps/uv/README

@ -6,7 +6,7 @@ http://nodejs.org/
(This was previously called liboio) (This was previously called liboio)
## Supported Platforms Supported Platforms:
Microsoft Windows operating systems since Windows XP sp2. It can be built Microsoft Windows operating systems since Windows XP sp2. It can be built
with either Visual Studio or MinGW. with either Visual Studio or MinGW.
@ -14,16 +14,3 @@ with either Visual Studio or MinGW.
Linux 2.6 and MacOS using the GCC toolchain. Linux 2.6 and MacOS using the GCC toolchain.
Solaris 121 and later using GCC toolchain. Solaris 121 and later using GCC toolchain.
## Design
The goal of this library is to provide high-concurrency high-performance I/O
on all operating systems. This is a large undertaking. Some of the API
decisions may seem arbitrary but are actually borne out of various specific
platform issues.
## uv_read_start(), uv_read_stop()
Originally we wanted to provide an asynchronous read function that was
similar to WSARecv().

2
deps/uv/config-mingw.mk

@ -24,7 +24,7 @@ CC = $(PREFIX)gcc
AR = $(PREFIX)ar AR = $(PREFIX)ar
E=.exe E=.exe
CFLAGS=-g --std=gnu89 CFLAGS=-g --std=gnu89 -pedantic -Wno-variadic-macros
LINKFLAGS=-lm LINKFLAGS=-lm
RUNNER_CFLAGS=$(CFLAGS) -D_GNU_SOURCE # Need _GNU_SOURCE for strdup? RUNNER_CFLAGS=$(CFLAGS) -D_GNU_SOURCE # Need _GNU_SOURCE for strdup?

2
deps/uv/config-unix.mk

@ -21,7 +21,7 @@
CC = $(PREFIX)gcc CC = $(PREFIX)gcc
AR = $(PREFIX)ar AR = $(PREFIX)ar
E= E=
CFLAGS=--std=gnu89 -g -DEV_MULTIPLICITY=0 CFLAGS=--std=gnu89 -pedantic -Wno-variadic-macros -g -DEV_MULTIPLICITY=0
LINKFLAGS=-lm LINKFLAGS=-lm
ifeq (SunOS,$(uname_S)) ifeq (SunOS,$(uname_S))

171
deps/uv/desired-api.md

@ -0,0 +1,171 @@
Warning: this is not actual API but desired API.
# `uv_handle_t`
This is the abstract base class of all types of handles. All handles have in
common:
* When handles are initialized, the reference count to the event loop is
increased by one.
* The user owns the `uv_handle_t` memory and is in charge of freeing it.
* In order to free resources associated with a handle, one must `uv_close()`
and wait for the `uv_close_cb` callback. After the close callback has been
made, the user is allowed to free the `uv_handle_t` object.
* The `uv_close_cb` is always made directly off the event loop. That is, it
is not called from `uv_close()`.
# `uv_tcp_server_t`
A TCP server class that is a subclass of `uv_handle_t`. This can be bound to
an address and begin accepting new TCP sockets.
int uv_bind4(uv_tcp_server_t* tcp_server, struct sockaddr_in* address);
int uv_bind6(uv_tcp_server_t* tcp_server, struct sockaddr_in6* address);
Binds the TCP server to an address. The `address` can be created with
`uv_ip4_addr()`. Call this before `uv_listen()`
Returns zero on success, -1 on failure. Errors in order of least-seriousness:
* `UV_EADDRINUSE` There is already another socket bound to the specified
address.
* `UV_EADDRNOTAVAIL` The `address` parameter is an IP address that is not
* `UV_EINVAL` The server is already bound to an address.
* `UV_EFAULT` Memory of `address` parameter is unintelligible.
int uv_listen(uv_tcp_server_t*, int backlog, uv_connection_cb cb);
Begins listening for connections. The accept callback is level-triggered.
int uv_accept(uv_tcp_server_t* server,
uv_tcp_t* client,
uv_close_cb close_cb,
void* data);
Accepts a connection. This should be called after the accept callback is
made. The `client` parameter should be uninitialized memory; `uv_accept` is
used instead of `uv_tcp_init` for server-side `uv_tcp_t` initialization.
Return value 0 indicates success, -1 failure. Possible errors:
* `UV_EAGAIN` There are no connections. Wait for the `uv_connection_cb` callback
to be called again.
* `UV_EFAULT` The memory of either `server` is unintelligible.
# `uv_stream_t`
An abstract subclass of `uv_handle_t`. Streams represent something that
reads and/or writes data. Streams can be half or full-duplex. TCP sockets
are streams, files are streams with offsets.
int uv_read_start(uv_stream_t* stream,
uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
Starts the stream reading continuously. The `alloc_cb` is used to allow the
user to implement various means of supplying the stream with buffers to
fill. The `read_cb` returns buffers to the user filled with data.
Sometimes the buffers returned to the user do not contain data. This does
not indicate EOF as in other systems. EOF is made via the `uv_eof_cb` which
can be set like this `uv_set_eof_cb(stream, eof_cb);`
int uv_read_stop(uv_stream_t* stream);
Stops reading from the stream.
int uv_write_req_init(uv_write_req_t*,
uv_stream_t*,
uv_buf_t bufs[],
int bufcnt,
uv_close_cb close_cb,
void* data);
Initiates a write request on a stream.
int uv_shutdown_req_init(uv_shutdown_req_t*, uv_stream_t*)
Initiates a shutdown of outgoing data once the write queue drains.
# `uv_tcp_t`
The TCP handle class represents one endpoint of a duplex TCP stream.
`uv_tcp_t` is a subclass of `uv_stream_t`. A TCP handle can represent a
client side connection (one that has been used with `uv_connect_req_init`)
or a server-side connection (one that was initialized with `uv_accept`)
int uv_connect_req_init(uv_connect_req_t* req,
uv_tcp_t* socket,
struct sockaddr* addr,
uv_close_cb close_cb,
void* data);
Initiates a request to open a connection.
# `uv_req_t`
Abstract class represents an asynchronous request. This is a subclass of `uv_handle_t`.
# `uv_connect_req_t`
Subclass of `uv_req_t`. Represents a request for a TCP connection. Operates
on `uv_tcp_t` handles. Like other types of requests the `close_cb` indicates
completion of the request.
int uv_connect_req_init(uv_connect_req_t* req,
uv_tcp_t* socket,
struct sockaddr* addr,
uv_close_cb close_cb,
void* data);
Initializes the connection request. Returning 0 indicates success, -1 if
there was an error. The following values can be retrieved from
`uv_last_error` in the case of an error:
* ???
# `uv_shutdown_req_t`
Subclass of `uv_req_t`. Represents an ongoing shutdown request. Once the
write queue of the parent `uv_stream_t` is drained, the outbound data
channel is shutdown. Once a shutdown request is initiated on a stream, the
stream will allow no more writes.
int uv_shutdown_req_init(uv_shutdown_req_t*,
uv_stream_t* parent,
uv_close_cb close_cb,
void* data);
Initializes the shutdown request.
# `uv_write_req_t`
int uv_write_req_init(uv_write_req_t*,
uv_stream_t*,
uv_buf_t bufs[],
int bufcnt,
uv_close_cb close_cb,
void* data);
Initiates a write request on a stream.

2
deps/uv/msvs/libuv-test.vcxproj

@ -147,6 +147,7 @@
<ClCompile Include="..\test\test-delayed-accept.c" /> <ClCompile Include="..\test\test-delayed-accept.c" />
<ClCompile Include="..\test\test-callback-stack.c" /> <ClCompile Include="..\test\test-callback-stack.c" />
<ClCompile Include="..\test\test-connection-fail.c" /> <ClCompile Include="..\test\test-connection-fail.c" />
<ClCompile Include="..\test\test-get-currentexe.c" />
<ClCompile Include="..\test\test-fail-always.c" /> <ClCompile Include="..\test\test-fail-always.c" />
<ClCompile Include="..\test\test-loop-handles.c" /> <ClCompile Include="..\test\test-loop-handles.c" />
<ClCompile Include="..\test\test-pass-always.c" /> <ClCompile Include="..\test\test-pass-always.c" />
@ -154,6 +155,7 @@
<ClCompile Include="..\test\runner-win.c" /> <ClCompile Include="..\test\runner-win.c" />
<ClCompile Include="..\test\runner.c" /> <ClCompile Include="..\test\runner.c" />
<ClCompile Include="..\test\test-bind-error.c" /> <ClCompile Include="..\test\test-bind-error.c" />
<ClCompile Include="..\test\test-shutdown-eof.c" />
<ClCompile Include="..\test\test-tcp-writealot.c" /> <ClCompile Include="..\test\test-tcp-writealot.c" />
<ClCompile Include="..\test\test-timer-again.c" /> <ClCompile Include="..\test\test-timer-again.c" />
<ClCompile Include="..\test\test-timer.c" /> <ClCompile Include="..\test\test-timer.c" />

10
deps/uv/test/benchmark-list.h

@ -21,7 +21,9 @@
BENCHMARK_DECLARE (sizes) BENCHMARK_DECLARE (sizes)
BENCHMARK_DECLARE (ping_pongs) BENCHMARK_DECLARE (ping_pongs)
BENCHMARK_DECLARE (pump) BENCHMARK_DECLARE (pump100_client)
BENCHMARK_DECLARE (pump1_client)
HELPER_DECLARE (pump_server)
HELPER_DECLARE (echo_server) HELPER_DECLARE (echo_server)
TASK_LIST_START TASK_LIST_START
@ -30,5 +32,9 @@ TASK_LIST_START
BENCHMARK_ENTRY (ping_pongs) BENCHMARK_ENTRY (ping_pongs)
BENCHMARK_HELPER (ping_pongs, echo_server) BENCHMARK_HELPER (ping_pongs, echo_server)
BENCHMARK_ENTRY (pump) BENCHMARK_ENTRY (pump100_client)
BENCHMARK_HELPER (pump100_client, pump_server)
BENCHMARK_ENTRY (pump1_client)
BENCHMARK_HELPER (pump1_client, pump_server)
TASK_LIST_END TASK_LIST_END

38
deps/uv/test/benchmark-ping-pongs.c

@ -33,7 +33,7 @@
typedef struct { typedef struct {
int pongs; int pongs;
int state; int state;
uv_handle_t handle; uv_tcp_t tcp;
uv_req_t connect_req; uv_req_t connect_req;
uv_req_t shutdown_req; uv_req_t shutdown_req;
} pinger_t; } pinger_t;
@ -47,12 +47,12 @@ typedef struct buf_s {
static char PING[] = "PING\n"; static char PING[] = "PING\n";
static buf_t* buf_freelist = NULL; static buf_t* buf_freelist = NULL;
static int pinger_shutdown_cb_called;
static int completed_pingers = 0; static int completed_pingers = 0;
static int64_t start_time; static int64_t start_time;
static uv_buf_t buf_alloc(uv_handle_t* handle, size_t size) { static uv_buf_t buf_alloc(uv_tcp_t* tcp, size_t size) {
buf_t* ab; buf_t* ab;
ab = buf_freelist; ab = buf_freelist;
@ -107,7 +107,7 @@ static void pinger_write_ping(pinger_t* pinger) {
buf.len = strlen(PING); buf.len = strlen(PING);
req = (uv_req_t*)malloc(sizeof(*req)); req = (uv_req_t*)malloc(sizeof(*req));
uv_req_init(req, &pinger->handle, pinger_write_cb); uv_req_init(req, (uv_handle_t*)(&pinger->tcp), pinger_write_cb);
if (uv_write(req, &buf, 1)) { if (uv_write(req, &buf, 1)) {
FATAL("uv_write failed"); FATAL("uv_write failed");
@ -117,14 +117,21 @@ static void pinger_write_ping(pinger_t* pinger) {
static void pinger_shutdown_cb(uv_handle_t* handle, int status) { static void pinger_shutdown_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0); ASSERT(status == 0);
pinger_shutdown_cb_called++;
/*
* The close callback has not been triggered yet. We must wait for EOF
* until we close the connection.
*/
ASSERT(completed_pingers == 0);
} }
static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) { static void pinger_read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
unsigned int i; unsigned int i;
pinger_t* pinger; pinger_t* pinger;
pinger = (pinger_t*)handle->data; pinger = (pinger_t*)tcp->data;
if (nread < 0) { if (nread < 0) {
ASSERT(uv_last_error().code == UV_EOF); ASSERT(uv_last_error().code == UV_EOF);
@ -133,6 +140,9 @@ static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
buf_free(buf); buf_free(buf);
} }
ASSERT(pinger_shutdown_cb_called == 1);
uv_close((uv_handle_t*)tcp);
return; return;
} }
@ -143,10 +153,9 @@ static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
if (pinger->state == 0) { if (pinger->state == 0) {
pinger->pongs++; pinger->pongs++;
if (uv_now() - start_time > TIME) { if (uv_now() - start_time > TIME) {
uv_req_init(&pinger->shutdown_req, handle, pinger_shutdown_cb); uv_req_init(&pinger->shutdown_req, (uv_handle_t*)tcp, pinger_shutdown_cb);
uv_shutdown(&pinger->shutdown_req); uv_shutdown(&pinger->shutdown_req);
break; break;
return;
} else { } else {
pinger_write_ping(pinger); pinger_write_ping(pinger);
} }
@ -164,7 +173,7 @@ static void pinger_connect_cb(uv_req_t *req, int status) {
pinger_write_ping(pinger); pinger_write_ping(pinger);
if (uv_read_start(req->handle, pinger_read_cb)) { if (uv_read_start((uv_tcp_t*)(req->handle), buf_alloc, pinger_read_cb)) {
FATAL("uv_read_start failed"); FATAL("uv_read_start failed");
} }
} }
@ -181,21 +190,22 @@ static void pinger_new() {
pinger->pongs = 0; pinger->pongs = 0;
/* Try to connect to the server and do NUM_PINGS ping-pongs. */ /* Try to connect to the server and do NUM_PINGS ping-pongs. */
r = uv_tcp_init(&pinger->handle, pinger_close_cb, (void*)pinger); r = uv_tcp_init(&pinger->tcp, pinger_close_cb, (void*)pinger);
ASSERT(!r); ASSERT(!r);
/* We are never doing multiple reads/connects at a time anyway. */ /* We are never doing multiple reads/connects at a time anyway. */
/* so these handles can be pre-initialized. */ /* so these handles can be pre-initialized. */
uv_req_init(&pinger->connect_req, &pinger->handle, pinger_connect_cb); uv_req_init(&pinger->connect_req, (uv_handle_t*)&pinger->tcp,
pinger_connect_cb);
uv_bind(&pinger->handle, (struct sockaddr*)&client_addr); uv_bind(&pinger->tcp, client_addr);
r = uv_connect(&pinger->connect_req, (struct sockaddr*)&server_addr); r = uv_connect(&pinger->connect_req, server_addr);
ASSERT(!r); ASSERT(!r);
} }
BENCHMARK_IMPL(ping_pongs) { BENCHMARK_IMPL(ping_pongs) {
uv_init(buf_alloc); uv_init();
start_time = uv_now(); start_time = uv_now();
pinger_new(); pinger_new();

229
deps/uv/test/benchmark-pump.c

@ -26,7 +26,7 @@
#include <stdio.h> #include <stdio.h>
#define TARGET_CONNECTIONS 100 static int TARGET_CONNECTIONS;
#define WRITE_BUFFER_SIZE 8192 #define WRITE_BUFFER_SIZE 8192
#define MAX_SIMULTANEOUS_CONNECTS 100 #define MAX_SIMULTANEOUS_CONNECTS 100
@ -35,22 +35,24 @@
#define STATS_COUNT 5 #define STATS_COUNT 5
static void do_write(uv_handle_t* handle); static void do_write(uv_tcp_t*);
static void maybe_connect_some(); static void maybe_connect_some();
static uv_req_t* req_alloc(); static uv_req_t* req_alloc();
static void req_free(uv_req_t* uv_req); static void req_free(uv_req_t* uv_req);
static uv_buf_t buf_alloc(uv_handle_t* handle, size_t size); static uv_buf_t buf_alloc(uv_tcp_t*, size_t size);
static void buf_free(uv_buf_t uv_buf_t); static void buf_free(uv_buf_t uv_buf_t);
static uv_tcp_t server;
static struct sockaddr_in listen_addr; static struct sockaddr_in listen_addr;
static struct sockaddr_in connect_addr; static struct sockaddr_in connect_addr;
static int64_t start_time; static int64_t start_time;
static int max_connect_socket = 0; static int max_connect_socket = 0;
static int max_read_sockets = 0;
static int read_sockets = 0; static int read_sockets = 0;
static int write_sockets = 0; static int write_sockets = 0;
@ -63,10 +65,12 @@ static int stats_left = 0;
static char write_buffer[WRITE_BUFFER_SIZE]; static char write_buffer[WRITE_BUFFER_SIZE];
static uv_handle_t read_handles[TARGET_CONNECTIONS]; /* Make this as large as you need. */
static uv_handle_t write_handles[TARGET_CONNECTIONS]; #define MAX_WRITE_HANDLES 1000
static uv_handle_t timer_handle; static uv_tcp_t write_handles[MAX_WRITE_HANDLES];
static uv_timer_t timer_handle;
static double gbit(int64_t bytes, int64_t passed_ms) { static double gbit(int64_t bytes, int64_t passed_ms) {
@ -79,9 +83,8 @@ static void show_stats(uv_handle_t *handle, int status) {
int64_t diff; int64_t diff;
#if PRINT_STATS #if PRINT_STATS
LOGF("connections: %d, read: %.1f gbit/s, write: %.1f gbit/s\n", LOGF("connections: %d, write: %.1f gbit/s\n",
read_sockets, write_sockets,
gbit(nrecv, STATS_INTERVAL),
gbit(nsent, STATS_INTERVAL)); gbit(nsent, STATS_INTERVAL));
#endif #endif
@ -91,8 +94,8 @@ static void show_stats(uv_handle_t *handle, int status) {
uv_update_time(); uv_update_time();
diff = uv_now() - start_time; diff = uv_now() - start_time;
LOGF("pump_%d: %.1f gbit/s\n", read_sockets, LOGF("pump%d_client: %.1f gbit/s\n", write_sockets,
gbit(nrecv_total, diff)); gbit(nsent_total, diff));
exit(0); exit(0);
} }
@ -103,8 +106,38 @@ static void show_stats(uv_handle_t *handle, int status) {
} }
void close_cb(uv_handle_t* handle, int status) { static void read_show_stats() {
int64_t diff;
uv_update_time();
diff = uv_now() - start_time;
LOGF("pump%d_server: %.1f gbit/s\n", max_read_sockets,
gbit(nrecv_total, diff));
}
void write_sockets_close_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0); ASSERT(status == 0);
/* If any client closes, the process is done. */
exit(0);
}
void read_sockets_close_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0);
free(handle);
read_sockets--;
/* If it's past the first second and everyone has closed their connection
* Then print stats.
*/
if (uv_now() - start_time > 1000 && read_sockets == 0) {
read_show_stats();
uv_close((uv_handle_t*)&server);
}
} }
@ -114,15 +147,27 @@ static void start_stats_collection() {
/* Show-stats timer */ /* Show-stats timer */
stats_left = STATS_COUNT; stats_left = STATS_COUNT;
r = uv_timer_init(&timer_handle, close_cb, NULL); r = uv_timer_init(&timer_handle, NULL, NULL);
ASSERT(r == 0); ASSERT(r == 0);
r = uv_timer_start(&timer_handle, show_stats, STATS_INTERVAL, STATS_INTERVAL); r = uv_timer_start(&timer_handle, show_stats, STATS_INTERVAL, STATS_INTERVAL);
ASSERT(r == 0); ASSERT(r == 0);
uv_update_time();
start_time = uv_now();
} }
static void read_cb(uv_handle_t* handle, int bytes, uv_buf_t buf) { static void read_cb(uv_tcp_t* tcp, int bytes, uv_buf_t buf) {
ASSERT(bytes >= 0); if (nrecv_total == 0) {
ASSERT(start_time == 0);
uv_update_time();
start_time = uv_now();
}
if (bytes < 0) {
uv_close((uv_handle_t*)tcp);
return;
}
buf_free(buf); buf_free(buf);
@ -141,11 +186,11 @@ static void write_cb(uv_req_t *req, int status) {
nsent += sizeof write_buffer; nsent += sizeof write_buffer;
nsent_total += sizeof write_buffer; nsent_total += sizeof write_buffer;
do_write(req->handle); do_write((uv_tcp_t*)req->handle);
} }
static void do_write(uv_handle_t* handle) { static void do_write(uv_tcp_t* tcp) {
uv_req_t* req; uv_req_t* req;
uv_buf_t buf; uv_buf_t buf;
int r; int r;
@ -153,31 +198,19 @@ static void do_write(uv_handle_t* handle) {
buf.base = (char*) &write_buffer; buf.base = (char*) &write_buffer;
buf.len = sizeof write_buffer; buf.len = sizeof write_buffer;
while (handle->write_queue_size == 0) { while (tcp->write_queue_size == 0) {
req = req_alloc(); req = req_alloc();
uv_req_init(req, handle, write_cb); uv_req_init(req, (uv_handle_t*)tcp, write_cb);
r = uv_write(req, &buf, 1); r = uv_write(req, &buf, 1);
ASSERT(r == 0); ASSERT(r == 0);
} }
} }
static void maybe_start_writing() {
int i;
if (read_sockets == TARGET_CONNECTIONS &&
write_sockets == TARGET_CONNECTIONS) {
start_stats_collection();
/* Yay! start writing */
for (i = 0; i < write_sockets; i++) {
do_write(&write_handles[i]);
}
}
}
static void connect_cb(uv_req_t* req, int status) { static void connect_cb(uv_req_t* req, int status) {
int i;
if (status) LOG(uv_strerror(uv_last_error())); if (status) LOG(uv_strerror(uv_last_error()));
ASSERT(status == 0); ASSERT(status == 0);
@ -185,78 +218,55 @@ static void connect_cb(uv_req_t* req, int status) {
req_free(req); req_free(req);
maybe_connect_some(); maybe_connect_some();
maybe_start_writing();
}
static void do_connect(uv_handle_t* handle, struct sockaddr* addr) {
uv_req_t* req;
int r;
r = uv_tcp_init(handle, close_cb, NULL);
ASSERT(r == 0);
req = req_alloc();
uv_req_init(req, handle, connect_cb);
r = uv_connect(req, addr);
ASSERT(r == 0);
}
if (write_sockets == TARGET_CONNECTIONS) {
start_stats_collection();
static void maybe_connect_some() { /* Yay! start writing */
while (max_connect_socket < TARGET_CONNECTIONS && for (i = 0; i < write_sockets; i++) {
max_connect_socket < write_sockets + MAX_SIMULTANEOUS_CONNECTS) { do_write(&write_handles[i]);
do_connect(&write_handles[max_connect_socket++], }
(struct sockaddr*) &connect_addr);
} }
} }
static void accept_cb(uv_handle_t* server) { static void maybe_connect_some() {
uv_handle_t* handle; uv_req_t* req;
uv_tcp_t* tcp;
int r; int r;
ASSERT(read_sockets < TARGET_CONNECTIONS); while (max_connect_socket < TARGET_CONNECTIONS &&
handle = &read_handles[read_sockets]; max_connect_socket < write_sockets + MAX_SIMULTANEOUS_CONNECTS) {
tcp = &write_handles[max_connect_socket++];
r = uv_accept(server, handle, close_cb, NULL);
ASSERT(r == 0);
r = uv_read_start(handle, read_cb);
ASSERT(r == 0);
read_sockets++; r = uv_tcp_init(tcp, write_sockets_close_cb, NULL);
ASSERT(r == 0);
maybe_start_writing(); req = req_alloc();
uv_req_init(req, (uv_handle_t*)tcp, connect_cb);
r = uv_connect(req, connect_addr);
ASSERT(r == 0);
}
} }
BENCHMARK_IMPL(pump) { static void connection_cb(uv_tcp_t* s, int status) {
uv_handle_t server; uv_tcp_t* tcp;
int r; int r;
uv_init(buf_alloc); ASSERT(&server == s);
ASSERT(status == 0);
listen_addr = uv_ip4_addr("0.0.0.0", TEST_PORT); tcp = malloc(sizeof(uv_tcp_t));
connect_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
/* Server */ r = uv_accept(s, tcp, read_sockets_close_cb, NULL);
r = uv_tcp_init(&server, close_cb, NULL);
ASSERT(r == 0);
r = uv_bind(&server, (struct sockaddr*) &listen_addr);
ASSERT(r == 0);
r = uv_listen(&server, TARGET_CONNECTIONS, accept_cb);
ASSERT(r == 0); ASSERT(r == 0);
uv_update_time(); r = uv_read_start(tcp, buf_alloc, read_cb);
start_time = uv_now(); ASSERT(r == 0);
/* Start making connections */
maybe_connect_some();
uv_run();
return 0; read_sockets++;
max_read_sockets++;
} }
@ -308,7 +318,7 @@ typedef struct buf_list_s {
static buf_list_t* buf_freelist = NULL; static buf_list_t* buf_freelist = NULL;
static uv_buf_t buf_alloc(uv_handle_t* handle, size_t size) { static uv_buf_t buf_alloc(uv_tcp_t* tcp, size_t size) {
buf_list_t* buf; buf_list_t* buf;
buf = buf_freelist; buf = buf_freelist;
@ -331,3 +341,50 @@ static void buf_free(uv_buf_t uv_buf_t) {
buf->next = buf_freelist; buf->next = buf_freelist;
buf_freelist = buf; buf_freelist = buf;
} }
HELPER_IMPL(pump_server) {
int r;
uv_init();
listen_addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
/* Server */
r = uv_tcp_init(&server, NULL, NULL);
ASSERT(r == 0);
r = uv_bind(&server, listen_addr);
ASSERT(r == 0);
r = uv_listen(&server, MAX_WRITE_HANDLES, connection_cb);
ASSERT(r == 0);
uv_run();
return 0;
}
void pump(int n) {
ASSERT(n <= MAX_WRITE_HANDLES);
TARGET_CONNECTIONS = n;
uv_init();
connect_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
/* Start making connections */
maybe_connect_some();
uv_run();
}
BENCHMARK_IMPL(pump100_client) {
pump(100);
return 0;
}
BENCHMARK_IMPL(pump1_client) {
pump(1);
return 0;
}

7
deps/uv/test/benchmark-sizes.c

@ -24,7 +24,12 @@
BENCHMARK_IMPL(sizes) { BENCHMARK_IMPL(sizes) {
LOGF("uv_handle_t: %lu bytes\n", sizeof(uv_handle_t));
LOGF("uv_req_t: %lu bytes\n", sizeof(uv_req_t)); LOGF("uv_req_t: %lu bytes\n", sizeof(uv_req_t));
LOGF("uv_tcp_t: %lu bytes\n", sizeof(uv_tcp_t));
LOGF("uv_prepare_t: %lu bytes\n", sizeof(uv_prepare_t));
LOGF("uv_check_t: %lu bytes\n", sizeof(uv_check_t));
LOGF("uv_idle_t: %lu bytes\n", sizeof(uv_idle_t));
LOGF("uv_async_t: %lu bytes\n", sizeof(uv_async_t));
LOGF("uv_timer_t: %lu bytes\n", sizeof(uv_timer_t));
return 0; return 0;
} }

73
deps/uv/test/echo-server.c

@ -31,13 +31,14 @@ typedef struct {
} write_req_t; } write_req_t;
static uv_handle_t server; static int server_closed;
static uv_tcp_t server;
static void after_write(uv_req_t* req, int status); static void after_write(uv_req_t* req, int status);
static void after_read(uv_handle_t* handle, int nread, uv_buf_t buf); static void after_read(uv_tcp_t*, int nread, uv_buf_t buf);
static void on_close(uv_handle_t* peer, int status); static void on_close(uv_handle_t* peer, int status);
static void on_accept(uv_handle_t* handle); static void on_connection(uv_tcp_t*, int status);
static void after_write(uv_req_t* req, int status) { static void after_write(uv_req_t* req, int status) {
@ -58,11 +59,13 @@ static void after_write(uv_req_t* req, int status) {
static void after_shutdown(uv_req_t* req, int status) { static void after_shutdown(uv_req_t* req, int status) {
uv_close(req->handle);
free(req); free(req);
} }
static void after_read(uv_handle_t* handle, int nread, uv_buf_t buf) { static void after_read(uv_tcp_t* handle, int nread, uv_buf_t buf) {
int i;
write_req_t *wr; write_req_t *wr;
uv_req_t* req; uv_req_t* req;
@ -75,7 +78,7 @@ static void after_read(uv_handle_t* handle, int nread, uv_buf_t buf) {
} }
req = (uv_req_t*) malloc(sizeof *req); req = (uv_req_t*) malloc(sizeof *req);
uv_req_init(req, handle, after_shutdown); uv_req_init(req, (uv_handle_t*)handle, after_shutdown);
uv_shutdown(req); uv_shutdown(req);
return; return;
@ -87,9 +90,19 @@ static void after_read(uv_handle_t* handle, int nread, uv_buf_t buf) {
return; return;
} }
/* Scan for the letter Q which signals that we should quit. */
if (!server_closed) {
for (i = 0; i < nread; i++) {
if (buf.base[i] == 'Q') {
uv_close((uv_handle_t*)&server);
server_closed = 1;
}
}
}
wr = (write_req_t*) malloc(sizeof *wr); wr = (write_req_t*) malloc(sizeof *wr);
uv_req_init(&wr->req, handle, after_write); uv_req_init(&wr->req, (uv_handle_t*)handle, after_write);
wr->buf.base = buf.base; wr->buf.base = buf.base;
wr->buf.len = nread; wr->buf.len = nread;
if (uv_write(&wr->req, &wr->buf, 1)) { if (uv_write(&wr->req, &wr->buf, 1)) {
@ -105,19 +118,33 @@ static void on_close(uv_handle_t* peer, int status) {
} }
static void on_accept(uv_handle_t* server) { static uv_buf_t echo_alloc(uv_tcp_t* handle, size_t suggested_size) {
uv_handle_t* handle = (uv_handle_t*) malloc(sizeof *handle); uv_buf_t buf;
buf.base = (char*) malloc(suggested_size);
buf.len = suggested_size;
return buf;
}
if (uv_accept(server, handle, on_close, NULL)) {
FATAL("uv_accept failed");
}
uv_read_start(handle, after_read); static void on_connection(uv_tcp_t* server, int status) {
uv_tcp_t* handle;
int r;
ASSERT(status == 0);
handle = (uv_tcp_t*) malloc(sizeof *handle);
ASSERT(handle != NULL);
r = uv_accept(server, handle, on_close, NULL);
ASSERT(r == 0);
r = uv_read_start(handle, echo_alloc, after_read);
ASSERT(r == 0);
} }
static void on_server_close(uv_handle_t* handle, int status) { static void on_server_close(uv_handle_t* handle, int status) {
ASSERT(handle == &server); ASSERT(handle == (uv_handle_t*)&server);
ASSERT(status == 0); ASSERT(status == 0);
} }
@ -133,14 +160,14 @@ static int echo_start(int port) {
return 1; return 1;
} }
r = uv_bind(&server, (struct sockaddr*) &addr); r = uv_bind(&server, addr);
if (r) { if (r) {
/* TODO: Error codes */ /* TODO: Error codes */
fprintf(stderr, "Bind error\n"); fprintf(stderr, "Bind error\n");
return 1; return 1;
} }
r = uv_listen(&server, 128, on_accept); r = uv_listen(&server, 128, on_connection);
if (r) { if (r) {
/* TODO: Error codes */ /* TODO: Error codes */
fprintf(stderr, "Listen error\n"); fprintf(stderr, "Listen error\n");
@ -151,25 +178,11 @@ static int echo_start(int port) {
} }
static int echo_stop() {
return uv_close(&server);
}
static uv_buf_t echo_alloc(uv_handle_t* handle, size_t suggested_size) {
uv_buf_t buf;
buf.base = (char*) malloc(suggested_size);
buf.len = suggested_size;
return buf;
}
HELPER_IMPL(echo_server) { HELPER_IMPL(echo_server) {
uv_init(echo_alloc); uv_init();
if (echo_start(TEST_PORT)) if (echo_start(TEST_PORT))
return 1; return 1;
fprintf(stderr, "Listening!\n");
uv_run(); uv_run();
return 0; return 0;
} }

6
deps/uv/test/runner-unix.c

@ -37,10 +37,6 @@
#include <sys/select.h> #include <sys/select.h>
#include <pthread.h> #include <pthread.h>
#define PATHMAX 1024
static char executable_path[PATHMAX] = { '\0' };
#ifdef __APPLE__ #ifdef __APPLE__
#include <mach-o/dyld.h> /* _NSGetExecutablePath */ #include <mach-o/dyld.h> /* _NSGetExecutablePath */
@ -84,7 +80,7 @@ int process_start(char* name, process_info_t* p) {
p->terminated = 0; p->terminated = 0;
p->status = 0; p->status = 0;
pid_t pid = vfork(); pid_t pid = fork();
if (pid < 0) { if (pid < 0) {
perror("vfork"); perror("vfork");

2
deps/uv/test/runner-win.c

@ -47,6 +47,8 @@ void platform_init(int argc, char **argv) {
/* Disable stdio output buffering. */ /* Disable stdio output buffering. */
setvbuf(stdout, NULL, _IONBF, 0); setvbuf(stdout, NULL, _IONBF, 0);
setvbuf(stderr, NULL, _IONBF, 0); setvbuf(stderr, NULL, _IONBF, 0);
strcpy(executable_path, argv[0]);
} }

9
deps/uv/test/runner.c

@ -24,6 +24,7 @@
#include "runner.h" #include "runner.h"
#include "task.h" #include "task.h"
char executable_path[PATHMAX] = { '\0' };
/* Start a specific process declared by TEST_ENTRY or TEST_HELPER. */ /* Start a specific process declared by TEST_ENTRY or TEST_HELPER. */
/* Returns the exit code of the specific process. */ /* Returns the exit code of the specific process. */
@ -75,6 +76,9 @@ int run_task(task_entry_t *test, int timeout, int benchmark_output) {
} }
} }
/* Wait a little bit to allow servers to start. Racy. */
uv_sleep(50);
/* Start the main test process. */ /* Start the main test process. */
if (process_start(test->process_name, &processes[process_count]) == -1) { if (process_start(test->process_name, &processes[process_count]) == -1) {
snprintf((char*)&errmsg, sizeof(errmsg), "process `%s` failed to start.", snprintf((char*)&errmsg, sizeof(errmsg), "process `%s` failed to start.",
@ -117,8 +121,7 @@ finalize:
/* Show error and output from processes if the test failed. */ /* Show error and output from processes if the test failed. */
if (!success) { if (!success) {
LOG("\n=============================================================\n"); LOGF("\n`%s` failed: %s\n", test->task_name, errmsg);
LOGF("`%s` failed: %s\n", test->task_name, errmsg);
for (i = 0; i < process_count; i++) { for (i = 0; i < process_count; i++) {
switch (process_output_size(&processes[i])) { switch (process_output_size(&processes[i])) {
@ -138,7 +141,7 @@ finalize:
break; break;
} }
} }
LOG("\n"); LOG("=============================================================\n");
/* In benchmark mode show concise output from the main process. */ /* In benchmark mode show concise output from the main process. */
} else if (benchmark_output) { } else if (benchmark_output) {

2
deps/uv/test/runner.h

@ -79,6 +79,8 @@ int run_task(task_entry_t *test, int timeout, int benchmark_output);
#define TEST_HELPER HELPER_ENTRY #define TEST_HELPER HELPER_ENTRY
#define BENCHMARK_HELPER HELPER_ENTRY #define BENCHMARK_HELPER HELPER_ENTRY
#define PATHMAX 1024
extern char executable_path[PATHMAX];
/* /*
* Include platform-dependent definitions * Include platform-dependent definitions

17
deps/uv/test/test-async.c

@ -25,9 +25,9 @@
#include <stdlib.h> #include <stdlib.h>
static uv_handle_t prepare_handle; static uv_prepare_t prepare_handle;
static uv_handle_t async1_handle; static uv_async_t async1_handle;
/* static uv_handle_t async2_handle; */ /* static uv_handle_t async2_handle; */
static int prepare_cb_called = 0; static int prepare_cb_called = 0;
@ -120,15 +120,8 @@ static void close_cb(uv_handle_t* handle, int status) {
} }
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf_t buf = {0, 0};
FATAL("alloc should not be called");
return buf;
}
static void async1_cb(uv_handle_t* handle, int status) { static void async1_cb(uv_handle_t* handle, int status) {
ASSERT(handle == &async1_handle); ASSERT(handle == (uv_handle_t*)&async1_handle);
ASSERT(status == 0); ASSERT(status == 0);
async1_cb_called++; async1_cb_called++;
@ -159,7 +152,7 @@ static void async2_cb(uv_handle_t* handle, int status) {
static void prepare_cb(uv_handle_t* handle, int status) { static void prepare_cb(uv_handle_t* handle, int status) {
int r; int r;
ASSERT(handle == &prepare_handle); ASSERT(handle == (uv_handle_t*)&prepare_handle);
ASSERT(status == 0); ASSERT(status == 0);
switch (prepare_cb_called) { switch (prepare_cb_called) {
@ -196,7 +189,7 @@ static void prepare_cb(uv_handle_t* handle, int status) {
TEST_IMPL(async) { TEST_IMPL(async) {
int r; int r;
uv_init(alloc_cb); uv_init();
r = uv_prepare_init(&prepare_handle, close_cb, NULL); r = uv_prepare_init(&prepare_handle, close_cb, NULL);
ASSERT(r == 0); ASSERT(r == 0);

56
deps/uv/test/test-bind-error.c

@ -36,28 +36,21 @@ static void close_cb(uv_handle_t* handle, int status) {
} }
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf_t buf = {0, 0};
FATAL("alloc should not be called");
return buf;
}
TEST_IMPL(bind_error_addrinuse) { TEST_IMPL(bind_error_addrinuse) {
struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", TEST_PORT); struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
uv_handle_t server1, server2; uv_tcp_t server1, server2;
int r; int r;
uv_init(alloc_cb); uv_init();
r = uv_tcp_init(&server1, close_cb, NULL); r = uv_tcp_init(&server1, close_cb, NULL);
ASSERT(r == 0); ASSERT(r == 0);
r = uv_bind(&server1, (struct sockaddr*) &addr); r = uv_bind(&server1, addr);
ASSERT(r == 0); ASSERT(r == 0);
r = uv_tcp_init(&server2, close_cb, NULL); r = uv_tcp_init(&server2, close_cb, NULL);
ASSERT(r == 0); ASSERT(r == 0);
r = uv_bind(&server2, (struct sockaddr*) &addr); r = uv_bind(&server2, addr);
ASSERT(r == 0); ASSERT(r == 0);
r = uv_listen(&server1, 128, NULL); r = uv_listen(&server1, 128, NULL);
@ -67,8 +60,8 @@ TEST_IMPL(bind_error_addrinuse) {
ASSERT(uv_last_error().code == UV_EADDRINUSE); ASSERT(uv_last_error().code == UV_EADDRINUSE);
uv_close(&server1); uv_close((uv_handle_t*)&server1);
uv_close(&server2); uv_close((uv_handle_t*)&server2);
uv_run(); uv_run();
@ -80,21 +73,21 @@ TEST_IMPL(bind_error_addrinuse) {
TEST_IMPL(bind_error_addrnotavail_1) { TEST_IMPL(bind_error_addrnotavail_1) {
struct sockaddr_in addr = uv_ip4_addr("127.255.255.255", TEST_PORT); struct sockaddr_in addr = uv_ip4_addr("127.255.255.255", TEST_PORT);
uv_handle_t server; uv_tcp_t server;
int r; int r;
uv_init(alloc_cb); uv_init();
r = uv_tcp_init(&server, close_cb, NULL); r = uv_tcp_init(&server, close_cb, NULL);
ASSERT(r == 0); ASSERT(r == 0);
r = uv_bind(&server, (struct sockaddr*) &addr); r = uv_bind(&server, addr);
/* It seems that Linux is broken here - bind succeeds. */ /* It seems that Linux is broken here - bind succeeds. */
if (r == -1) { if (r == -1) {
ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL); ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL);
} }
uv_close(&server); uv_close((uv_handle_t*)&server);
uv_run(); uv_run();
@ -106,18 +99,18 @@ TEST_IMPL(bind_error_addrnotavail_1) {
TEST_IMPL(bind_error_addrnotavail_2) { TEST_IMPL(bind_error_addrnotavail_2) {
struct sockaddr_in addr = uv_ip4_addr("4.4.4.4", TEST_PORT); struct sockaddr_in addr = uv_ip4_addr("4.4.4.4", TEST_PORT);
uv_handle_t server; uv_tcp_t server;
int r; int r;
uv_init(alloc_cb); uv_init();
r = uv_tcp_init(&server, close_cb, NULL); r = uv_tcp_init(&server, close_cb, NULL);
ASSERT(r == 0); ASSERT(r == 0);
r = uv_bind(&server, (struct sockaddr*) &addr); r = uv_bind(&server, addr);
ASSERT(r == -1); ASSERT(r == -1);
ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL); ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL);
uv_close(&server); uv_close((uv_handle_t*)&server);
uv_run(); uv_run();
@ -129,19 +122,22 @@ TEST_IMPL(bind_error_addrnotavail_2) {
TEST_IMPL(bind_error_fault) { TEST_IMPL(bind_error_fault) {
char garbage[] = "blah blah blah blah blah blah blah blah blah blah blah blah"; char garbage[] = "blah blah blah blah blah blah blah blah blah blah blah blah";
uv_handle_t server; struct sockaddr_in* garbage_addr;
uv_tcp_t server;
int r; int r;
uv_init(alloc_cb); garbage_addr = (struct sockaddr_in*) &garbage;
uv_init();
r = uv_tcp_init(&server, close_cb, NULL); r = uv_tcp_init(&server, close_cb, NULL);
ASSERT(r == 0); ASSERT(r == 0);
r = uv_bind(&server, (struct sockaddr*) &garbage); r = uv_bind(&server, *garbage_addr);
ASSERT(r == -1); ASSERT(r == -1);
ASSERT(uv_last_error().code == UV_EFAULT); ASSERT(uv_last_error().code == UV_EFAULT);
uv_close(&server); uv_close((uv_handle_t*)&server);
uv_run(); uv_run();
@ -155,21 +151,21 @@ TEST_IMPL(bind_error_fault) {
TEST_IMPL(bind_error_inval) { TEST_IMPL(bind_error_inval) {
struct sockaddr_in addr1 = uv_ip4_addr("0.0.0.0", TEST_PORT); struct sockaddr_in addr1 = uv_ip4_addr("0.0.0.0", TEST_PORT);
struct sockaddr_in addr2 = uv_ip4_addr("0.0.0.0", TEST_PORT_2); struct sockaddr_in addr2 = uv_ip4_addr("0.0.0.0", TEST_PORT_2);
uv_handle_t server; uv_tcp_t server;
int r; int r;
uv_init(alloc_cb); uv_init();
r = uv_tcp_init(&server, close_cb, NULL); r = uv_tcp_init(&server, close_cb, NULL);
ASSERT(r == 0); ASSERT(r == 0);
r = uv_bind(&server, (struct sockaddr*) &addr1); r = uv_bind(&server, addr1);
ASSERT(r == 0); ASSERT(r == 0);
r = uv_bind(&server, (struct sockaddr*) &addr2); r = uv_bind(&server, addr2);
ASSERT(r == -1); ASSERT(r == -1);
ASSERT(uv_last_error().code == UV_EINVAL); ASSERT(uv_last_error().code == UV_EINVAL);
uv_close(&server); uv_close((uv_handle_t*)&server);
uv_run(); uv_run();

37
deps/uv/test/test-callback-stack.c

@ -30,7 +30,8 @@
static const char MESSAGE[] = "Failure is for the weak. Everyone dies alone."; static const char MESSAGE[] = "Failure is for the weak. Everyone dies alone.";
static uv_handle_t client, timer; static uv_tcp_t client;
static uv_timer_t timer;
static uv_req_t connect_req, write_req, shutdown_req; static uv_req_t connect_req, write_req, shutdown_req;
static int nested = 0; static int nested = 0;
@ -42,6 +43,15 @@ static int bytes_received = 0;
static int shutdown_cb_called = 0; static int shutdown_cb_called = 0;
static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
uv_buf_t buf;
buf.len = size;
buf.base = (char*) malloc(size);
ASSERT(buf.base);
return buf;
}
static void close_cb(uv_handle_t* handle, int status) { static void close_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0); ASSERT(status == 0);
ASSERT(nested == 0 && "close_cb must be called from a fresh stack"); ASSERT(nested == 0 && "close_cb must be called from a fresh stack");
@ -58,7 +68,7 @@ static void shutdown_cb(uv_req_t* req, int status) {
} }
static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) { static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
ASSERT(nested == 0 && "read_cb must be called from a fresh stack"); ASSERT(nested == 0 && "read_cb must be called from a fresh stack");
printf("Read. nread == %d\n", nread); printf("Read. nread == %d\n", nread);
@ -72,7 +82,7 @@ static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
ASSERT(uv_last_error().code == UV_EOF); ASSERT(uv_last_error().code == UV_EOF);
nested++; nested++;
if (uv_close(handle)) { if (uv_close((uv_handle_t*)tcp)) {
FATAL("uv_close failed"); FATAL("uv_close failed");
} }
nested--; nested--;
@ -88,7 +98,7 @@ static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
/* from a fresh stack. */ /* from a fresh stack. */
if (bytes_received == sizeof MESSAGE) { if (bytes_received == sizeof MESSAGE) {
nested++; nested++;
uv_req_init(&shutdown_req, handle, shutdown_cb); uv_req_init(&shutdown_req, (uv_handle_t*)tcp, shutdown_cb);
puts("Shutdown"); puts("Shutdown");
@ -103,14 +113,14 @@ static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
static void timer_cb(uv_handle_t* handle, int status) { static void timer_cb(uv_handle_t* handle, int status) {
int r; int r;
ASSERT(handle == &timer); ASSERT(handle == (uv_handle_t*)&timer);
ASSERT(status == 0); ASSERT(status == 0);
ASSERT(nested == 0 && "timer_cb must be called from a fresh stack"); ASSERT(nested == 0 && "timer_cb must be called from a fresh stack");
puts("Timeout complete. Now read data..."); puts("Timeout complete. Now read data...");
nested++; nested++;
if (uv_read_start(&client, read_cb)) { if (uv_read_start(&client, alloc_cb, read_cb)) {
FATAL("uv_read_start failed"); FATAL("uv_read_start failed");
} }
nested--; nested--;
@ -170,19 +180,10 @@ static void connect_cb(uv_req_t* req, int status) {
} }
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf_t buf;
buf.len = size;
buf.base = (char*) malloc(size);
ASSERT(buf.base);
return buf;
}
TEST_IMPL(callback_stack) { TEST_IMPL(callback_stack) {
struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT); struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
uv_init(alloc_cb); uv_init();
if (uv_tcp_init(&client, &close_cb, NULL)) { if (uv_tcp_init(&client, &close_cb, NULL)) {
FATAL("uv_tcp_init failed"); FATAL("uv_tcp_init failed");
@ -191,8 +192,8 @@ TEST_IMPL(callback_stack) {
puts("Connecting..."); puts("Connecting...");
nested++; nested++;
uv_req_init(&connect_req, &client, connect_cb); uv_req_init(&connect_req, (uv_handle_t*)&client, connect_cb);
if (uv_connect(&connect_req, (struct sockaddr*) &addr)) { if (uv_connect(&connect_req, addr)) {
FATAL("uv_connect failed"); FATAL("uv_connect failed");
} }
nested--; nested--;

93
deps/uv/test/test-connection-fail.c

@ -26,11 +26,15 @@
#include <stdio.h> #include <stdio.h>
static uv_handle_t handle; static uv_tcp_t tcp;
static uv_req_t req; static uv_req_t req;
static int connect_cb_calls; static int connect_cb_calls;
static int close_cb_calls; static int close_cb_calls;
static uv_timer_t timer;
static int timer_close_cb_calls;
static int timer_cb_calls;
static void on_close(uv_handle_t* handle, int status) { static void on_close(uv_handle_t* handle, int status) {
ASSERT(status == 0); ASSERT(status == 0);
@ -38,48 +42,111 @@ static void on_close(uv_handle_t* handle, int status) {
} }
static void on_connect(uv_req_t *req, int status) { static void timer_close_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0);
timer_close_cb_calls++;
}
static void timer_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0);
timer_cb_calls++;
/*
* These are the important asserts. The connection callback has been made,
* but libuv hasn't automatically closed the socket. The user must
* uv_close the handle manually.
*/
ASSERT(close_cb_calls == 0);
ASSERT(connect_cb_calls == 1);
/* Close the tcp handle. */
uv_close((uv_handle_t*)&tcp);
/* Close the timer. */
uv_close(handle);
}
static void on_connect_with_close(uv_req_t *req, int status) {
ASSERT(status == -1); ASSERT(status == -1);
ASSERT(uv_last_error().code == UV_ECONNREFUSED); ASSERT(uv_last_error().code == UV_ECONNREFUSED);
connect_cb_calls++; connect_cb_calls++;
ASSERT(close_cb_calls == 0);
uv_close(req->handle); uv_close(req->handle);
} }
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) { static void on_connect_without_close(uv_req_t *req, int status) {
uv_buf_t buf = {0, 0}; ASSERT(status == -1);
FATAL("alloc should not be called"); ASSERT(uv_last_error().code == UV_ECONNREFUSED);
return buf; connect_cb_calls++;
uv_timer_start(&timer, timer_cb, 100, 0);
ASSERT(close_cb_calls == 0);
} }
TEST_IMPL(connection_fail) { void connection_fail(uv_connect_cb connect_cb) {
struct sockaddr_in client_addr, server_addr; struct sockaddr_in client_addr, server_addr;
int r; int r;
uv_init(alloc_cb);
client_addr = uv_ip4_addr("0.0.0.0", 0); client_addr = uv_ip4_addr("0.0.0.0", 0);
/* There should be no servers listening on this port. */ /* There should be no servers listening on this port. */
server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT); server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
/* Try to connec to the server and do NUM_PINGS ping-pongs. */ /* Try to connec to the server and do NUM_PINGS ping-pongs. */
r = uv_tcp_init(&handle, on_close, NULL); r = uv_tcp_init(&tcp, on_close, NULL);
ASSERT(!r); ASSERT(!r);
/* We are never doing multiple reads/connects at a time anyway. */ /* We are never doing multiple reads/connects at a time anyway. */
/* so these handles can be pre-initialized. */ /* so these handles can be pre-initialized. */
uv_req_init(&req, &handle, on_connect); uv_req_init(&req, (uv_handle_t*)&tcp, connect_cb);
uv_bind(&handle, (struct sockaddr*)&client_addr); uv_bind(&tcp, client_addr);
r = uv_connect(&req, (struct sockaddr*)&server_addr); r = uv_connect(&req, server_addr);
ASSERT(!r); ASSERT(!r);
uv_run(); uv_run();
ASSERT(connect_cb_calls == 1); ASSERT(connect_cb_calls == 1);
ASSERT(close_cb_calls == 1); ASSERT(close_cb_calls == 1);
}
/*
* This test attempts to connect to a port where no server is running. We
* expect an error.
*/
TEST_IMPL(connection_fail) {
uv_init();
connection_fail(on_connect_with_close);
ASSERT(timer_close_cb_calls == 0);
ASSERT(timer_cb_calls == 0);
return 0;
}
/*
* This test is the same as the first except it check that the close
* callback of the tcp handle hasn't been made after the failed connection
* attempt.
*/
TEST_IMPL(connection_fail_doesnt_auto_close) {
uv_init();
uv_timer_init(&timer, timer_close_cb, NULL);
connection_fail(on_connect_without_close);
ASSERT(timer_close_cb_calls == 1);
ASSERT(timer_cb_calls == 1);
return 0; return 0;
} }

64
deps/uv/test/test-delayed-accept.c

@ -27,12 +27,20 @@
static char BUFFER[1024]; static char BUFFER[1024];
static int accept_cb_called = 0; static int connection_cb_called = 0;
static int do_accept_called = 0; static int do_accept_called = 0;
static int close_cb_called = 0; static int close_cb_called = 0;
static int connect_cb_called = 0; static int connect_cb_called = 0;
static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
uv_buf_t buf;
buf.base = (char*)malloc(size);
buf.len = size;
return buf;
}
static void close_cb(uv_handle_t* handle, int status) { static void close_cb(uv_handle_t* handle, int status) {
ASSERT(handle != NULL); ASSERT(handle != NULL);
ASSERT(status == 0); ASSERT(status == 0);
@ -44,27 +52,27 @@ static void close_cb(uv_handle_t* handle, int status) {
static void do_accept(uv_handle_t* timer_handle, int status) { static void do_accept(uv_handle_t* timer_handle, int status) {
uv_handle_t* server; uv_tcp_t* server;
uv_handle_t* accepted_handle = (uv_handle_t*)malloc(sizeof *accepted_handle); uv_tcp_t* accepted_handle = (uv_tcp_t*)malloc(sizeof *accepted_handle);
int r; int r;
ASSERT(timer_handle != NULL); ASSERT(timer_handle != NULL);
ASSERT(status == 0); ASSERT(status == 0);
ASSERT(accepted_handle != NULL); ASSERT(accepted_handle != NULL);
server = (uv_handle_t*)timer_handle->data; server = (uv_tcp_t*)timer_handle->data;
r = uv_accept(server, accepted_handle, close_cb, NULL); r = uv_accept(server, accepted_handle, close_cb, NULL);
ASSERT(r == 0); ASSERT(r == 0);
do_accept_called++; do_accept_called++;
/* Immediately close the accepted handle. */ /* Immediately close the accepted handle. */
r = uv_close(accepted_handle); r = uv_close((uv_handle_t*)accepted_handle);
ASSERT(r == 0); ASSERT(r == 0);
/* After accepting the two clients close the server handle */ /* After accepting the two clients close the server handle */
if (do_accept_called == 2) { if (do_accept_called == 2) {
r = uv_close(server); r = uv_close((uv_handle_t*)server);
ASSERT(r == 0); ASSERT(r == 0);
} }
@ -74,26 +82,28 @@ static void do_accept(uv_handle_t* timer_handle, int status) {
} }
static void accept_cb(uv_handle_t* handle) { static void connection_cb(uv_tcp_t* tcp, int status) {
int r; int r;
uv_handle_t* timer_handle; uv_timer_t* timer_handle;
ASSERT(status == 0);
timer_handle = (uv_handle_t*)malloc(sizeof *timer_handle); timer_handle = (uv_timer_t*)malloc(sizeof *timer_handle);
ASSERT(timer_handle != NULL); ASSERT(timer_handle != NULL);
/* Accept the client after 1 second */ /* Accept the client after 1 second */
r = uv_timer_init(timer_handle, close_cb, (void*)handle); r = uv_timer_init(timer_handle, close_cb, (void*)tcp);
ASSERT(r == 0); ASSERT(r == 0);
r = uv_timer_start(timer_handle, do_accept, 1000, 0); r = uv_timer_start(timer_handle, do_accept, 1000, 0);
ASSERT(r == 0); ASSERT(r == 0);
accept_cb_called++; connection_cb_called++;
} }
static void start_server() { static void start_server() {
struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", TEST_PORT); struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
uv_handle_t* server = (uv_handle_t*)malloc(sizeof *server); uv_tcp_t* server = (uv_tcp_t*)malloc(sizeof *server);
int r; int r;
ASSERT(server != NULL); ASSERT(server != NULL);
@ -101,17 +111,17 @@ static void start_server() {
r = uv_tcp_init(server, close_cb, NULL); r = uv_tcp_init(server, close_cb, NULL);
ASSERT(r == 0); ASSERT(r == 0);
r = uv_bind(server, (struct sockaddr*) &addr); r = uv_bind(server, addr);
ASSERT(r == 0); ASSERT(r == 0);
r = uv_listen(server, 128, accept_cb); r = uv_listen(server, 128, connection_cb);
ASSERT(r == 0); ASSERT(r == 0);
} }
static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) { static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
/* The server will not send anything, it should close gracefully. */ /* The server will not send anything, it should close gracefully. */
ASSERT(handle != NULL); ASSERT(tcp != NULL);
ASSERT(nread == -1); ASSERT(nread == -1);
ASSERT(uv_last_error().code == UV_EOF); ASSERT(uv_last_error().code == UV_EOF);
@ -119,7 +129,7 @@ static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
free(buf.base); free(buf.base);
} }
uv_close(handle); uv_close((uv_handle_t*)tcp);
} }
@ -131,7 +141,7 @@ static void connect_cb(uv_req_t* req, int status) {
/* Not that the server will send anything, but otherwise we'll never know */ /* Not that the server will send anything, but otherwise we'll never know */
/* when te server closes the connection. */ /* when te server closes the connection. */
r = uv_read_start(req->handle, read_cb); r = uv_read_start((uv_tcp_t*)(req->handle), alloc_cb, read_cb);
ASSERT(r == 0); ASSERT(r == 0);
connect_cb_called++; connect_cb_called++;
@ -142,7 +152,7 @@ static void connect_cb(uv_req_t* req, int status) {
static void client_connect() { static void client_connect() {
struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT); struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
uv_handle_t* client = (uv_handle_t*)malloc(sizeof *client); uv_tcp_t* client = (uv_tcp_t*)malloc(sizeof *client);
uv_req_t* connect_req = (uv_req_t*)malloc(sizeof *connect_req); uv_req_t* connect_req = (uv_req_t*)malloc(sizeof *connect_req);
int r; int r;
@ -152,23 +162,15 @@ static void client_connect() {
r = uv_tcp_init(client, close_cb, NULL); r = uv_tcp_init(client, close_cb, NULL);
ASSERT(r == 0); ASSERT(r == 0);
uv_req_init(connect_req, client, connect_cb); uv_req_init(connect_req, (uv_handle_t*)client, connect_cb);
r = uv_connect(connect_req, (struct sockaddr*)&addr); r = uv_connect(connect_req, addr);
ASSERT(r == 0); ASSERT(r == 0);
} }
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf_t buf;
buf.base = (char*)malloc(size);
buf.len = size;
return buf;
}
TEST_IMPL(delayed_accept) { TEST_IMPL(delayed_accept) {
uv_init(alloc_cb); uv_init();
start_server(); start_server();
@ -177,7 +179,7 @@ TEST_IMPL(delayed_accept) {
uv_run(); uv_run();
ASSERT(accept_cb_called == 2); ASSERT(connection_cb_called == 2);
ASSERT(do_accept_called == 2); ASSERT(do_accept_called == 2);
ASSERT(connect_cb_called == 2); ASSERT(connect_cb_called == 2);
ASSERT(close_cb_called == 7); ASSERT(close_cb_called == 7);

53
deps/uv/test/test-get-currentexe.c

@ -0,0 +1,53 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "../uv.h"
#include "task.h"
#include <string.h>
#define PATHMAX 1024
extern char executable_path[];
TEST_IMPL(get_currentexe) {
char buffer[PATHMAX];
size_t size;
char* match;
int r;
size = sizeof(buffer) / sizeof(buffer[0]);
r = uv_get_exepath(buffer, &size);
ASSERT(!r);
match = strstr(buffer, executable_path);
/* Verify that the path returned from uv_get_exepath is a subdirectory of executable_path */
ASSERT(match && !strcmp(match, executable_path));
ASSERT(size == strlen(buffer));
/* Negative tests */
size = sizeof(buffer) / sizeof(buffer[0]);
r = uv_get_exepath(NULL, &size);
ASSERT(r == -1);
r = uv_get_exepath(buffer, NULL);
ASSERT(r == -1);
return 0;
}

9
deps/uv/test/test-list.h

@ -28,6 +28,8 @@ TEST_DECLARE (bind_error_addrnotavail_2)
TEST_DECLARE (bind_error_fault) TEST_DECLARE (bind_error_fault)
TEST_DECLARE (bind_error_inval) TEST_DECLARE (bind_error_inval)
TEST_DECLARE (connection_fail) TEST_DECLARE (connection_fail)
TEST_DECLARE (connection_fail_doesnt_auto_close)
TEST_DECLARE (shutdown_eof)
TEST_DECLARE (callback_stack) TEST_DECLARE (callback_stack)
TEST_DECLARE (timer) TEST_DECLARE (timer)
TEST_DECLARE (timer_again) TEST_DECLARE (timer_again)
@ -38,6 +40,7 @@ TEST_DECLARE (async_ref)
TEST_DECLARE (prepare_ref) TEST_DECLARE (prepare_ref)
TEST_DECLARE (check_ref) TEST_DECLARE (check_ref)
TEST_DECLARE (async) TEST_DECLARE (async)
TEST_DECLARE (get_currentexe)
TEST_DECLARE (fail_always) TEST_DECLARE (fail_always)
TEST_DECLARE (pass_always) TEST_DECLARE (pass_always)
HELPER_DECLARE (echo_server) HELPER_DECLARE (echo_server)
@ -62,6 +65,10 @@ TASK_LIST_START
TEST_ENTRY (bind_error_inval) TEST_ENTRY (bind_error_inval)
TEST_ENTRY (connection_fail) TEST_ENTRY (connection_fail)
TEST_ENTRY (connection_fail_doesnt_auto_close)
TEST_ENTRY (shutdown_eof)
TEST_HELPER (shutdown_eof, echo_server)
TEST_ENTRY (callback_stack) TEST_ENTRY (callback_stack)
TEST_HELPER (callback_stack, echo_server) TEST_HELPER (callback_stack, echo_server)
@ -80,6 +87,8 @@ TASK_LIST_START
TEST_ENTRY (async) TEST_ENTRY (async)
TEST_ENTRY (get_currentexe)
#if 0 #if 0
/* These are for testing the test runner. */ /* These are for testing the test runner. */
TEST_ENTRY (fail_always) TEST_ENTRY (fail_always)

79
deps/uv/test/test-loop-handles.c

@ -75,15 +75,15 @@
#define TIMEOUT 100 #define TIMEOUT 100
static uv_handle_t prepare_1_handle; static uv_prepare_t prepare_1_handle;
static uv_handle_t prepare_2_handle; static uv_prepare_t prepare_2_handle;
static uv_handle_t check_handle; static uv_check_t check_handle;
static uv_handle_t idle_1_handles[IDLE_COUNT]; static uv_idle_t idle_1_handles[IDLE_COUNT];
static uv_handle_t idle_2_handle; static uv_idle_t idle_2_handle;
static uv_handle_t timer_handle; static uv_timer_t timer_handle;
static int loop_iteration = 0; static int loop_iteration = 0;
@ -110,7 +110,7 @@ static int timer_cb_called = 0;
static void timer_cb(uv_handle_t* handle, int status) { static void timer_cb(uv_handle_t* handle, int status) {
ASSERT(handle == &timer_handle); ASSERT(handle == (uv_handle_t*)&timer_handle);
ASSERT(status == 0); ASSERT(status == 0);
timer_cb_called++; timer_cb_called++;
@ -127,7 +127,7 @@ static void idle_2_cb(uv_handle_t* handle, int status) {
LOG("IDLE_2_CB\n"); LOG("IDLE_2_CB\n");
ASSERT(handle == &idle_2_handle); ASSERT(handle == (uv_handle_t*)&idle_2_handle);
ASSERT(status == 0); ASSERT(status == 0);
idle_2_cb_called++; idle_2_cb_called++;
@ -140,7 +140,7 @@ static void idle_2_cb(uv_handle_t* handle, int status) {
static void idle_2_close_cb(uv_handle_t* handle, int status){ static void idle_2_close_cb(uv_handle_t* handle, int status){
LOG("IDLE_2_CLOSE_CB\n"); LOG("IDLE_2_CLOSE_CB\n");
ASSERT(handle == &idle_2_handle); ASSERT(handle == (uv_handle_t*)&idle_2_handle);
ASSERT(status == 0); ASSERT(status == 0);
ASSERT(idle_2_is_active); ASSERT(idle_2_is_active);
@ -173,7 +173,7 @@ static void idle_1_cb(uv_handle_t* handle, int status) {
idle_1_cb_called++; idle_1_cb_called++;
if (idle_1_cb_called % 5 == 0) { if (idle_1_cb_called % 5 == 0) {
r = uv_idle_stop(handle); r = uv_idle_stop((uv_idle_t*)handle);
ASSERT(r == 0); ASSERT(r == 0);
idles_1_active--; idles_1_active--;
} }
@ -195,7 +195,7 @@ static void check_cb(uv_handle_t* handle, int status) {
LOG("CHECK_CB\n"); LOG("CHECK_CB\n");
ASSERT(handle == &check_handle); ASSERT(handle == (uv_handle_t*)&check_handle);
ASSERT(status == 0); ASSERT(status == 0);
/* XXX /* XXX
@ -213,22 +213,22 @@ static void check_cb(uv_handle_t* handle, int status) {
} else { } else {
/* End of the test - close all handles */ /* End of the test - close all handles */
r = uv_close(&prepare_1_handle); r = uv_close((uv_handle_t*)&prepare_1_handle);
ASSERT(r == 0); ASSERT(r == 0);
r = uv_close(&check_handle); r = uv_close((uv_handle_t*)&check_handle);
ASSERT(r == 0); ASSERT(r == 0);
r = uv_close(&prepare_2_handle); r = uv_close((uv_handle_t*)&prepare_2_handle);
ASSERT(r == 0); ASSERT(r == 0);
for (i = 0; i < IDLE_COUNT; i++) { for (i = 0; i < IDLE_COUNT; i++) {
r = uv_close(&idle_1_handles[i]); r = uv_close((uv_handle_t*)&idle_1_handles[i]);
ASSERT(r == 0); ASSERT(r == 0);
} }
/* This handle is closed/recreated every time, close it only if it is */ /* This handle is closed/recreated every time, close it only if it is */
/* active.*/ /* active.*/
if (idle_2_is_active) { if (idle_2_is_active) {
r = uv_close(&idle_2_handle); r = uv_close((uv_handle_t*)&idle_2_handle);
ASSERT(r == 0); ASSERT(r == 0);
} }
} }
@ -239,7 +239,7 @@ static void check_cb(uv_handle_t* handle, int status) {
static void check_close_cb(uv_handle_t* handle, int status){ static void check_close_cb(uv_handle_t* handle, int status){
LOG("CHECK_CLOSE_CB\n"); LOG("CHECK_CLOSE_CB\n");
ASSERT(handle == &check_handle); ASSERT(handle == (uv_handle_t*)&check_handle);
ASSERT(status == 0); ASSERT(status == 0);
check_close_cb_called++; check_close_cb_called++;
@ -251,7 +251,7 @@ static void prepare_2_cb(uv_handle_t* handle, int status) {
LOG("PREPARE_2_CB\n"); LOG("PREPARE_2_CB\n");
ASSERT(handle == &prepare_2_handle); ASSERT(handle == (uv_handle_t*)&prepare_2_handle);
ASSERT(status == 0); ASSERT(status == 0);
/* XXX ASSERT(idles_1_active == 0); */ /* XXX ASSERT(idles_1_active == 0); */
@ -263,7 +263,7 @@ static void prepare_2_cb(uv_handle_t* handle, int status) {
/* (loop_iteration % 2 == 0) cannot be true. */ /* (loop_iteration % 2 == 0) cannot be true. */
ASSERT(loop_iteration % 2 != 0); ASSERT(loop_iteration % 2 != 0);
r = uv_prepare_stop(handle); r = uv_prepare_stop((uv_prepare_t*)handle);
ASSERT(r == 0); ASSERT(r == 0);
prepare_2_cb_called++; prepare_2_cb_called++;
@ -272,7 +272,7 @@ static void prepare_2_cb(uv_handle_t* handle, int status) {
static void prepare_2_close_cb(uv_handle_t* handle, int status) { static void prepare_2_close_cb(uv_handle_t* handle, int status) {
LOG("PREPARE_2_CLOSE_CB\n"); LOG("PREPARE_2_CLOSE_CB\n");
ASSERT(handle == &prepare_2_handle); ASSERT(handle == (uv_handle_t*)&prepare_2_handle);
ASSERT(status == 0); ASSERT(status == 0);
prepare_2_close_cb_called++; prepare_2_close_cb_called++;
@ -284,7 +284,7 @@ static void prepare_1_cb(uv_handle_t* handle, int status) {
LOG("PREPARE_1_CB\n"); LOG("PREPARE_1_CB\n");
ASSERT(handle == &prepare_1_handle); ASSERT(handle == (uv_handle_t*)&prepare_1_handle);
ASSERT(status == 0); ASSERT(status == 0);
/* XXX /* XXX
@ -306,25 +306,18 @@ static void prepare_1_cb(uv_handle_t* handle, int status) {
static void prepare_1_close_cb(uv_handle_t* handle, int status){ static void prepare_1_close_cb(uv_handle_t* handle, int status){
LOG("PREPARE_1_CLOSE_CB"); LOG("PREPARE_1_CLOSE_CB");
ASSERT(handle == &prepare_1_handle); ASSERT(handle == (uv_handle_t*)&prepare_1_handle);
ASSERT(status == 0); ASSERT(status == 0);
prepare_1_close_cb_called++; prepare_1_close_cb_called++;
} }
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf_t rv = { 0, 0 };
FATAL("alloc_cb should never be called in this test");
return rv;
}
TEST_IMPL(loop_handles) { TEST_IMPL(loop_handles) {
int i; int i;
int r; int r;
uv_init(alloc_cb); uv_init();
r = uv_prepare_init(&prepare_1_handle, prepare_1_close_cb, NULL); r = uv_prepare_init(&prepare_1_handle, prepare_1_close_cb, NULL);
ASSERT(r == 0); ASSERT(r == 0);
@ -387,50 +380,50 @@ TEST_IMPL(loop_handles) {
TEST_IMPL(ref) { TEST_IMPL(ref) {
uv_init(alloc_cb); uv_init();
uv_run(); uv_run();
return 0; return 0;
} }
TEST_IMPL(idle_ref) { TEST_IMPL(idle_ref) {
uv_handle_t h; uv_idle_t h;
uv_init(alloc_cb); uv_init();
uv_idle_init(&h, NULL, NULL); uv_idle_init(&h, NULL, NULL);
uv_idle_start(&h, NULL); uv_idle_start(&h, NULL);
uv_unref(&h); uv_unref();
uv_run(); uv_run();
return 0; return 0;
} }
TEST_IMPL(async_ref) { TEST_IMPL(async_ref) {
uv_handle_t h; uv_async_t h;
uv_init(alloc_cb); uv_init();
uv_async_init(&h, NULL, NULL, NULL); uv_async_init(&h, NULL, NULL, NULL);
uv_unref(&h); uv_unref();
uv_run(); uv_run();
return 0; return 0;
} }
TEST_IMPL(prepare_ref) { TEST_IMPL(prepare_ref) {
uv_handle_t h; uv_prepare_t h;
uv_init(alloc_cb); uv_init();
uv_prepare_init(&h, NULL, NULL); uv_prepare_init(&h, NULL, NULL);
uv_prepare_start(&h, NULL); uv_prepare_start(&h, NULL);
uv_unref(&h); uv_unref();
uv_run(); uv_run();
return 0; return 0;
} }
TEST_IMPL(check_ref) { TEST_IMPL(check_ref) {
uv_handle_t h; uv_check_t h;
uv_init(alloc_cb); uv_init();
uv_check_init(&h, NULL, NULL); uv_check_init(&h, NULL, NULL);
uv_check_start(&h, NULL); uv_check_start(&h, NULL);
uv_unref(&h); uv_unref();
uv_run(); uv_run();
return 0; return 0;
} }

39
deps/uv/test/test-ping-pong.c

@ -39,7 +39,7 @@ static char PING[] = "PING\n";
typedef struct { typedef struct {
int pongs; int pongs;
int state; int state;
uv_handle_t handle; uv_tcp_t tcp;
uv_req_t connect_req; uv_req_t connect_req;
uv_req_t read_req; uv_req_t read_req;
char read_buffer[BUFSIZE]; char read_buffer[BUFSIZE];
@ -48,6 +48,14 @@ typedef struct {
void pinger_try_read(pinger_t* pinger); void pinger_try_read(pinger_t* pinger);
static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
uv_buf_t buf;
buf.base = (char*)malloc(size);
buf.len = size;
return buf;
}
static void pinger_on_close(uv_handle_t* handle, int status) { static void pinger_on_close(uv_handle_t* handle, int status) {
pinger_t* pinger = (pinger_t*)handle->data; pinger_t* pinger = (pinger_t*)handle->data;
@ -75,7 +83,7 @@ static void pinger_write_ping(pinger_t* pinger) {
buf.len = strlen(PING); buf.len = strlen(PING);
req = (uv_req_t*)malloc(sizeof(*req)); req = (uv_req_t*)malloc(sizeof(*req));
uv_req_init(req, &pinger->handle, pinger_after_write); uv_req_init(req, (uv_handle_t*)(&pinger->tcp), pinger_after_write);
if (uv_write(req, &buf, 1)) { if (uv_write(req, &buf, 1)) {
FATAL("uv_write failed"); FATAL("uv_write failed");
@ -85,11 +93,11 @@ static void pinger_write_ping(pinger_t* pinger) {
} }
static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) { static void pinger_read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
unsigned int i; unsigned int i;
pinger_t* pinger; pinger_t* pinger;
pinger = (pinger_t*)handle->data; pinger = (pinger_t*)tcp->data;
if (nread < 0) { if (nread < 0) {
ASSERT(uv_last_error().code == UV_EOF); ASSERT(uv_last_error().code == UV_EOF);
@ -100,7 +108,7 @@ static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
free(buf.base); free(buf.base);
} }
uv_close(&pinger->handle); uv_close((uv_handle_t*)(&pinger->tcp));
return; return;
} }
@ -115,7 +123,7 @@ static void pinger_read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
if (pinger->pongs < NUM_PINGS) { if (pinger->pongs < NUM_PINGS) {
pinger_write_ping(pinger); pinger_write_ping(pinger);
} else { } else {
uv_close(&pinger->handle); uv_close((uv_handle_t*)(&pinger->tcp));
return; return;
} }
} }
@ -130,7 +138,7 @@ static void pinger_on_connect(uv_req_t *req, int status) {
pinger_write_ping(pinger); pinger_write_ping(pinger);
uv_read_start(req->handle, pinger_read_cb); uv_read_start((uv_tcp_t*)(req->handle), alloc_cb, pinger_read_cb);
} }
@ -144,28 +152,21 @@ static void pinger_new() {
pinger->pongs = 0; pinger->pongs = 0;
/* Try to connec to the server and do NUM_PINGS ping-pongs. */ /* Try to connec to the server and do NUM_PINGS ping-pongs. */
r = uv_tcp_init(&pinger->handle, pinger_on_close, (void*)pinger); r = uv_tcp_init(&pinger->tcp, pinger_on_close, (void*)pinger);
ASSERT(!r); ASSERT(!r);
/* We are never doing multiple reads/connects at a time anyway. */ /* We are never doing multiple reads/connects at a time anyway. */
/* so these handles can be pre-initialized. */ /* so these handles can be pre-initialized. */
uv_req_init(&pinger->connect_req, &pinger->handle, pinger_on_connect); uv_req_init(&pinger->connect_req, (uv_handle_t*)(&pinger->tcp),
pinger_on_connect);
r = uv_connect(&pinger->connect_req, (struct sockaddr*)&server_addr); r = uv_connect(&pinger->connect_req, server_addr);
ASSERT(!r); ASSERT(!r);
} }
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf_t buf;
buf.base = (char*)malloc(size);
buf.len = size;
return buf;
}
TEST_IMPL(ping_pong) { TEST_IMPL(ping_pong) {
uv_init(alloc_cb); uv_init();
pinger_new(); pinger_new();
uv_run(); uv_run();

176
deps/uv/test/test-shutdown-eof.c

@ -0,0 +1,176 @@
/* Copyright Joyent, Inc. and other Node contributors. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include "../uv.h"
#include "task.h"
#include <stdio.h>
#include <stdlib.h>
static uv_timer_t timer;
static uv_tcp_t tcp;
static uv_req_t connect_req, write_req, shutdown_req;
static uv_buf_t qbuf;
static int got_q;
static int got_eof;
static int called_connect_cb;
static int called_shutdown_cb;
static int called_tcp_close_cb;
static int called_timer_close_cb;
static int called_timer_cb;
static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
uv_buf_t buf;
buf.base = (char*)malloc(size);
buf.len = size;
return buf;
}
static void read_cb(uv_tcp_t* t, int nread, uv_buf_t buf) {
ASSERT(t == &tcp);
if (!got_q) {
ASSERT(nread == 1);
ASSERT(!got_eof);
ASSERT(buf.base[0] == 'Q');
free(buf.base);
got_q = 1;
puts("got Q");
} else {
ASSERT(uv_last_error().code == UV_EOF);
if (buf.base) {
free(buf.base);
}
got_eof = 1;
puts("got EOF");
}
}
static void shutdown_cb(uv_req_t *req, int status) {
ASSERT(req == &shutdown_req);
ASSERT(called_connect_cb == 1);
ASSERT(!got_eof);
ASSERT(called_tcp_close_cb == 0);
ASSERT(called_timer_close_cb == 0);
ASSERT(called_timer_cb == 0);
called_shutdown_cb++;
}
static void connect_cb(uv_req_t *req, int status) {
ASSERT(status == 0);
ASSERT(req == &connect_req);
/* Start reading from our connection so we can receive the EOF. */
uv_read_start(&tcp, alloc_cb, read_cb);
/*
* Write the letter 'Q' to gracefully kill the echo-server. This will not
* effect our connection.
*/
uv_req_init(&write_req, (uv_handle_t*)&tcp, NULL);
uv_write(&write_req, &qbuf, 1);
/* Shutdown our end of the connection. */
uv_req_init(&shutdown_req, (uv_handle_t*)&tcp, shutdown_cb);
uv_shutdown(&shutdown_req);
called_connect_cb++;
ASSERT(called_shutdown_cb == 0);
}
void tcp_close_cb(uv_handle_t* handle, int status) {
ASSERT(handle == (uv_handle_t*) &tcp);
ASSERT(called_connect_cb == 1);
ASSERT(got_q);
ASSERT(got_eof);
ASSERT(called_timer_cb == 1);
called_tcp_close_cb++;
}
void timer_close_cb(uv_handle_t* handle, int status) {
ASSERT(handle == (uv_handle_t*) &timer);
called_timer_close_cb++;
}
void timer_cb(uv_handle_t* handle, int status) {
ASSERT(handle == (uv_handle_t*) &timer);
uv_close(handle);
/*
* The most important assert of the test: we have not received
* tcp_close_cb yet.
*/
ASSERT(called_tcp_close_cb == 0);
uv_close((uv_handle_t*) &tcp);
called_timer_cb++;
}
/*
* This test has a client which connects to the echo_server and immediately
* issues a shutdown. The echo-server, in response, will also shutdown their
* connection. We check, with a timer, that libuv is not automatically
* calling uv_close when the client receives the EOF from echo-server.
*/
TEST_IMPL(shutdown_eof) {
struct sockaddr_in server_addr;
int r;
uv_init();
qbuf.base = "Q";
qbuf.len = 1;
uv_timer_init(&timer, timer_close_cb, NULL);
uv_timer_start(&timer, timer_cb, 100, 0);
server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
r = uv_tcp_init(&tcp, tcp_close_cb, NULL);
ASSERT(!r);
uv_req_init(&connect_req, (uv_handle_t*) &tcp, connect_cb);
r = uv_connect(&connect_req, server_addr);
ASSERT(!r);
uv_run();
ASSERT(called_connect_cb == 1);
ASSERT(called_shutdown_cb == 1);
ASSERT(got_eof);
ASSERT(got_q);
ASSERT(called_tcp_close_cb == 1);
ASSERT(called_timer_close_cb == 1);
ASSERT(called_timer_cb == 1);
return 0;
}

48
deps/uv/test/test-tcp-writealot.c

@ -45,6 +45,14 @@ static int bytes_received = 0;
static int bytes_received_done = 0; static int bytes_received_done = 0;
static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
uv_buf_t buf;
buf.base = (char*)malloc(size);
buf.len = size;
return buf;
}
static void close_cb(uv_handle_t* handle, int status) { static void close_cb(uv_handle_t* handle, int status) {
ASSERT(handle != NULL); ASSERT(handle != NULL);
ASSERT(status == 0); ASSERT(status == 0);
@ -56,11 +64,15 @@ static void close_cb(uv_handle_t* handle, int status) {
static void shutdown_cb(uv_req_t* req, int status) { static void shutdown_cb(uv_req_t* req, int status) {
uv_tcp_t* tcp;
ASSERT(req); ASSERT(req);
ASSERT(status == 0); ASSERT(status == 0);
tcp = (uv_tcp_t*)(req->handle);
/* The write buffer should be empty by now. */ /* The write buffer should be empty by now. */
ASSERT(req->handle->write_queue_size == 0); ASSERT(tcp->write_queue_size == 0);
/* Now we wait for the EOF */ /* Now we wait for the EOF */
shutdown_cb_called++; shutdown_cb_called++;
@ -72,8 +84,8 @@ static void shutdown_cb(uv_req_t* req, int status) {
} }
static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) { static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
ASSERT(handle != NULL); ASSERT(tcp != NULL);
if (nread < 0) { if (nread < 0) {
ASSERT(uv_last_error().code == UV_EOF); ASSERT(uv_last_error().code == UV_EOF);
@ -83,7 +95,7 @@ static void read_cb(uv_handle_t* handle, int nread, uv_buf_t buf) {
free(buf.base); free(buf.base);
} }
uv_close(handle); uv_close((uv_handle_t*)tcp);
return; return;
} }
@ -111,13 +123,13 @@ static void write_cb(uv_req_t* req, int status) {
static void connect_cb(uv_req_t* req, int status) { static void connect_cb(uv_req_t* req, int status) {
uv_buf_t send_bufs[CHUNKS_PER_WRITE]; uv_buf_t send_bufs[CHUNKS_PER_WRITE];
uv_handle_t* handle; uv_tcp_t* tcp;
int i, j, r; int i, j, r;
ASSERT(req != NULL); ASSERT(req != NULL);
ASSERT(status == 0); ASSERT(status == 0);
handle = req->handle; tcp = (uv_tcp_t*)req->handle;
connect_cb_called++; connect_cb_called++;
free(req); free(req);
@ -133,7 +145,7 @@ static void connect_cb(uv_req_t* req, int status) {
req = (uv_req_t*)malloc(sizeof *req); req = (uv_req_t*)malloc(sizeof *req);
ASSERT(req != NULL); ASSERT(req != NULL);
uv_req_init(req, handle, write_cb); uv_req_init(req, (uv_handle_t*)tcp, write_cb);
r = uv_write(req, (uv_buf_t*)&send_bufs, CHUNKS_PER_WRITE); r = uv_write(req, (uv_buf_t*)&send_bufs, CHUNKS_PER_WRITE);
ASSERT(r == 0); ASSERT(r == 0);
} }
@ -141,7 +153,7 @@ static void connect_cb(uv_req_t* req, int status) {
/* Shutdown on drain. FIXME: dealloc req? */ /* Shutdown on drain. FIXME: dealloc req? */
req = (uv_req_t*) malloc(sizeof(uv_req_t)); req = (uv_req_t*) malloc(sizeof(uv_req_t));
ASSERT(req != NULL); ASSERT(req != NULL);
uv_req_init(req, handle, shutdown_cb); uv_req_init(req, (uv_handle_t*)tcp, shutdown_cb);
r = uv_shutdown(req); r = uv_shutdown(req);
ASSERT(r == 0); ASSERT(r == 0);
@ -149,23 +161,15 @@ static void connect_cb(uv_req_t* req, int status) {
req = (uv_req_t*)malloc(sizeof *req); req = (uv_req_t*)malloc(sizeof *req);
ASSERT(req != NULL); ASSERT(req != NULL);
uv_req_init(req, handle, read_cb); uv_req_init(req, (uv_handle_t*)tcp, read_cb);
r = uv_read_start(handle, read_cb); r = uv_read_start(tcp, alloc_cb, read_cb);
ASSERT(r == 0); ASSERT(r == 0);
} }
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf_t buf;
buf.base = (char*)malloc(size);
buf.len = size;
return buf;
}
TEST_IMPL(tcp_writealot) { TEST_IMPL(tcp_writealot) {
struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT); struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
uv_handle_t* client = (uv_handle_t*)malloc(sizeof *client); uv_tcp_t* client = (uv_tcp_t*)malloc(sizeof *client);
uv_req_t* connect_req = (uv_req_t*)malloc(sizeof *connect_req); uv_req_t* connect_req = (uv_req_t*)malloc(sizeof *connect_req);
int r; int r;
@ -176,13 +180,13 @@ TEST_IMPL(tcp_writealot) {
ASSERT(send_buffer != NULL); ASSERT(send_buffer != NULL);
uv_init(alloc_cb); uv_init();
r = uv_tcp_init(client, close_cb, NULL); r = uv_tcp_init(client, close_cb, NULL);
ASSERT(r == 0); ASSERT(r == 0);
uv_req_init(connect_req, client, connect_cb); uv_req_init(connect_req, (uv_handle_t*)client, connect_cb);
r = uv_connect(connect_req, (struct sockaddr*)&addr); r = uv_connect(connect_req, addr);
ASSERT(r == 0); ASSERT(r == 0);
uv_run(); uv_run();

24
deps/uv/test/test-timer-again.c

@ -29,7 +29,7 @@ static int repeat_2_cb_called = 0;
static int repeat_2_cb_allowed = 0; static int repeat_2_cb_allowed = 0;
static uv_handle_t dummy, repeat_1, repeat_2; static uv_timer_t dummy, repeat_1, repeat_2;
static int64_t start_time; static int64_t start_time;
@ -45,10 +45,10 @@ static void close_cb(uv_handle_t* handle, int status) {
static void repeat_1_cb(uv_handle_t* handle, int status) { static void repeat_1_cb(uv_handle_t* handle, int status) {
int r; int r;
ASSERT(handle == &repeat_1); ASSERT(handle == (uv_handle_t*)&repeat_1);
ASSERT(status == 0); ASSERT(status == 0);
ASSERT(uv_timer_get_repeat(handle) == 50); ASSERT(uv_timer_get_repeat((uv_timer_t*)handle) == 50);
LOGF("repeat_1_cb called after %ld ms\n", (long int)(uv_now() - start_time)); LOGF("repeat_1_cb called after %ld ms\n", (long int)(uv_now() - start_time));
@ -68,7 +68,7 @@ static void repeat_1_cb(uv_handle_t* handle, int status) {
static void repeat_2_cb(uv_handle_t* handle, int status) { static void repeat_2_cb(uv_handle_t* handle, int status) {
ASSERT(handle == &repeat_2); ASSERT(handle == (uv_handle_t*) &repeat_2);
ASSERT(status == 0); ASSERT(status == 0);
ASSERT(repeat_2_cb_allowed); ASSERT(repeat_2_cb_allowed);
@ -76,31 +76,25 @@ static void repeat_2_cb(uv_handle_t* handle, int status) {
repeat_2_cb_called++; repeat_2_cb_called++;
if (uv_timer_get_repeat(handle) == 0) { if (uv_timer_get_repeat(&repeat_2) == 0) {
ASSERT(!uv_is_active(handle)); ASSERT(!uv_is_active(handle));
uv_close(handle); uv_close(handle);
return; return;
} }
LOGF("uv_timer_get_repeat %ld ms\n", (long int)uv_timer_get_repeat(handle)); LOGF("uv_timer_get_repeat %ld ms\n",
ASSERT(uv_timer_get_repeat(handle) == 100); (long int)uv_timer_get_repeat(&repeat_2));
ASSERT(uv_timer_get_repeat(&repeat_2) == 100);
/* This shouldn't take effect immediately. */ /* This shouldn't take effect immediately. */
uv_timer_set_repeat(&repeat_2, 0); uv_timer_set_repeat(&repeat_2, 0);
} }
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf_t buf = {0, 0};
FATAL("alloc should not be called");
return buf;
}
TEST_IMPL(timer_again) { TEST_IMPL(timer_again) {
int r; int r;
uv_init(alloc_cb); uv_init();
start_time = uv_now(); start_time = uv_now();
ASSERT(0 < start_time); ASSERT(0 < start_time);

15
deps/uv/test/test-timer.c

@ -92,26 +92,19 @@ static void never_cb(uv_handle_t* handle, int status) {
} }
static uv_buf_t alloc_cb(uv_handle_t* handle, size_t size) {
uv_buf_t buf = {0, 0};
FATAL("alloc should not be called");
return buf;
}
TEST_IMPL(timer) { TEST_IMPL(timer) {
uv_handle_t *once; uv_timer_t *once;
uv_handle_t repeat, never; uv_timer_t repeat, never;
int i, r; int i, r;
uv_init(alloc_cb); uv_init();
start_time = uv_now(); start_time = uv_now();
ASSERT(0 < start_time); ASSERT(0 < start_time);
/* Let 10 timers time out in 500 ms total. */ /* Let 10 timers time out in 500 ms total. */
for (i = 0; i < 10; i++) { for (i = 0; i < 10; i++) {
once = (uv_handle_t*)malloc(sizeof(*once)); once = (uv_timer_t*)malloc(sizeof(*once));
ASSERT(once != NULL); ASSERT(once != NULL);
r = uv_timer_init(once, once_close_cb, NULL); r = uv_timer_init(once, once_close_cb, NULL);
ASSERT(r == 0); ASSERT(r == 0);

755
deps/uv/uv-unix.c

File diff suppressed because it is too large

47
deps/uv/uv-unix.h

@ -38,7 +38,7 @@ typedef struct {
} uv_buf_t; } uv_buf_t;
#define uv_req_private_fields \ #define UV_REQ_PRIVATE_FIELDS \
int write_index; \ int write_index; \
ev_timer timer; \ ev_timer timer; \
ngx_queue_t queue; \ ngx_queue_t queue; \
@ -47,33 +47,52 @@ typedef struct {
/* TODO: union or classes please! */ /* TODO: union or classes please! */
#define uv_handle_private_fields \ #define UV_HANDLE_PRIVATE_FIELDS \
int fd; \ int fd; \
int flags; \ int flags; \
ev_idle next_watcher; \ ev_idle next_watcher;
/* UV_TCP */ \
/* UV_TCP */
#define UV_TCP_PRIVATE_FIELDS \
int delayed_error; \ int delayed_error; \
uv_read_cb read_cb; \ uv_read_cb read_cb; \
uv_accept_cb accept_cb; \ uv_alloc_cb alloc_cb; \
uv_connection_cb connection_cb; \
int accepted_fd; \ int accepted_fd; \
uv_req_t *connect_req; \ uv_req_t *connect_req; \
uv_req_t *shutdown_req; \ uv_req_t *shutdown_req; \
ev_io read_watcher; \ ev_io read_watcher; \
ev_io write_watcher; \ ev_io write_watcher; \
ngx_queue_t write_queue; \ ngx_queue_t write_queue;
/* UV_PREPARE */ \ /* UV_PREPARE */ \
#define UV_PREPARE_PRIVATE_FIELDS \
ev_prepare prepare_watcher; \ ev_prepare prepare_watcher; \
uv_loop_cb prepare_cb; \ uv_loop_cb prepare_cb;
/* UV_CHECK */ \
/* UV_CHECK */
#define UV_CHECK_PRIVATE_FIELDS \
ev_check check_watcher; \ ev_check check_watcher; \
uv_loop_cb check_cb; \ uv_loop_cb check_cb;
/* UV_IDLE */ \
/* UV_IDLE */
#define UV_IDLE_PRIVATE_FIELDS \
ev_idle idle_watcher; \ ev_idle idle_watcher; \
uv_loop_cb idle_cb; \ uv_loop_cb idle_cb;
/* UV_ASYNC */ \
/* UV_ASYNC */
#define UV_ASYNC_PRIVATE_FIELDS \
ev_async async_watcher; \ ev_async async_watcher; \
uv_loop_cb async_cb; \ uv_loop_cb async_cb;
/* UV_TIMER */ \
/* UV_TIMER */
#define UV_TIMER_PRIVATE_FIELDS \
ev_timer timer_watcher; \ ev_timer timer_watcher; \
uv_loop_cb timer_cb; uv_loop_cb timer_cb;

591
deps/uv/uv-win.c

File diff suppressed because it is too large

40
deps/uv/uv-win.h

@ -41,7 +41,7 @@ typedef struct uv_buf_t {
char* base; char* base;
} uv_buf_t; } uv_buf_t;
#define uv_req_private_fields \ #define UV_REQ_PRIVATE_FIELDS \
union { \ union { \
/* Used by I/O operations */ \ /* Used by I/O operations */ \
struct { \ struct { \
@ -49,21 +49,24 @@ typedef struct uv_buf_t {
size_t queued_bytes; \ size_t queued_bytes; \
}; \ }; \
}; \ }; \
int flags; int flags; \
uv_err_t error; \
struct uv_req_s* next_req;
#define uv_tcp_connection_fields \ #define uv_tcp_connection_fields \
uv_alloc_cb alloc_cb; \
void* read_cb; \ void* read_cb; \
struct uv_req_s read_req; \ struct uv_req_s read_req; \
unsigned int write_reqs_pending; \ unsigned int write_reqs_pending; \
uv_req_t* shutdown_req; uv_req_t* shutdown_req;
#define uv_tcp_server_fields \ #define uv_tcp_server_fields \
void *accept_cb; \ void *connection_cb; \
SOCKET accept_socket; \ SOCKET accept_socket; \
struct uv_req_s accept_req; \ struct uv_req_s accept_req; \
char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32]; char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32];
#define uv_tcp_fields \ #define UV_TCP_PRIVATE_FIELDS \
unsigned int reqs_pending; \ unsigned int reqs_pending; \
union { \ union { \
SOCKET socket; \ SOCKET socket; \
@ -74,29 +77,36 @@ typedef struct uv_buf_t {
struct { uv_tcp_server_fields }; \ struct { uv_tcp_server_fields }; \
}; };
#define uv_timer_fields \ #define UV_TIMER_PRIVATE_FIELDS \
RB_ENTRY(uv_handle_s) tree_entry; \ RB_ENTRY(uv_timer_s) tree_entry; \
int64_t due; \ int64_t due; \
int64_t repeat; \ int64_t repeat; \
void* timer_cb; void* timer_cb;
#define uv_loop_fields \ #define UV_LOOP_PRIVATE_FIELDS \
uv_handle_t* loop_prev; \ uv_handle_t* loop_prev; \
uv_handle_t* loop_next; \ uv_handle_t* loop_next; \
void* loop_cb; void* loop_cb;
#define uv_async_fields \ #define UV_ASYNC_PRIVATE_FIELDS \
struct uv_req_s async_req; \ struct uv_req_s async_req; \
/* char to avoid alignment issues */ \ /* char to avoid alignment issues */ \
char volatile async_sent; char volatile async_sent;
#define uv_handle_private_fields \ #define UV_PREPARE_PRIVATE_FIELDS /* empty */
#define UV_CHECK_PRIVATE_FIELDS /* empty */
#define UV_IDLE_PRIVATE_FIELDS /* empty */
/*
* TODO: remove UV_LOOP_PRIVATE_FIELDS from UV_HANDLE_PRIVATE_FIELDS and
* use it in UV_(PREPARE|CHECK|IDLE)_PRIVATE_FIELDS instead.
*/
#define UV_HANDLE_PRIVATE_FIELDS \
uv_handle_t* endgame_next; \ uv_handle_t* endgame_next; \
unsigned int flags; \ unsigned int flags; \
uv_err_t error; \ uv_err_t error; \
union { \ UV_LOOP_PRIVATE_FIELDS
struct { uv_tcp_fields }; \
struct { uv_timer_fields }; \
struct { uv_loop_fields }; \ int uv_utf16_to_utf8(wchar_t* utf16Buffer, size_t utf16Size, char* utf8Buffer, size_t utf8Size);
struct { uv_async_fields }; \
};

271
deps/uv/uv.h

@ -33,6 +33,11 @@ extern "C" {
typedef struct uv_err_s uv_err_t; typedef struct uv_err_s uv_err_t;
typedef struct uv_handle_s uv_handle_t; typedef struct uv_handle_s uv_handle_t;
typedef struct uv_tcp_s uv_tcp_t;
typedef struct uv_timer_s uv_timer_t;
typedef struct uv_prepare_s uv_prepare_t;
typedef struct uv_check_s uv_check_t;
typedef struct uv_idle_s uv_idle_t;
typedef struct uv_req_s uv_req_t; typedef struct uv_req_s uv_req_t;
@ -51,12 +56,12 @@ typedef struct uv_req_s uv_req_t;
* In the case of uv_read_cb the uv_buf_t returned should be freed by the * In the case of uv_read_cb the uv_buf_t returned should be freed by the
* user. * user.
*/ */
typedef uv_buf_t (*uv_alloc_cb)(uv_handle_t* handle, size_t suggested_size); typedef uv_buf_t (*uv_alloc_cb)(uv_tcp_t* tcp, size_t suggested_size);
typedef void (*uv_read_cb)(uv_handle_t *handle, int nread, uv_buf_t buf); typedef void (*uv_read_cb)(uv_tcp_t* tcp, int nread, uv_buf_t buf);
typedef void (*uv_write_cb)(uv_req_t* req, int status); typedef void (*uv_write_cb)(uv_req_t* req, int status);
typedef void (*uv_connect_cb)(uv_req_t* req, int status); typedef void (*uv_connect_cb)(uv_req_t* req, int status);
typedef void (*uv_shutdown_cb)(uv_req_t* req, int status); typedef void (*uv_shutdown_cb)(uv_req_t* req, int status);
typedef void (*uv_accept_cb)(uv_handle_t* handle); typedef void (*uv_connection_cb)(uv_tcp_t* server, int status);
typedef void (*uv_close_cb)(uv_handle_t* handle, int status); typedef void (*uv_close_cb)(uv_handle_t* handle, int status);
/* TODO: do loop_cb and async_cb really need a status argument? */ /* TODO: do loop_cb and async_cb really need a status argument? */
typedef void (*uv_loop_cb)(uv_handle_t* handle, int status); typedef void (*uv_loop_cb)(uv_handle_t* handle, int status);
@ -142,70 +147,67 @@ struct uv_req_s {
void* cb; void* cb;
void* data; void* data;
/* private */ /* private */
uv_req_private_fields UV_REQ_PRIVATE_FIELDS
}; };
/*
* Initialize a request for use with uv_write, uv_shutdown, or uv_connect.
*/
void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb);
#define UV_HANDLE_FIELDS \
/* read-only */ \
uv_handle_type type; \
/* public */ \
uv_close_cb close_cb; \
void* data; \
/* private */ \
UV_HANDLE_PRIVATE_FIELDS \
/* The abstract base class of all handles. */
struct uv_handle_s { struct uv_handle_s {
/* read-only */ UV_HANDLE_FIELDS
uv_handle_type type;
/* public */
uv_close_cb close_cb;
void* data;
/* number of bytes queued for writing */
size_t write_queue_size;
/* private */
uv_handle_private_fields
}; };
/*
/* Most functions return boolean: 0 for success and -1 for failure. * Returns 1 if the prepare/check/idle handle has been started, 0 otherwise.
* On error the user should then call uv_last_error() to determine * For other handle types this always returns 1.
* the error code.
*/ */
uv_err_t uv_last_error(); int uv_is_active(uv_handle_t* handle);
char* uv_strerror(uv_err_t err);
const char* uv_err_name(uv_err_t err);
void uv_init(uv_alloc_cb alloc);
int uv_run();
/* Manually modify the event loop's reference count. Useful if the user wants /*
* to have a handle or timeout that doesn't keep the loop alive. * Request handle to be closed. close_cb will be called asynchronously after
* this call. This MUST be called on each handle before memory is released.
*/ */
void uv_ref(); int uv_close(uv_handle_t* handle);
void uv_unref();
void uv_update_time();
int64_t uv_now();
void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb);
/* /*
* TODO: * A subclass of uv_handle_t representing a TCP stream or TCP server. In the
* - uv_(pipe|pipe_tty)_handle_init * future this will probably be split into two classes - one a stream and
* - uv_bind_pipe(char* name) * the other a server.
* - uv_continuous_read(uv_handle_t* handle, uv_continuous_read_cb* cb)
* - A way to list cancelled uv_reqs after before/on uv_close_cb
*/ */
struct uv_tcp_s {
UV_HANDLE_FIELDS
size_t write_queue_size; /* number of bytes queued for writing */
UV_TCP_PRIVATE_FIELDS
};
/* TCP socket methods. int uv_tcp_init(uv_tcp_t* handle, uv_close_cb close_cb, void* data);
* Handle and callback bust be set by calling uv_req_init.
*/ int uv_bind(uv_tcp_t* handle, struct sockaddr_in);
int uv_tcp_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
int uv_bind(uv_handle_t* handle, struct sockaddr* addr); int uv_connect(uv_req_t* req, struct sockaddr_in);
int uv_connect(uv_req_t* req, struct sockaddr* addr);
int uv_shutdown(uv_req_t* req); int uv_shutdown(uv_req_t* req);
/* TCP server methods. */ int uv_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb);
int uv_listen(uv_handle_t* handle, int backlog, uv_accept_cb cb);
/* Call this after accept_cb. client does not need to be initialized. */ /* Call this after connection_cb. client does not need to be initialized. */
int uv_accept(uv_handle_t* server, uv_handle_t* client, int uv_accept(uv_tcp_t* server, uv_tcp_t* client,
uv_close_cb close_cb, void* data); uv_close_cb close_cb, void* data);
/* Read data from an incoming stream. The callback will be made several /* Read data from an incoming stream. The callback will be made several
* several times until there is no more data to read or uv_read_stop is * several times until there is no more data to read or uv_read_stop is
* called. When we've reached EOF nread will be set to -1 and the error is * called. When we've reached EOF nread will be set to -1 and the error is
@ -215,82 +217,157 @@ int uv_accept(uv_handle_t* server, uv_handle_t* client,
* eof; it happens when libuv requested a buffer through the alloc callback * eof; it happens when libuv requested a buffer through the alloc callback
* but then decided that it didn't need that buffer. * but then decided that it didn't need that buffer.
*/ */
int uv_read_start(uv_handle_t* handle, uv_read_cb cb); int uv_read_start(uv_tcp_t*, uv_alloc_cb alloc_cb, uv_read_cb read_cb);
int uv_read_stop(uv_handle_t* handle);
int uv_read_stop(uv_tcp_t*);
int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt); int uv_write(uv_req_t* req, uv_buf_t bufs[], int bufcnt);
/* Timer methods */
int uv_timer_init(uv_handle_t* handle, uv_close_cb close_cb, void* data); /*
int uv_timer_start(uv_handle_t* handle, uv_loop_cb cb, int64_t timeout, int64_t repeat); * Subclass of uv_handle_t. libev wrapper. Every active prepare handle gets
int uv_timer_stop(uv_handle_t* handle); * its callback called exactly once per loop iteration, just before the
* system blocks to wait for completed i/o.
*/
struct uv_prepare_s {
UV_HANDLE_FIELDS
UV_PREPARE_PRIVATE_FIELDS
};
int uv_prepare_init(uv_prepare_t* prepare, uv_close_cb close_cb, void* data);
int uv_prepare_start(uv_prepare_t* prepare, uv_loop_cb cb);
int uv_prepare_stop(uv_prepare_t* prepare);
/*
* Subclass of uv_handle_t. libev wrapper. Every active check handle gets
* its callback called exactly once per loop iteration, just after the
* system returns from blocking.
*/
struct uv_check_s {
UV_HANDLE_FIELDS
UV_CHECK_PRIVATE_FIELDS
};
int uv_check_init(uv_check_t* check, uv_close_cb close_cb, void* data);
int uv_check_start(uv_check_t* check, uv_loop_cb cb);
int uv_check_stop(uv_check_t* check);
/*
* Subclass of uv_handle_t. libev wrapper. Every active idle handle gets its
* callback called repeatedly until it is stopped. This happens after all
* other types of callbacks are processed. When there are multiple "idle"
* handles active, their callbacks are called in turn.
*/
struct uv_idle_s {
UV_HANDLE_FIELDS
UV_IDLE_PRIVATE_FIELDS
};
int uv_idle_init(uv_idle_t* idle, uv_close_cb close_cb, void* data);
int uv_idle_start(uv_idle_t* idle, uv_loop_cb cb);
int uv_idle_stop(uv_idle_t* idle);
/*
* Subclass of uv_handle_t. libev wrapper. uv_async_send wakes up the event
* loop and calls the async handle's callback There is no guarantee that
* every uv_async_send call leads to exactly one invocation of the callback;
* The only guarantee is that the callback function is called at least once
* after the call to async_send. Unlike all other libuv functions,
* uv_async_send can be called from another thread.
*/
typedef struct {
UV_HANDLE_FIELDS
UV_ASYNC_PRIVATE_FIELDS
} uv_async_t;
int uv_async_init(uv_async_t* async, uv_async_cb async_cb,
uv_close_cb close_cb, void* data);
int uv_async_send(uv_async_t* async);
/*
* Subclass of uv_handle_t. Wraps libev's ev_timer watcher. Used to get
* woken up at a specified time in the future.
*/
struct uv_timer_s {
UV_HANDLE_FIELDS
UV_TIMER_PRIVATE_FIELDS
};
int uv_timer_init(uv_timer_t* timer, uv_close_cb close_cb, void* data);
int uv_timer_start(uv_timer_t* timer, uv_loop_cb cb, int64_t timeout, int64_t repeat);
int uv_timer_stop(uv_timer_t* timer);
/* /*
* Stop the timer, and if it is repeating restart it using the repeat value * Stop the timer, and if it is repeating restart it using the repeat value
* as the timeout. If the timer has never been started before it returns -1 and * as the timeout. If the timer has never been started before it returns -1 and
* sets the error to UV_EINVAL. * sets the error to UV_EINVAL.
*/ */
int uv_timer_again(uv_handle_t* handle); int uv_timer_again(uv_timer_t* timer);
/* /*
* Set the repeat value. Note that if the repeat value is set from a timer * Set the repeat value. Note that if the repeat value is set from a timer
* callback it does not immediately take effect. If the timer was nonrepeating * callback it does not immediately take effect. If the timer was nonrepeating
* before, it will have been stopped. If it was repeating, then the old repeat * before, it will have been stopped. If it was repeating, then the old repeat
* value will have been used to schedule the next timeout. * value will have been used to schedule the next timeout.
*/ */
void uv_timer_set_repeat(uv_handle_t* handle, int64_t repeat); void uv_timer_set_repeat(uv_timer_t* timer, int64_t repeat);
int64_t uv_timer_get_repeat(uv_handle_t* handle);
/* libev wrapper. Every active prepare handle gets its callback called int64_t uv_timer_get_repeat(uv_timer_t* timer);
* exactly once per loop iteration, just before the system blocks to wait
* for completed i/o.
*/
int uv_prepare_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
int uv_prepare_start(uv_handle_t* handle, uv_loop_cb cb);
int uv_prepare_stop(uv_handle_t* handle);
/* libev wrapper. Every active check handle gets its callback called exactly
* once per loop iteration, just after the system returns from blocking.
*/
int uv_check_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
int uv_check_start(uv_handle_t* handle, uv_loop_cb cb);
int uv_check_stop(uv_handle_t* handle);
/* libev wrapper. Every active idle handle gets its callback called repeatedly until it is
* stopped. This happens after all other types of callbacks are processed.
* When there are multiple "idle" handles active, their callbacks are called
* in turn.
*/
int uv_idle_init(uv_handle_t* handle, uv_close_cb close_cb, void* data);
int uv_idle_start(uv_handle_t* handle, uv_loop_cb cb);
int uv_idle_stop(uv_handle_t* handle);
/* Returns 1 if the prepare/check/idle handle has been started, 0 otherwise. /*
* For other handle types this always returns 1. * Most functions return boolean: 0 for success and -1 for failure.
* On error the user should then call uv_last_error() to determine
* the error code.
*/ */
int uv_is_active(uv_handle_t* handle); uv_err_t uv_last_error();
char* uv_strerror(uv_err_t err);
const char* uv_err_name(uv_err_t err);
/* libev wrapper. uv_async_send wakes up the event loop and calls the async void uv_init();
* handle's callback There is no guarantee that every uv_async_send call int uv_run();
* leads to exactly one invocation of the callback; The only guarantee is
* that the callback function is called at least once after the call to
* async_send. Unlike everything else, uv_async_send can be called from
* another thread.
*
* QUESTION(ryan) Can UV_ASYNC just use uv_loop_cb? Same signature on my
* side.
*/
int uv_async_init(uv_handle_t* handle, uv_async_cb async_cb,
uv_close_cb close_cb, void* data);
int uv_async_send(uv_handle_t* handle);
/* Request handle to be closed. close_cb will be called /*
* asynchronously after this call. * Manually modify the event loop's reference count. Useful if the user wants
* to have a handle or timeout that doesn't keep the loop alive.
*/ */
int uv_close(uv_handle_t* handle); void uv_ref();
void uv_unref();
void uv_update_time();
int64_t uv_now();
/* Utility */ /* Utility */
struct sockaddr_in uv_ip4_addr(char* ip, int port); struct sockaddr_in uv_ip4_addr(char* ip, int port);
/* Gets the executable path */
int uv_get_exepath(char* buffer, size_t* size);
/* the presence of this union forces similar struct layout */
union uv_any_handle {
uv_tcp_t tcp;
uv_prepare_t prepare;
uv_check_t check;
uv_idle_t idle;
uv_async_t async;
uv_timer_t timer;
};
#ifdef __cplusplus #ifdef __cplusplus
} }
#endif #endif

71
src/node.cc

@ -114,15 +114,15 @@ static bool cov = false;
static int debug_port=5858; static int debug_port=5858;
static int max_stack_size = 0; static int max_stack_size = 0;
static uv_handle_t check_tick_watcher; static uv_check_t check_tick_watcher;
static uv_handle_t prepare_tick_watcher; static uv_prepare_t prepare_tick_watcher;
static uv_handle_t tick_spinner; static uv_idle_t tick_spinner;
static bool need_tick_cb; static bool need_tick_cb;
static Persistent<String> tick_callback_sym; static Persistent<String> tick_callback_sym;
static uv_handle_t eio_want_poll_notifier; static uv_async_t eio_want_poll_notifier;
static uv_handle_t eio_done_poll_notifier; static uv_async_t eio_done_poll_notifier;
static uv_handle_t eio_poller; static uv_idle_t eio_poller;
// Buffer for getpwnam_r(), getgrpam_r() and other misc callers; keep this // Buffer for getpwnam_r(), getgrpam_r() and other misc callers; keep this
// scoped at file-level rather than method-level to avoid excess stack usage. // scoped at file-level rather than method-level to avoid excess stack usage.
@ -135,9 +135,9 @@ static char getbuf[PATH_MAX + 1];
// //
// A rather convoluted algorithm has been devised to determine when Node is // A rather convoluted algorithm has been devised to determine when Node is
// idle. You'll have to figure it out for yourself. // idle. You'll have to figure it out for yourself.
static uv_handle_t gc_check; static uv_check_t gc_check;
static uv_handle_t gc_idle; static uv_idle_t gc_idle;
static uv_handle_t gc_timer; static uv_timer_t gc_timer;
bool need_gc; bool need_gc;
@ -151,24 +151,24 @@ static int tick_time_head;
static void CheckStatus(uv_handle_t* watcher, int status); static void CheckStatus(uv_handle_t* watcher, int status);
static void StartGCTimer () { static void StartGCTimer () {
if (!uv_is_active(&gc_timer)) { if (!uv_is_active((uv_handle_t*) &gc_timer)) {
uv_timer_start(&node::gc_timer, node::CheckStatus, 5., 5.); uv_timer_start(&node::gc_timer, node::CheckStatus, 5., 5.);
} }
} }
static void StopGCTimer () { static void StopGCTimer () {
if (uv_is_active(&gc_timer)) { if (uv_is_active((uv_handle_t*) &gc_timer)) {
uv_timer_stop(&gc_timer); uv_timer_stop(&gc_timer);
} }
} }
static void Idle(uv_handle_t* watcher, int status) { static void Idle(uv_handle_t* watcher, int status) {
assert(watcher == &gc_idle); assert((uv_idle_t*) watcher == &gc_idle);
//fprintf(stderr, "idle\n"); //fprintf(stderr, "idle\n");
if (V8::IdleNotification()) { if (V8::IdleNotification()) {
uv_idle_stop(watcher); uv_idle_stop(&gc_idle);
StopGCTimer(); StopGCTimer();
} }
} }
@ -176,7 +176,7 @@ static void Idle(uv_handle_t* watcher, int status) {
// Called directly after every call to select() (or epoll, or whatever) // Called directly after every call to select() (or epoll, or whatever)
static void Check(uv_handle_t* watcher, int status) { static void Check(uv_handle_t* watcher, int status) {
assert(watcher == &gc_check); assert((uv_check_t*) watcher == &gc_check);
tick_times[tick_time_head] = uv_now(); tick_times[tick_time_head] = uv_now();
tick_time_head = (tick_time_head + 1) % RPM_SAMPLES; tick_time_head = (tick_time_head + 1) % RPM_SAMPLES;
@ -202,7 +202,7 @@ static void Check(uv_handle_t* watcher, int status) {
static void Spin(uv_handle_t* handle, int status) { static void Spin(uv_handle_t* handle, int status) {
assert(handle == &tick_spinner); assert((uv_idle_t*) handle == &tick_spinner);
assert(status == 0); assert(status == 0);
} }
@ -215,7 +215,7 @@ static Handle<Value> NeedTickCallback(const Arguments& args) {
// there is nothing left to do in the event loop and libev will exit. The // there is nothing left to do in the event loop and libev will exit. The
// ev_prepare callback isn't called before exiting. Thus we start this // ev_prepare callback isn't called before exiting. Thus we start this
// tick_spinner to keep the event loop alive long enough to handle it. // tick_spinner to keep the event loop alive long enough to handle it.
if (!uv_is_active(&tick_spinner)) { if (!uv_is_active((uv_handle_t*) &tick_spinner)) {
uv_idle_start(&tick_spinner, Spin); uv_idle_start(&tick_spinner, Spin);
uv_ref(); uv_ref();
} }
@ -228,7 +228,7 @@ static void Tick(void) {
if (!need_tick_cb) return; if (!need_tick_cb) return;
need_tick_cb = false; need_tick_cb = false;
if (uv_is_active(&tick_spinner)) { if (uv_is_active((uv_handle_t*) &tick_spinner)) {
uv_idle_stop(&tick_spinner); uv_idle_stop(&tick_spinner);
uv_unref(); uv_unref();
} }
@ -256,27 +256,27 @@ static void Tick(void) {
static void PrepareTick(uv_handle_t* handle, int status) { static void PrepareTick(uv_handle_t* handle, int status) {
assert(handle == &prepare_tick_watcher); assert((uv_prepare_t*) handle == &prepare_tick_watcher);
assert(status == 0); assert(status == 0);
Tick(); Tick();
} }
static void CheckTick(uv_handle_t* handle, int status) { static void CheckTick(uv_handle_t* handle, int status) {
assert(handle == &check_tick_watcher); assert((uv_check_t*) handle == &check_tick_watcher);
assert(status == 0); assert(status == 0);
Tick(); Tick();
} }
static void DoPoll(uv_handle_t* watcher, int status) { static void DoPoll(uv_handle_t* watcher, int status) {
assert(watcher == &eio_poller); assert((uv_idle_t*) watcher == &eio_poller);
//printf("eio_poller\n"); //printf("eio_poller\n");
if (eio_poll() != -1 && uv_is_active(&eio_poller)) { if (eio_poll() != -1 && uv_is_active((uv_handle_t*) &eio_poller)) {
//printf("eio_poller stop\n"); //printf("eio_poller stop\n");
uv_idle_stop(watcher); uv_idle_stop(&eio_poller);
uv_unref(); uv_unref();
} }
} }
@ -284,11 +284,11 @@ static void DoPoll(uv_handle_t* watcher, int status) {
// Called from the main thread. // Called from the main thread.
static void WantPollNotifier(uv_handle_t* watcher, int status) { static void WantPollNotifier(uv_handle_t* watcher, int status) {
assert(watcher == &eio_want_poll_notifier); assert((uv_async_t*) watcher == &eio_want_poll_notifier);
//printf("want poll notifier\n"); //printf("want poll notifier\n");
if (eio_poll() == -1 && !uv_is_active(&eio_poller)) { if (eio_poll() == -1 && !uv_is_active((uv_handle_t*) &eio_poller)) {
//printf("eio_poller start\n"); //printf("eio_poller start\n");
uv_idle_start(&eio_poller, node::DoPoll); uv_idle_start(&eio_poller, node::DoPoll);
uv_ref(); uv_ref();
@ -297,11 +297,11 @@ static void WantPollNotifier(uv_handle_t* watcher, int status) {
static void DonePollNotifier(uv_handle_t* watcher, int revents) { static void DonePollNotifier(uv_handle_t* watcher, int revents) {
assert(watcher == &eio_done_poll_notifier); assert((uv_async_t*) watcher == &eio_done_poll_notifier);
//printf("done poll notifier\n"); //printf("done poll notifier\n");
if (eio_poll() != -1 && uv_is_active(&eio_poller)) { if (eio_poll() != -1 && uv_is_active((uv_handle_t*) &eio_poller)) {
//printf("eio_poller stop\n"); //printf("eio_poller stop\n");
uv_idle_stop(&eio_poller); uv_idle_stop(&eio_poller);
uv_unref(); uv_unref();
@ -1544,10 +1544,10 @@ v8::Handle<v8::Value> Exit(const v8::Arguments& args) {
static void CheckStatus(uv_handle_t* watcher, int status) { static void CheckStatus(uv_handle_t* watcher, int status) {
assert(watcher == &gc_timer); assert((uv_timer_t*) watcher == &gc_timer);
// check memory // check memory
if (!uv_is_active(&gc_idle)) { if (!uv_is_active((uv_handle_t*) &gc_idle)) {
HeapStatistics stats; HeapStatistics stats;
V8::GetHeapStatistics(&stats); V8::GetHeapStatistics(&stats);
if (stats.total_heap_size() > 1024 * 1024 * 128) { if (stats.total_heap_size() > 1024 * 1024 * 128) {
@ -1818,11 +1818,11 @@ void FatalException(TryCatch &try_catch) {
} }
static uv_handle_t debug_watcher; static uv_async_t debug_watcher;
static void DebugMessageCallback(uv_handle_t* watcher, int status) { static void DebugMessageCallback(uv_handle_t* watcher, int status) {
HandleScope scope; HandleScope scope;
assert(watcher == &debug_watcher); assert((uv_async_t*) watcher == &debug_watcher);
Debug::ProcessDebugMessages(); Debug::ProcessDebugMessages();
} }
@ -2468,17 +2468,8 @@ void EmitExit(v8::Handle<v8::Object> process) {
} }
uv_buf_t UVAlloc(uv_handle_t* handle, size_t suggested_size) {
char* base = (char*)malloc(suggested_size);
uv_buf_t buf;
buf.base = base;
buf.len = suggested_size;
return buf;
}
int Start(int argc, char *argv[]) { int Start(int argc, char *argv[]) {
uv_init(UVAlloc); uv_init();
v8::V8::Initialize(); v8::V8::Initialize();
v8::HandleScope handle_scope; v8::HandleScope handle_scope;

6
src/timer_wrap.cc

@ -108,7 +108,7 @@ class TimerWrap {
void StateChange() { void StateChange() {
bool was_active = active_; bool was_active = active_;
active_ = uv_is_active(&handle_); active_ = uv_is_active((uv_handle_t*) &handle_);
if (!was_active && active_) { if (!was_active && active_) {
// If our state is changing from inactive to active, we // If our state is changing from inactive to active, we
@ -207,7 +207,7 @@ class TimerWrap {
UNWRAP UNWRAP
int r = uv_close(&wrap->handle_); int r = uv_close((uv_handle_t*) &wrap->handle_);
if (r) SetErrno(uv_last_error().code); if (r) SetErrno(uv_last_error().code);
@ -228,7 +228,7 @@ class TimerWrap {
MakeCallback(wrap->object_, "ontimeout", 1, argv); MakeCallback(wrap->object_, "ontimeout", 1, argv);
} }
uv_handle_t handle_; uv_timer_t handle_;
Persistent<Object> object_; Persistent<Object> object_;
// This member is set false initially. When the timer is turned // This member is set false initially. When the timer is turned
// on uv_ref is called. When the timer is turned off uv_unref is // on uv_ref is called. When the timer is turned off uv_unref is

Loading…
Cancel
Save