
Upgrade libuv

Tag: v0.7.4-release
Author: Ryan Dahl (14 years ago)
Commit: 623f513071
26 changed files:

 1. deps/uv/config-mingw.mk (2 changed lines)
 2. deps/uv/config-unix.mk (4 changed lines)
 3. deps/uv/desired-api.md (24 changed lines)
 4. deps/uv/test/benchmark-ping-pongs.c (12 changed lines)
 5. deps/uv/test/benchmark-pump.c (23 changed lines)
 6. deps/uv/test/echo-server.c (26 changed lines)
 7. deps/uv/test/runner.c (7 changed lines)
 8. deps/uv/test/test-async.c (12 changed lines)
 9. deps/uv/test/test-bind-error.c (28 changed lines)
10. deps/uv/test/test-callback-stack.c (13 changed lines)
11. deps/uv/test/test-connection-fail.c (17 changed lines)
12. deps/uv/test/test-delayed-accept.c (35 changed lines)
13. deps/uv/test/test-loop-handles.c (110 changed lines)
14. deps/uv/test/test-ping-pong.c (12 changed lines)
15. deps/uv/test/test-shutdown-eof.c (14 changed lines)
16. deps/uv/test/test-tcp-writealot.c (9 changed lines)
17. deps/uv/test/test-timer-again.c (13 changed lines)
18. deps/uv/test/test-timer.c (21 changed lines)
19. deps/uv/uv-common.c (8 changed lines)
20. deps/uv/uv-unix.c (54 changed lines)
21. deps/uv/uv-win.c (100 changed lines)
22. deps/uv/uv-win.h (8 changed lines)
23. deps/uv/uv.h (44 changed lines)
24. src/node.cc (20 changed lines)
25. src/timer_wrap.cc (7 changed lines)
26. wscript (1 changed line)

deps/uv/config-mingw.mk (2 changed lines)

@ -24,7 +24,7 @@ CC = $(PREFIX)gcc
AR = $(PREFIX)ar
E=.exe
CFLAGS=-g --std=gnu89 -pedantic -Wno-variadic-macros
CFLAGS=-g --std=gnu89 -Wno-variadic-macros
LINKFLAGS=-lm
RUNNER_CFLAGS=$(CFLAGS) -D_GNU_SOURCE # Need _GNU_SOURCE for strdup?

deps/uv/config-unix.mk (4 changed lines)

@ -21,7 +21,7 @@
CC = $(PREFIX)gcc
AR = $(PREFIX)ar
E=
CFLAGS=--std=gnu89 -pedantic -Wno-variadic-macros -g -DEV_MULTIPLICITY=0
CFLAGS=--std=gnu89 -Wno-variadic-macros -g
LINKFLAGS=-lm
ifeq (SunOS,$(uname_S))
@ -48,7 +48,7 @@ ev/ev.o: ev/config.h ev/ev.c
$(MAKE) -C ev
ev/config.h:
cd ev && CPPFLAGS=-DEV_MULTIPLICITY=0 ./configure
cd ev && ./configure
clean-platform:
$(MAKE) -C ev clean

deps/uv/desired-api.md (24 changed lines)

@ -48,9 +48,7 @@ Begins listening for connections. The accept callback is level-triggered.
int uv_accept(uv_tcp_server_t* server,
uv_tcp_t* client,
uv_close_cb close_cb,
void* data);
uv_tcp_t* client);
Accepts a connection. This should be called after the accept callback is
made. The `client` parameter should be uninitialized memory; `uv_accept` is
@ -91,9 +89,7 @@ Stops reading from the stream.
int uv_write_req_init(uv_write_req_t*,
uv_stream_t*,
uv_buf_t bufs[],
int butcnf,
uv_close_cb close_cb,
void* data);
int butcnf);
Initiates a write request on a stream.
@ -112,9 +108,7 @@ or a server-side connection (one that was initialized with `uv_accept`)
int uv_connect_req_init(uv_connect_req_t* req,
uv_tcp_t* socket,
struct sockaddr* addr,
uv_close_cb close_cb,
void* data);
struct sockaddr* addr);
Initiates a request to open a connection.
@ -133,9 +127,7 @@ completion of the request.
int uv_connect_req_init(uv_connect_req_t* req,
uv_tcp_t* socket,
struct sockaddr* addr,
uv_close_cb close_cb,
void* data);
struct sockaddr* addr);
Initializes the connection request. Returning 0 indicates success, -1 if
there was an error. The following values can be retrieved from
@ -152,9 +144,7 @@ channel is shutdown. Once a shutdown request is initiated on a stream, the
stream will allow no more writes.
int uv_shutdown_req_init(uv_shutdown_req_t*,
uv_stream_t* parent,
uv_close_cb close_cb,
void* data);
uv_stream_t* parent);
Initializes the shutdown request.
@ -164,8 +154,6 @@ Initializes the shutdown request.
int uv_write_req_init(uv_write_req_t*,
uv_stream_t*,
uv_buf_t bufs[],
int butcnf,
uv_close_cb close_cb,
void* data);
int butcnf);
Initiates a write request on a stream.
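
The request initializers in this design doc all lose their close_cb/data pair. A minimal sketch of the slimmed-down call, assuming the doc's proposed types (uv_write_req_t, uv_stream_t, and uv_write_req_init are proposals in this document, not functions this commit implements):

    uv_stream_t stream;   /* stands in for an already set-up stream */
    uv_write_req_t req;
    uv_buf_t buf;
    buf.base = "hello";
    buf.len = 5;
    /* Proposed form: the request binds only the stream and buffers;
     * user context would hang off a data field instead. */
    if (uv_write_req_init(&req, &stream, &buf, 1) != 0) {
      /* handle initialization error */
    }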

deps/uv/test/benchmark-ping-pongs.c (12 changed lines)

@ -78,11 +78,9 @@ static void buf_free(uv_buf_t uv_buf_t) {
}
static void pinger_close_cb(uv_handle_t* handle, int status) {
static void pinger_close_cb(uv_handle_t* handle) {
pinger_t* pinger;
ASSERT(status == 0);
pinger = (pinger_t*)handle->data;
LOGF("ping_pongs: %d roundtrips/s\n", (1000 * pinger->pongs) / TIME);
@ -127,7 +125,7 @@ static void pinger_shutdown_cb(uv_handle_t* handle, int status) {
}
static void pinger_read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
static void pinger_read_cb(uv_tcp_t* tcp, ssize_t nread, uv_buf_t buf) {
unsigned int i;
pinger_t* pinger;
@ -141,7 +139,7 @@ static void pinger_read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
}
ASSERT(pinger_shutdown_cb_called == 1);
uv_close((uv_handle_t*)tcp);
uv_close((uv_handle_t*)tcp, pinger_close_cb);
return;
}
@ -190,9 +188,11 @@ static void pinger_new() {
pinger->pongs = 0;
/* Try to connec to the server and do NUM_PINGS ping-pongs. */
r = uv_tcp_init(&pinger->tcp, pinger_close_cb, (void*)pinger);
r = uv_tcp_init(&pinger->tcp);
ASSERT(!r);
pinger->tcp.data = pinger;
/* We are never doing multiple reads/connects at a time anyway. */
/* so these handles can be pre-initialized. */
uv_req_init(&pinger->connect_req, (uv_handle_t*)&pinger->tcp,
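
With close_cb and data gone from uv_tcp_init(), per-handle context now rides on the handle's data pointer and the close callback is named only at uv_close() time, exactly as the benchmark above now does. A minimal sketch under the post-upgrade API (my_ctx_t and the function names are illustrative):

    #include <stdlib.h>
    #include "uv.h"

    typedef struct { int pongs; } my_ctx_t;     /* illustrative context */

    static void on_close(uv_handle_t* handle) {
      my_ctx_t* ctx = (my_ctx_t*) handle->data; /* recover the context */
      free(ctx);
    }

    static void setup(uv_tcp_t* tcp) {
      my_ctx_t* ctx = malloc(sizeof *ctx);
      uv_tcp_init(tcp);        /* no close_cb/data arguments any more */
      tcp->data = ctx;         /* attach context after init instead */
      /* ... and eventually: */
      uv_close((uv_handle_t*) tcp, on_close);
    }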

deps/uv/test/benchmark-pump.c (23 changed lines)

@ -118,16 +118,13 @@ static void read_show_stats() {
void write_sockets_close_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0);
void write_sockets_close_cb(uv_handle_t* handle) {
/* If any client closes, the process is done. */
exit(0);
}
void read_sockets_close_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0);
void read_sockets_close_cb(uv_handle_t* handle) {
free(handle);
read_sockets--;
@ -136,7 +133,7 @@ void read_sockets_close_cb(uv_handle_t* handle, int status) {
*/
if (uv_now() - start_time > 1000 && read_sockets == 0) {
read_show_stats();
uv_close((uv_handle_t*)&server);
uv_close((uv_handle_t*)&server, NULL);
}
}
@ -147,7 +144,7 @@ static void start_stats_collection() {
/* Show-stats timer */
stats_left = STATS_COUNT;
r = uv_timer_init(&timer_handle, NULL, NULL);
r = uv_timer_init(&timer_handle);
ASSERT(r == 0);
r = uv_timer_start(&timer_handle, show_stats, STATS_INTERVAL, STATS_INTERVAL);
ASSERT(r == 0);
@ -157,7 +154,7 @@ static void start_stats_collection() {
}
static void read_cb(uv_tcp_t* tcp, int bytes, uv_buf_t buf) {
static void read_cb(uv_tcp_t* tcp, ssize_t bytes, uv_buf_t buf) {
if (nrecv_total == 0) {
ASSERT(start_time == 0);
uv_update_time();
@ -165,7 +162,7 @@ static void read_cb(uv_tcp_t* tcp, int bytes, uv_buf_t buf) {
}
if (bytes < 0) {
uv_close((uv_handle_t*)tcp);
uv_close((uv_handle_t*)tcp, read_sockets_close_cb);
return;
}
@ -239,7 +236,7 @@ static void maybe_connect_some() {
max_connect_socket < write_sockets + MAX_SIMULTANEOUS_CONNECTS) {
tcp = &write_handles[max_connect_socket++];
r = uv_tcp_init(tcp, write_sockets_close_cb, NULL);
r = uv_tcp_init(tcp);
ASSERT(r == 0);
req = req_alloc();
@ -259,7 +256,9 @@ static void connection_cb(uv_tcp_t* s, int status) {
tcp = malloc(sizeof(uv_tcp_t));
r = uv_accept(s, tcp, read_sockets_close_cb, NULL);
uv_tcp_init(tcp);
r = uv_accept(s, tcp);
ASSERT(r == 0);
r = uv_read_start(tcp, buf_alloc, read_cb);
@ -350,7 +349,7 @@ HELPER_IMPL(pump_server) {
listen_addr = uv_ip4_addr("0.0.0.0", TEST_PORT);
/* Server */
r = uv_tcp_init(&server, NULL, NULL);
r = uv_tcp_init(&server);
ASSERT(r == 0);
r = uv_bind(&server, listen_addr);
ASSERT(r == 0);

deps/uv/test/echo-server.c (26 changed lines)

@ -36,8 +36,9 @@ static uv_tcp_t server;
static void after_write(uv_req_t* req, int status);
static void after_read(uv_tcp_t*, int nread, uv_buf_t buf);
static void on_close(uv_handle_t* peer, int status);
static void after_read(uv_tcp_t*, ssize_t nread, uv_buf_t buf);
static void on_close(uv_handle_t* peer);
static void on_server_close(uv_handle_t* handle);
static void on_connection(uv_tcp_t*, int status);
@ -59,12 +60,12 @@ static void after_write(uv_req_t* req, int status) {
static void after_shutdown(uv_req_t* req, int status) {
uv_close(req->handle);
uv_close(req->handle, on_close);
free(req);
}
static void after_read(uv_tcp_t* handle, int nread, uv_buf_t buf) {
static void after_read(uv_tcp_t* handle, ssize_t nread, uv_buf_t buf) {
int i;
write_req_t *wr;
uv_req_t* req;
@ -94,7 +95,7 @@ static void after_read(uv_tcp_t* handle, int nread, uv_buf_t buf) {
if (!server_closed) {
for (i = 0; i < nread; i++) {
if (buf.base[i] == 'Q') {
uv_close((uv_handle_t*)&server);
uv_close((uv_handle_t*)&server, on_server_close);
server_closed = 1;
}
}
@ -111,10 +112,8 @@ static void after_read(uv_tcp_t* handle, int nread, uv_buf_t buf) {
}
static void on_close(uv_handle_t* peer, int status) {
if (status != 0) {
fprintf(stdout, "Socket error\n");
}
static void on_close(uv_handle_t* peer) {
free(peer);
}
@ -135,7 +134,9 @@ static void on_connection(uv_tcp_t* server, int status) {
handle = (uv_tcp_t*) malloc(sizeof *handle);
ASSERT(handle != NULL);
r = uv_accept(server, handle, on_close, NULL);
uv_tcp_init(handle);
r = uv_accept(server, handle);
ASSERT(r == 0);
r = uv_read_start(handle, echo_alloc, after_read);
@ -143,9 +144,8 @@ static void on_connection(uv_tcp_t* server, int status) {
}
static void on_server_close(uv_handle_t* handle, int status) {
static void on_server_close(uv_handle_t* handle) {
ASSERT(handle == (uv_handle_t*)&server);
ASSERT(status == 0);
}
@ -153,7 +153,7 @@ static int echo_start(int port) {
struct sockaddr_in addr = uv_ip4_addr("0.0.0.0", port);
int r;
r = uv_tcp_init(&server, on_server_close, NULL);
r = uv_tcp_init(&server);
if (r) {
/* TODO: Error codes */
fprintf(stderr, "Socket creation error\n");
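
The accept path changes shape here: uv_accept() no longer initializes the client handle internally, so the caller does it first. A minimal connection-callback sketch under the API in this diff (error handling trimmed):

    #include <stdlib.h>
    #include "uv.h"

    static void on_close(uv_handle_t* peer) {
      free(peer);
    }

    static void on_connection(uv_tcp_t* server, int status) {
      uv_tcp_t* client;
      if (status != 0)
        return;                        /* listen reported an error */
      client = malloc(sizeof *client);
      if (client == NULL)
        return;
      uv_tcp_init(client);             /* new: initialize before accept */
      if (uv_accept(server, client) != 0)
        uv_close((uv_handle_t*) client, on_close);
    }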

deps/uv/test/runner.c (7 changed lines)

@ -77,7 +77,7 @@ int run_task(task_entry_t *test, int timeout, int benchmark_output) {
}
/* Wait a little bit to allow servers to start. Racy. */
uv_sleep(50);
uv_sleep(100);
/* Start the main test process. */
if (process_start(test->process_name, &processes[process_count]) == -1) {
@ -155,8 +155,9 @@ finalize:
break;
default:
//LOGF("%s: ", test->task_name);
process_copy_output(main_process, fileno(stderr));
for (i = 0; i < process_count; i++) {
process_copy_output(&processes[i], fileno(stderr));
}
break;
}
}

deps/uv/test/test-async.c (12 changed lines)

@ -112,10 +112,8 @@ void thread3_entry(void *arg) {
#endif
static void close_cb(uv_handle_t* handle, int status) {
static void close_cb(uv_handle_t* handle) {
ASSERT(handle != NULL);
ASSERT(status == 0);
close_cb_called++;
}
@ -129,7 +127,7 @@ static void async1_cb(uv_handle_t* handle, int status) {
if (async1_cb_called > 2 && !async1_closed) {
async1_closed = 1;
uv_close(handle);
uv_close(handle, close_cb);
}
}
@ -174,7 +172,7 @@ static void prepare_cb(uv_handle_t* handle, int status) {
#endif
case 1:
r = uv_close(handle);
r = uv_close(handle, close_cb);
ASSERT(r == 0);
break;
@ -191,12 +189,12 @@ TEST_IMPL(async) {
uv_init();
r = uv_prepare_init(&prepare_handle, close_cb, NULL);
r = uv_prepare_init(&prepare_handle);
ASSERT(r == 0);
r = uv_prepare_start(&prepare_handle, prepare_cb);
ASSERT(r == 0);
r = uv_async_init(&async1_handle, async1_cb, close_cb, NULL);
r = uv_async_init(&async1_handle, async1_cb);
ASSERT(r == 0);
#if 0

deps/uv/test/test-bind-error.c (28 changed lines)

@ -28,10 +28,8 @@
static int close_cb_called = 0;
static void close_cb(uv_handle_t* handle, int status) {
static void close_cb(uv_handle_t* handle) {
ASSERT(handle != NULL);
ASSERT(status == 0);
close_cb_called++;
}
@ -43,12 +41,12 @@ TEST_IMPL(bind_error_addrinuse) {
uv_init();
r = uv_tcp_init(&server1, close_cb, NULL);
r = uv_tcp_init(&server1);
ASSERT(r == 0);
r = uv_bind(&server1, addr);
ASSERT(r == 0);
r = uv_tcp_init(&server2, close_cb, NULL);
r = uv_tcp_init(&server2);
ASSERT(r == 0);
r = uv_bind(&server2, addr);
ASSERT(r == 0);
@ -60,8 +58,8 @@ TEST_IMPL(bind_error_addrinuse) {
ASSERT(uv_last_error().code == UV_EADDRINUSE);
uv_close((uv_handle_t*)&server1);
uv_close((uv_handle_t*)&server2);
uv_close((uv_handle_t*)&server1, close_cb);
uv_close((uv_handle_t*)&server2, close_cb);
uv_run();
@ -78,7 +76,7 @@ TEST_IMPL(bind_error_addrnotavail_1) {
uv_init();
r = uv_tcp_init(&server, close_cb, NULL);
r = uv_tcp_init(&server);
ASSERT(r == 0);
r = uv_bind(&server, addr);
@ -87,7 +85,7 @@ TEST_IMPL(bind_error_addrnotavail_1) {
ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL);
}
uv_close((uv_handle_t*)&server);
uv_close((uv_handle_t*)&server, close_cb);
uv_run();
@ -104,13 +102,13 @@ TEST_IMPL(bind_error_addrnotavail_2) {
uv_init();
r = uv_tcp_init(&server, close_cb, NULL);
r = uv_tcp_init(&server);
ASSERT(r == 0);
r = uv_bind(&server, addr);
ASSERT(r == -1);
ASSERT(uv_last_error().code == UV_EADDRNOTAVAIL);
uv_close((uv_handle_t*)&server);
uv_close((uv_handle_t*)&server, close_cb);
uv_run();
@ -130,14 +128,14 @@ TEST_IMPL(bind_error_fault) {
uv_init();
r = uv_tcp_init(&server, close_cb, NULL);
r = uv_tcp_init(&server);
ASSERT(r == 0);
r = uv_bind(&server, *garbage_addr);
ASSERT(r == -1);
ASSERT(uv_last_error().code == UV_EFAULT);
uv_close((uv_handle_t*)&server);
uv_close((uv_handle_t*)&server, close_cb);
uv_run();
@ -156,7 +154,7 @@ TEST_IMPL(bind_error_inval) {
uv_init();
r = uv_tcp_init(&server, close_cb, NULL);
r = uv_tcp_init(&server);
ASSERT(r == 0);
r = uv_bind(&server, addr1);
ASSERT(r == 0);
@ -165,7 +163,7 @@ TEST_IMPL(bind_error_inval) {
ASSERT(uv_last_error().code == UV_EINVAL);
uv_close((uv_handle_t*)&server);
uv_close((uv_handle_t*)&server, close_cb);
uv_run();
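
The pattern these tests rely on, in isolation: failing calls return -1 and the specific error is fetched from uv_last_error(), which uv_err_name() (see uv-common.c below) turns into a string. A small sketch assuming the API in this diff:

    #include <stdio.h>
    #include "uv.h"

    /* A failed uv_bind() returns -1; the detail (e.g. UV_EADDRINUSE)
     * comes from uv_last_error(). */
    static int try_bind(uv_tcp_t* handle, struct sockaddr_in addr) {
      if (uv_bind(handle, addr) == -1) {
        fprintf(stderr, "bind: %s\n", uv_err_name(uv_last_error()));
        return -1;
      }
      return 0;
    }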

deps/uv/test/test-callback-stack.c (13 changed lines)

@ -52,8 +52,7 @@ static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
}
static void close_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0);
static void close_cb(uv_handle_t* handle) {
ASSERT(nested == 0 && "close_cb must be called from a fresh stack");
close_cb_called++;
@ -68,7 +67,7 @@ static void shutdown_cb(uv_req_t* req, int status) {
}
static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
static void read_cb(uv_tcp_t* tcp, ssize_t nread, uv_buf_t buf) {
ASSERT(nested == 0 && "read_cb must be called from a fresh stack");
printf("Read. nread == %d\n", nread);
@ -82,7 +81,7 @@ static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
ASSERT(uv_last_error().code == UV_EOF);
nested++;
if (uv_close((uv_handle_t*)tcp)) {
if (uv_close((uv_handle_t*)tcp, close_cb)) {
FATAL("uv_close failed");
}
nested--;
@ -127,7 +126,7 @@ static void timer_cb(uv_handle_t* handle, int status) {
timer_cb_called++;
r = uv_close(handle);
r = uv_close(handle, close_cb);
ASSERT(r == 0);
}
@ -145,7 +144,7 @@ static void write_cb(uv_req_t* req, int status) {
/* back to our receive buffer when we start reading. This maximizes the */
/* tempation for the backend to use dirty stack for calling read_cb. */
nested++;
r = uv_timer_init(&timer, close_cb, NULL);
r = uv_timer_init(&timer);
ASSERT(r == 0);
r = uv_timer_start(&timer, timer_cb, 500, 0);
ASSERT(r == 0);
@ -185,7 +184,7 @@ TEST_IMPL(callback_stack) {
uv_init();
if (uv_tcp_init(&client, &close_cb, NULL)) {
if (uv_tcp_init(&client)) {
FATAL("uv_tcp_init failed");
}

deps/uv/test/test-connection-fail.c (17 changed lines)

@ -36,14 +36,12 @@ static int timer_close_cb_calls;
static int timer_cb_calls;
static void on_close(uv_handle_t* handle, int status) {
ASSERT(status == 0);
static void on_close(uv_handle_t* handle) {
close_cb_calls++;
}
static void timer_close_cb(uv_handle_t* handle, int status) {
ASSERT(status == 0);
static void timer_close_cb(uv_handle_t* handle) {
timer_close_cb_calls++;
}
@ -61,20 +59,21 @@ static void timer_cb(uv_handle_t* handle, int status) {
ASSERT(connect_cb_calls == 1);
/* Close the tcp handle. */
uv_close((uv_handle_t*)&tcp);
uv_close((uv_handle_t*)&tcp, on_close);
/* Close the timer. */
uv_close(handle);
uv_close(handle, timer_close_cb);
}
static void on_connect_with_close(uv_req_t *req, int status) {
ASSERT(&tcp == (uv_tcp_t*) req->handle);
ASSERT(status == -1);
ASSERT(uv_last_error().code == UV_ECONNREFUSED);
connect_cb_calls++;
ASSERT(close_cb_calls == 0);
uv_close(req->handle);
uv_close(req->handle, on_close);
}
@ -99,7 +98,7 @@ void connection_fail(uv_connect_cb connect_cb) {
server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
/* Try to connec to the server and do NUM_PINGS ping-pongs. */
r = uv_tcp_init(&tcp, on_close, NULL);
r = uv_tcp_init(&tcp);
ASSERT(!r);
/* We are never doing multiple reads/connects at a time anyway. */
@ -141,7 +140,7 @@ TEST_IMPL(connection_fail) {
TEST_IMPL(connection_fail_doesnt_auto_close) {
uv_init();
uv_timer_init(&timer, timer_close_cb, NULL);
uv_timer_init(&timer);
connection_fail(on_connect_without_close);

deps/uv/test/test-delayed-accept.c (35 changed lines)

@ -24,7 +24,6 @@
#include <stdio.h>
#include <stdlib.h>
static char BUFFER[1024];
static int connection_cb_called = 0;
@ -41,9 +40,8 @@ static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
}
static void close_cb(uv_handle_t* handle, int status) {
static void close_cb(uv_handle_t* handle) {
ASSERT(handle != NULL);
ASSERT(status == 0);
free(handle);
@ -55,29 +53,37 @@ static void do_accept(uv_handle_t* timer_handle, int status) {
uv_tcp_t* server;
uv_tcp_t* accepted_handle = (uv_tcp_t*)malloc(sizeof *accepted_handle);
int r;
int tcpcnt;
ASSERT(timer_handle != NULL);
ASSERT(status == 0);
ASSERT(accepted_handle != NULL);
uv_tcp_init(accepted_handle);
/* Test to that uv_counters()->tcp_init does not increase across the uv_accept. */
tcpcnt = uv_counters()->tcp_init;
server = (uv_tcp_t*)timer_handle->data;
r = uv_accept(server, accepted_handle, close_cb, NULL);
r = uv_accept(server, accepted_handle);
ASSERT(r == 0);
ASSERT(uv_counters()->tcp_init == tcpcnt);
do_accept_called++;
/* Immediately close the accepted handle. */
r = uv_close((uv_handle_t*)accepted_handle);
r = uv_close((uv_handle_t*)accepted_handle, close_cb);
ASSERT(r == 0);
/* After accepting the two clients close the server handle */
if (do_accept_called == 2) {
r = uv_close((uv_handle_t*)server);
r = uv_close((uv_handle_t*)server, close_cb);
ASSERT(r == 0);
}
/* Dispose the timer. */
r = uv_close(timer_handle);
r = uv_close(timer_handle, close_cb);
ASSERT(r == 0);
}
@ -92,8 +98,11 @@ static void connection_cb(uv_tcp_t* tcp, int status) {
ASSERT(timer_handle != NULL);
/* Accept the client after 1 second */
r = uv_timer_init(timer_handle, close_cb, (void*)tcp);
r = uv_timer_init(timer_handle);
ASSERT(r == 0);
timer_handle->data = tcp;
r = uv_timer_start(timer_handle, do_accept, 1000, 0);
ASSERT(r == 0);
@ -108,8 +117,10 @@ static void start_server() {
ASSERT(server != NULL);
r = uv_tcp_init(server, close_cb, NULL);
r = uv_tcp_init(server);
ASSERT(r == 0);
ASSERT(uv_counters()->tcp_init == 1);
ASSERT(uv_counters()->handle_init == 1);
r = uv_bind(server, addr);
ASSERT(r == 0);
@ -119,7 +130,7 @@ static void start_server() {
}
static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
static void read_cb(uv_tcp_t* tcp, ssize_t nread, uv_buf_t buf) {
/* The server will not send anything, it should close gracefully. */
ASSERT(tcp != NULL);
ASSERT(nread == -1);
@ -129,7 +140,7 @@ static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
free(buf.base);
}
uv_close((uv_handle_t*)tcp);
uv_close((uv_handle_t*)tcp, close_cb);
}
@ -159,7 +170,7 @@ static void client_connect() {
ASSERT(client != NULL);
ASSERT(connect_req != NULL);
r = uv_tcp_init(client, close_cb, NULL);
r = uv_tcp_init(client);
ASSERT(r == 0);
uv_req_init(connect_req, (uv_handle_t*)client, connect_cb);
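
Client setup now splits cleanly into "init the handle, then init the request": the request carries the callback via uv_req_init(). uv_connect's prototype is not shown in this diff, so the sketch below assumes the uv_connect(uv_req_t*, struct sockaddr_in) form this code base used at the time:

    #include "uv.h"

    static void start_connect(uv_tcp_t* client, uv_req_t* req,
                              uv_connect_cb connect_cb) {
      /* 9123 is an arbitrary stand-in for the suite's TEST_PORT. */
      struct sockaddr_in addr = uv_ip4_addr("127.0.0.1", 9123);
      uv_tcp_init(client);
      uv_req_init(req, (uv_handle_t*) client, (void*) connect_cb);
      uv_connect(req, addr);
    }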

deps/uv/test/test-loop-handles.c (110 changed lines)

@ -117,8 +117,15 @@ static void timer_cb(uv_handle_t* handle, int status) {
}
static void timer_close_cb(uv_handle_t* handle, int status) {
FATAL("timer_close_cb should not be called");
static void idle_2_close_cb(uv_handle_t* handle) {
LOG("IDLE_2_CLOSE_CB\n");
ASSERT(handle == (uv_handle_t*)&idle_2_handle);
ASSERT(idle_2_is_active);
idle_2_close_cb_called++;
idle_2_is_active = 0;
}
@ -132,24 +139,11 @@ static void idle_2_cb(uv_handle_t* handle, int status) {
idle_2_cb_called++;
r = uv_close(handle);
r = uv_close(handle, idle_2_close_cb);
ASSERT(r == 0);
}
static void idle_2_close_cb(uv_handle_t* handle, int status){
LOG("IDLE_2_CLOSE_CB\n");
ASSERT(handle == (uv_handle_t*)&idle_2_handle);
ASSERT(status == 0);
ASSERT(idle_2_is_active);
idle_2_close_cb_called++;
idle_2_is_active = 0;
}
static void idle_1_cb(uv_handle_t* handle, int status) {
int r;
@ -162,7 +156,7 @@ static void idle_1_cb(uv_handle_t* handle, int status) {
/* Init idle_2 and make it active */
if (!idle_2_is_active) {
r = uv_idle_init(&idle_2_handle, idle_2_close_cb, NULL);
r = uv_idle_init(&idle_2_handle);
ASSERT(r == 0);
r = uv_idle_start(&idle_2_handle, idle_2_cb);
ASSERT(r == 0);
@ -180,16 +174,39 @@ static void idle_1_cb(uv_handle_t* handle, int status) {
}
static void idle_1_close_cb(uv_handle_t* handle, int status){
static void idle_1_close_cb(uv_handle_t* handle) {
LOG("IDLE_1_CLOSE_CB\n");
ASSERT(handle != NULL);
ASSERT(status == 0);
idle_1_close_cb_called++;
}
static void prepare_1_close_cb(uv_handle_t* handle) {
LOG("PREPARE_1_CLOSE_CB");
ASSERT(handle == (uv_handle_t*)&prepare_1_handle);
prepare_1_close_cb_called++;
}
static void check_close_cb(uv_handle_t* handle) {
LOG("CHECK_CLOSE_CB\n");
ASSERT(handle == (uv_handle_t*)&check_handle);
check_close_cb_called++;
}
static void prepare_2_close_cb(uv_handle_t* handle) {
LOG("PREPARE_2_CLOSE_CB\n");
ASSERT(handle == (uv_handle_t*)&prepare_2_handle);
prepare_2_close_cb_called++;
}
static void check_cb(uv_handle_t* handle, int status) {
int i, r;
@ -213,22 +230,22 @@ static void check_cb(uv_handle_t* handle, int status) {
} else {
/* End of the test - close all handles */
r = uv_close((uv_handle_t*)&prepare_1_handle);
r = uv_close((uv_handle_t*)&prepare_1_handle, prepare_1_close_cb);
ASSERT(r == 0);
r = uv_close((uv_handle_t*)&check_handle);
r = uv_close((uv_handle_t*)&check_handle, check_close_cb);
ASSERT(r == 0);
r = uv_close((uv_handle_t*)&prepare_2_handle);
r = uv_close((uv_handle_t*)&prepare_2_handle, prepare_2_close_cb);
ASSERT(r == 0);
for (i = 0; i < IDLE_COUNT; i++) {
r = uv_close((uv_handle_t*)&idle_1_handles[i]);
r = uv_close((uv_handle_t*)&idle_1_handles[i], idle_1_close_cb);
ASSERT(r == 0);
}
/* This handle is closed/recreated every time, close it only if it is */
/* active.*/
if (idle_2_is_active) {
r = uv_close((uv_handle_t*)&idle_2_handle);
r = uv_close((uv_handle_t*)&idle_2_handle, idle_2_close_cb);
ASSERT(r == 0);
}
}
@ -237,15 +254,6 @@ static void check_cb(uv_handle_t* handle, int status) {
}
static void check_close_cb(uv_handle_t* handle, int status){
LOG("CHECK_CLOSE_CB\n");
ASSERT(handle == (uv_handle_t*)&check_handle);
ASSERT(status == 0);
check_close_cb_called++;
}
static void prepare_2_cb(uv_handle_t* handle, int status) {
int r;
@ -270,15 +278,6 @@ static void prepare_2_cb(uv_handle_t* handle, int status) {
}
static void prepare_2_close_cb(uv_handle_t* handle, int status) {
LOG("PREPARE_2_CLOSE_CB\n");
ASSERT(handle == (uv_handle_t*)&prepare_2_handle);
ASSERT(status == 0);
prepare_2_close_cb_called++;
}
static void prepare_1_cb(uv_handle_t* handle, int status) {
int r;
@ -304,38 +303,29 @@ static void prepare_1_cb(uv_handle_t* handle, int status) {
}
static void prepare_1_close_cb(uv_handle_t* handle, int status){
LOG("PREPARE_1_CLOSE_CB");
ASSERT(handle == (uv_handle_t*)&prepare_1_handle);
ASSERT(status == 0);
prepare_1_close_cb_called++;
}
TEST_IMPL(loop_handles) {
int i;
int r;
uv_init();
r = uv_prepare_init(&prepare_1_handle, prepare_1_close_cb, NULL);
r = uv_prepare_init(&prepare_1_handle);
ASSERT(r == 0);
r = uv_prepare_start(&prepare_1_handle, prepare_1_cb);
ASSERT(r == 0);
r = uv_check_init(&check_handle, check_close_cb, NULL);
r = uv_check_init(&check_handle);
ASSERT(r == 0);
r = uv_check_start(&check_handle, check_cb);
ASSERT(r == 0);
/* initialize only, prepare_2 is started by prepare_1_cb */
r = uv_prepare_init(&prepare_2_handle, prepare_2_close_cb, NULL);
r = uv_prepare_init(&prepare_2_handle);
ASSERT(r == 0);
for (i = 0; i < IDLE_COUNT; i++) {
/* initialize only, idle_1 handles are started by check_cb */
r = uv_idle_init(&idle_1_handles[i], idle_1_close_cb, NULL);
r = uv_idle_init(&idle_1_handles[i]);
ASSERT(r == 0);
}
@ -343,7 +333,7 @@ TEST_IMPL(loop_handles) {
/* the timer callback is there to keep the event loop polling */
/* unref it as it is not supposed to keep the loop alive */
r = uv_timer_init(&timer_handle, timer_close_cb, NULL);
r = uv_timer_init(&timer_handle);
ASSERT(r == 0);
r = uv_timer_start(&timer_handle, timer_cb, TIMEOUT, TIMEOUT);
ASSERT(r == 0);
@ -389,7 +379,7 @@ TEST_IMPL(ref) {
TEST_IMPL(idle_ref) {
uv_idle_t h;
uv_init();
uv_idle_init(&h, NULL, NULL);
uv_idle_init(&h);
uv_idle_start(&h, NULL);
uv_unref();
uv_run();
@ -400,7 +390,7 @@ TEST_IMPL(idle_ref) {
TEST_IMPL(async_ref) {
uv_async_t h;
uv_init();
uv_async_init(&h, NULL, NULL, NULL);
uv_async_init(&h, NULL);
uv_unref();
uv_run();
return 0;
@ -410,7 +400,7 @@ TEST_IMPL(async_ref) {
TEST_IMPL(prepare_ref) {
uv_prepare_t h;
uv_init();
uv_prepare_init(&h, NULL, NULL);
uv_prepare_init(&h);
uv_prepare_start(&h, NULL);
uv_unref();
uv_run();
@ -421,7 +411,7 @@ TEST_IMPL(prepare_ref) {
TEST_IMPL(check_ref) {
uv_check_t h;
uv_init();
uv_check_init(&h, NULL, NULL);
uv_check_init(&h);
uv_check_start(&h, NULL);
uv_unref();
uv_run();
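
The ref tests above reduce to one idiom: a handle takes a loop reference when initialized, and uv_unref() drops it so the handle cannot keep uv_run() alive. As a standalone sketch (essentially the idle_ref test):

    #include "uv.h"

    int main(void) {
      uv_idle_t idle;
      uv_init();
      uv_idle_init(&idle);        /* no close_cb/data at init any more */
      uv_idle_start(&idle, NULL);
      uv_unref();                 /* drop the reference the handle took */
      uv_run();                   /* returns: nothing holds the loop open */
      return 0;
    }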

deps/uv/test/test-ping-pong.c (12 changed lines)

@ -56,10 +56,9 @@ static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
}
static void pinger_on_close(uv_handle_t* handle, int status) {
static void pinger_on_close(uv_handle_t* handle) {
pinger_t* pinger = (pinger_t*)handle->data;
ASSERT(status == 0);
ASSERT(NUM_PINGS == pinger->pongs);
free(pinger);
@ -93,7 +92,7 @@ static void pinger_write_ping(pinger_t* pinger) {
}
static void pinger_read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
static void pinger_read_cb(uv_tcp_t* tcp, ssize_t nread, uv_buf_t buf) {
unsigned int i;
pinger_t* pinger;
@ -108,7 +107,7 @@ static void pinger_read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
free(buf.base);
}
uv_close((uv_handle_t*)(&pinger->tcp));
uv_close((uv_handle_t*)(&pinger->tcp), pinger_on_close);
return;
}
@ -123,7 +122,7 @@ static void pinger_read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
if (pinger->pongs < NUM_PINGS) {
pinger_write_ping(pinger);
} else {
uv_close((uv_handle_t*)(&pinger->tcp));
uv_close((uv_handle_t*)(&pinger->tcp), pinger_on_close);
return;
}
}
@ -152,7 +151,8 @@ static void pinger_new() {
pinger->pongs = 0;
/* Try to connec to the server and do NUM_PINGS ping-pongs. */
r = uv_tcp_init(&pinger->tcp, pinger_on_close, (void*)pinger);
r = uv_tcp_init(&pinger->tcp);
pinger->tcp.data = pinger;
ASSERT(!r);
/* We are never doing multiple reads/connects at a time anyway. */
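
The read callback's new shape, in isolation: nread is now ssize_t, and both errors and EOF arrive as nread == -1 with the cause in uv_last_error(). A minimal sketch under the API in this diff:

    #include <stdlib.h>
    #include "uv.h"

    static void on_read(uv_tcp_t* tcp, ssize_t nread, uv_buf_t buf) {
      if (nread < 0) {
        if (buf.base != NULL)
          free(buf.base);                      /* buffer from alloc_cb */
        if (uv_last_error().code == UV_EOF)
          uv_close((uv_handle_t*) tcp, NULL);  /* peer finished cleanly */
        return;
      }
      /* ... process nread bytes at buf.base ... */
      free(buf.base);
    }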

deps/uv/test/test-shutdown-eof.c (14 changed lines)

@ -45,7 +45,7 @@ static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
}
static void read_cb(uv_tcp_t* t, int nread, uv_buf_t buf) {
static void read_cb(uv_tcp_t* t, ssize_t nread, uv_buf_t buf) {
ASSERT(t == &tcp);
if (!got_q) {
@ -102,7 +102,7 @@ static void connect_cb(uv_req_t *req, int status) {
}
void tcp_close_cb(uv_handle_t* handle, int status) {
void tcp_close_cb(uv_handle_t* handle) {
ASSERT(handle == (uv_handle_t*) &tcp);
ASSERT(called_connect_cb == 1);
@ -114,7 +114,7 @@ void tcp_close_cb(uv_handle_t* handle, int status) {
}
void timer_close_cb(uv_handle_t* handle, int status) {
void timer_close_cb(uv_handle_t* handle) {
ASSERT(handle == (uv_handle_t*) &timer);
called_timer_close_cb++;
}
@ -122,14 +122,14 @@ void timer_close_cb(uv_handle_t* handle, int status) {
void timer_cb(uv_handle_t* handle, int status) {
ASSERT(handle == (uv_handle_t*) &timer);
uv_close(handle);
uv_close(handle, timer_close_cb);
/*
* The most important assert of the test: we have not received
* tcp_close_cb yet.
*/
ASSERT(called_tcp_close_cb == 0);
uv_close((uv_handle_t*) &tcp);
uv_close((uv_handle_t*) &tcp, tcp_close_cb);
called_timer_cb++;
}
@ -150,11 +150,11 @@ TEST_IMPL(shutdown_eof) {
qbuf.base = "Q";
qbuf.len = 1;
uv_timer_init(&timer, timer_close_cb, NULL);
uv_timer_init(&timer);
uv_timer_start(&timer, timer_cb, 100, 0);
server_addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
r = uv_tcp_init(&tcp, tcp_close_cb, NULL);
r = uv_tcp_init(&tcp);
ASSERT(!r);
uv_req_init(&connect_req, (uv_handle_t*) &tcp, connect_cb);
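
Half-close, reduced to its new form: a shutdown request flushes pending writes and sends EOF, after which the handle still needs uv_close(), now with the close callback passed at that point. A sketch assuming the API in this diff:

    #include "uv.h"

    static void on_shutdown(uv_req_t* req, int status) {
      uv_close(req->handle, NULL);  /* close cb is named here, not at init */
    }

    static void shutdown_write_side(uv_tcp_t* tcp, uv_req_t* req) {
      uv_req_init(req, (uv_handle_t*) tcp, (void*) on_shutdown);
      uv_shutdown(req);
    }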

deps/uv/test/test-tcp-writealot.c (9 changed lines)

@ -53,9 +53,8 @@ static uv_buf_t alloc_cb(uv_tcp_t* tcp, size_t size) {
}
static void close_cb(uv_handle_t* handle, int status) {
static void close_cb(uv_handle_t* handle) {
ASSERT(handle != NULL);
ASSERT(status == 0);
free(handle);
@ -84,7 +83,7 @@ static void shutdown_cb(uv_req_t* req, int status) {
}
static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
static void read_cb(uv_tcp_t* tcp, ssize_t nread, uv_buf_t buf) {
ASSERT(tcp != NULL);
if (nread < 0) {
@ -95,7 +94,7 @@ static void read_cb(uv_tcp_t* tcp, int nread, uv_buf_t buf) {
free(buf.base);
}
uv_close((uv_handle_t*)tcp);
uv_close((uv_handle_t*)tcp, close_cb);
return;
}
@ -182,7 +181,7 @@ TEST_IMPL(tcp_writealot) {
uv_init();
r = uv_tcp_init(client, close_cb, NULL);
r = uv_tcp_init(client);
ASSERT(r == 0);
uv_req_init(connect_req, (uv_handle_t*)client, connect_cb);

deps/uv/test/test-timer-again.c (13 changed lines)

@ -34,9 +34,8 @@ static uv_timer_t dummy, repeat_1, repeat_2;
static int64_t start_time;
static void close_cb(uv_handle_t* handle, int status) {
static void close_cb(uv_handle_t* handle) {
ASSERT(handle != NULL);
ASSERT(status == 0);
close_cb_called++;
}
@ -58,7 +57,7 @@ static void repeat_1_cb(uv_handle_t* handle, int status) {
ASSERT(r == 0);
if (uv_now() >= start_time + 500) {
uv_close(handle);
uv_close(handle, close_cb);
/* We're not calling uv_timer_again on repeat_2 any more, so after this */
/* timer_2_cb is expected. */
repeat_2_cb_allowed = 1;
@ -78,7 +77,7 @@ static void repeat_2_cb(uv_handle_t* handle, int status) {
if (uv_timer_get_repeat(&repeat_2) == 0) {
ASSERT(!uv_is_active(handle));
uv_close(handle);
uv_close(handle, close_cb);
return;
}
@ -100,7 +99,7 @@ TEST_IMPL(timer_again) {
ASSERT(0 < start_time);
/* Verify that it is not possible to uv_timer_again a never-started timer. */
r = uv_timer_init(&dummy, NULL, NULL);
r = uv_timer_init(&dummy);
ASSERT(r == 0);
r = uv_timer_again(&dummy);
ASSERT(r == -1);
@ -108,7 +107,7 @@ TEST_IMPL(timer_again) {
uv_unref();
/* Start timer repeat_1. */
r = uv_timer_init(&repeat_1, close_cb, NULL);
r = uv_timer_init(&repeat_1);
ASSERT(r == 0);
r = uv_timer_start(&repeat_1, repeat_1_cb, 50, 0);
ASSERT(r == 0);
@ -122,7 +121,7 @@ TEST_IMPL(timer_again) {
* Start another repeating timer. It'll be again()ed by the repeat_1 so
* it should not time out until repeat_1 stops.
*/
r = uv_timer_init(&repeat_2, close_cb, NULL);
r = uv_timer_init(&repeat_2);
ASSERT(r == 0);
r = uv_timer_start(&repeat_2, repeat_2_cb, 100, 100);
ASSERT(r == 0);

deps/uv/test/test-timer.c (21 changed lines)

@ -31,11 +31,10 @@ static int repeat_close_cb_called = 0;
static int64_t start_time;
static void once_close_cb(uv_handle_t* handle, int status) {
static void once_close_cb(uv_handle_t* handle) {
printf("ONCE_CLOSE_CB\n");
ASSERT(handle != NULL);
ASSERT(status == 0);
once_close_cb_called++;
@ -51,18 +50,17 @@ static void once_cb(uv_handle_t* handle, int status) {
once_cb_called++;
uv_close(handle);
uv_close(handle, once_close_cb);
/* Just call this randomly for the code coverage. */
uv_update_time();
}
static void repeat_close_cb(uv_handle_t* handle, int status) {
static void repeat_close_cb(uv_handle_t* handle) {
printf("REPEAT_CLOSE_CB\n");
ASSERT(handle != NULL);
ASSERT(status == 0);
repeat_close_cb_called++;
}
@ -77,13 +75,8 @@ static void repeat_cb(uv_handle_t* handle, int status) {
repeat_cb_called++;
if (repeat_cb_called == 5) {
uv_close(handle);
}
uv_close(handle, repeat_close_cb);
}
static void never_close_cb(uv_handle_t* handle, int status) {
FATAL("never_close_cb should never be called");
}
@ -106,20 +99,20 @@ TEST_IMPL(timer) {
for (i = 0; i < 10; i++) {
once = (uv_timer_t*)malloc(sizeof(*once));
ASSERT(once != NULL);
r = uv_timer_init(once, once_close_cb, NULL);
r = uv_timer_init(once);
ASSERT(r == 0);
r = uv_timer_start(once, once_cb, i * 50, 0);
ASSERT(r == 0);
}
/* The 11th timer is a repeating timer that runs 4 times */
r = uv_timer_init(&repeat, repeat_close_cb, NULL);
r = uv_timer_init(&repeat);
ASSERT(r == 0);
r = uv_timer_start(&repeat, repeat_cb, 100, 100);
ASSERT(r == 0);
/* The 12th timer should not do anything. */
r = uv_timer_init(&never, never_close_cb, NULL);
r = uv_timer_init(&never);
ASSERT(r == 0);
r = uv_timer_start(&never, never_cb, 100, 100);
ASSERT(r == 0);
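
The timer API after this change, end to end: uv_timer_init() takes only the handle, the tick callback moves to uv_timer_start(), and the close callback to uv_close(). A standalone sketch:

    #include <stdio.h>
    #include "uv.h"

    static void on_timer(uv_handle_t* handle, int status) {
      printf("tick\n");
      uv_close(handle, NULL);    /* close cb supplied here, not at init */
    }

    int main(void) {
      uv_timer_t timer;
      uv_init();
      if (uv_timer_init(&timer) == 0)
        uv_timer_start(&timer, on_timer, 100 /* ms */, 0 /* no repeat */);
      uv_run();
      return 0;
    }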

deps/uv/uv-common.c (8 changed lines)

@ -25,6 +25,14 @@
#include <stddef.h> /* NULL */
static uv_counters_t counters;
uv_counters_t* const uv_counters() {
return &counters;
}
const char* uv_err_name(uv_err_t err) {
switch (err.code) {
case UV_UNKNOWN: return "UNKNOWN";

deps/uv/uv-unix.c (54 changed lines)

@ -41,6 +41,7 @@
#include <sys/sysctl.h>
#endif
static uv_err_t last_err;
@ -132,11 +133,13 @@ struct sockaddr_in uv_ip4_addr(char* ip, int port) {
}
int uv_close(uv_handle_t* handle) {
int uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
uv_tcp_t* tcp;
uv_async_t* async;
uv_timer_t* timer;
handle->close_cb = close_cb;
switch (handle->type) {
case UV_TCP:
tcp = (uv_tcp_t*) handle;
@ -202,11 +205,10 @@ int uv_run() {
}
static void uv__handle_init(uv_handle_t* handle, uv_handle_type type,
uv_close_cb close_cb, void* data) {
static void uv__handle_init(uv_handle_t* handle, uv_handle_type type) {
uv_counters()->handle_init++;
handle->type = type;
handle->close_cb = close_cb;
handle->data = data;
handle->flags = 0;
ev_init(&handle->next_watcher, uv__next);
@ -217,8 +219,9 @@ static void uv__handle_init(uv_handle_t* handle, uv_handle_type type,
}
int uv_tcp_init(uv_tcp_t* tcp, uv_close_cb close_cb, void* data) {
uv__handle_init((uv_handle_t*)tcp, UV_TCP, close_cb, data);
int uv_tcp_init(uv_tcp_t* tcp) {
uv__handle_init((uv_handle_t*)tcp, UV_TCP);
uv_counters()->tcp_init++;
tcp->alloc_cb = NULL;
tcp->connect_req = NULL;
@ -363,13 +366,9 @@ void uv__server_io(EV_P_ ev_io* watcher, int revents) {
}
int uv_accept(uv_tcp_t* server, uv_tcp_t* client,
uv_close_cb close_cb, void* data) {
int uv_accept(uv_tcp_t* server, uv_tcp_t* client) {
if (server->accepted_fd < 0) {
return -1;
}
if (uv_tcp_init(client, close_cb, data)) {
uv_err_new((uv_handle_t*) server, EAGAIN);
return -1;
}
@ -469,7 +468,7 @@ void uv__finish_close(uv_handle_t* handle) {
ev_idle_stop(EV_DEFAULT_ &handle->next_watcher);
if (handle->close_cb) {
handle->close_cb(handle, 0);
handle->close_cb(handle);
}
ev_unref(EV_DEFAULT_UC);
@ -967,6 +966,7 @@ int uv_read_stop(uv_tcp_t* tcp) {
void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb) {
uv_counters()->req_init++;
req->type = UV_UNKNOWN_REQ;
req->cb = cb;
req->handle = handle;
@ -983,8 +983,9 @@ static void uv__prepare(EV_P_ ev_prepare* w, int revents) {
}
int uv_prepare_init(uv_prepare_t* prepare, uv_close_cb close_cb, void* data) {
uv__handle_init((uv_handle_t*)prepare, UV_PREPARE, close_cb, data);
int uv_prepare_init(uv_prepare_t* prepare) {
uv__handle_init((uv_handle_t*)prepare, UV_PREPARE);
uv_counters()->prepare_init++;
ev_prepare_init(&prepare->prepare_watcher, uv__prepare);
prepare->prepare_watcher.data = prepare;
@ -1032,8 +1033,9 @@ static void uv__check(EV_P_ ev_check* w, int revents) {
}
int uv_check_init(uv_check_t* check, uv_close_cb close_cb, void* data) {
uv__handle_init((uv_handle_t*)check, UV_CHECK, close_cb, data);
int uv_check_init(uv_check_t* check) {
uv__handle_init((uv_handle_t*)check, UV_CHECK);
uv_counters()->check_init;
ev_check_init(&check->check_watcher, uv__check);
check->check_watcher.data = check;
@ -1082,8 +1084,9 @@ static void uv__idle(EV_P_ ev_idle* w, int revents) {
int uv_idle_init(uv_idle_t* idle, uv_close_cb close_cb, void* data) {
uv__handle_init((uv_handle_t*)idle, UV_IDLE, close_cb, data);
int uv_idle_init(uv_idle_t* idle) {
uv__handle_init((uv_handle_t*)idle, UV_IDLE);
uv_counters()->idle_init++;
ev_idle_init(&idle->idle_watcher, uv__idle);
idle->idle_watcher.data = idle;
@ -1150,9 +1153,9 @@ static void uv__async(EV_P_ ev_async* w, int revents) {
}
int uv_async_init(uv_async_t* async, uv_async_cb async_cb,
uv_close_cb close_cb, void* data) {
uv__handle_init((uv_handle_t*)async, UV_ASYNC, close_cb, data);
int uv_async_init(uv_async_t* async, uv_async_cb async_cb) {
uv__handle_init((uv_handle_t*)async, UV_ASYNC);
uv_counters()->async_init++;
ev_async_init(&async->async_watcher, uv__async);
async->async_watcher.data = async;
@ -1185,8 +1188,9 @@ static void uv__timer_cb(EV_P_ ev_timer* w, int revents) {
}
int uv_timer_init(uv_timer_t* timer, uv_close_cb close_cb, void* data) {
uv__handle_init((uv_handle_t*)timer, UV_TIMER, close_cb, data);
int uv_timer_init(uv_timer_t* timer) {
uv__handle_init((uv_handle_t*)timer, UV_TIMER);
uv_counters()->timer_init++;
ev_init(&timer->timer_watcher, uv__timer_cb);
timer->timer_watcher.data = timer;
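
The same slimming applies to async handles: uv_async_init() now takes just the wakeup callback. A minimal sketch under the new signatures:

    #include "uv.h"

    static void on_async(uv_handle_t* handle, int status) {
      /* woken up; uv_async_send() was called from some thread */
    }

    static void setup_async(uv_async_t* a) {
      uv_async_init(a, on_async);
      /* later, from any thread: uv_async_send(a); */
    }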

deps/uv/uv-win.c (100 changed lines)

@ -143,7 +143,7 @@ static LPFN_TRANSMITFILE pTransmitFile;
/* Binary tree used to keep the list of timers sorted. */
static int uv_timer_compare(uv_timer_t* handle1, uv_timer_t* handle2);
RB_HEAD(uv_timer_tree_s, uv_timer_s);
RB_PROTOTYPE_STATIC(uv_timer_tree_s, uv_timer_s, tree_entry, uv_timer_compare);
RB_PROTOTYPE_STATIC(uv_timer_tree_s, uv_timer_s, tree_entry, uv_timer_compare)
/* The head of the timers tree */
static struct uv_timer_tree_s uv_timers_ = RB_INITIALIZER(uv_timers_);
@ -410,6 +410,7 @@ void uv_init() {
void uv_req_init(uv_req_t* req, uv_handle_t* handle, void* cb) {
uv_counters()->req_init++;
req->type = UV_UNKNOWN_REQ;
req->flags = 0;
req->handle = handle;
@ -455,13 +456,13 @@ static uv_req_t* uv_remove_pending_req() {
}
static int uv_tcp_init_socket(uv_tcp_t* handle, uv_close_cb close_cb,
void* data, SOCKET socket) {
static int uv_tcp_init_socket(uv_tcp_t* handle, SOCKET socket) {
DWORD yes = 1;
uv_counters()->handle_init++;
uv_counters()->tcp_init++;
handle->socket = socket;
handle->close_cb = close_cb;
handle->data = data;
handle->write_queue_size = 0;
handle->type = UV_TCP;
handle->flags = 0;
@ -504,7 +505,7 @@ static void uv_tcp_init_connection(uv_tcp_t* handle) {
}
int uv_tcp_init(uv_tcp_t* handle, uv_close_cb close_cb, void* data) {
int uv_tcp_init(uv_tcp_t* handle) {
SOCKET sock;
sock = socket(AF_INET, SOCK_STREAM, 0);
@ -513,7 +514,7 @@ int uv_tcp_init(uv_tcp_t* handle, uv_close_cb close_cb, void* data) {
return -1;
}
if (uv_tcp_init_socket(handle, close_cb, data, sock) == -1) {
if (uv_tcp_init_socket(handle, sock) == -1) {
closesocket(sock);
return -1;
}
@ -553,8 +554,7 @@ static void uv_tcp_endgame(uv_tcp_t* handle) {
handle->flags |= UV_HANDLE_CLOSED;
if (handle->close_cb) {
uv_last_error_ = handle->error;
handle->close_cb((uv_handle_t*)handle, handle->error.code == UV_OK ? 0 : 1);
handle->close_cb((uv_handle_t*)handle);
}
uv_refs_--;
@ -568,7 +568,7 @@ static void uv_timer_endgame(uv_timer_t* handle) {
handle->flags |= UV_HANDLE_CLOSED;
if (handle->close_cb) {
handle->close_cb((uv_handle_t*)handle, 0);
handle->close_cb((uv_handle_t*)handle);
}
uv_refs_--;
@ -582,7 +582,7 @@ static void uv_loop_endgame(uv_handle_t* handle) {
handle->flags |= UV_HANDLE_CLOSED;
if (handle->close_cb) {
handle->close_cb(handle, 0);
handle->close_cb(handle);
}
uv_refs_--;
@ -597,7 +597,7 @@ static void uv_async_endgame(uv_async_t* handle) {
handle->flags |= UV_HANDLE_CLOSED;
if (handle->close_cb) {
handle->close_cb((uv_handle_t*)handle, 0);
handle->close_cb((uv_handle_t*)handle);
}
uv_refs_--;
@ -665,6 +665,7 @@ static int uv_close_error(uv_handle_t* handle, uv_err_t e) {
switch (handle->type) {
case UV_TCP:
tcp = (uv_tcp_t*)handle;
tcp->flags &= ~(UV_HANDLE_READING | UV_HANDLE_LISTENING);
closesocket(tcp->socket);
if (tcp->reqs_pending == 0) {
uv_want_endgame(handle);
@ -705,7 +706,8 @@ static int uv_close_error(uv_handle_t* handle, uv_err_t e) {
}
int uv_close(uv_handle_t* handle) {
int uv_close(uv_handle_t* handle, uv_close_cb close_cb) {
handle->close_cb = close_cb;
return uv_close_error(handle, uv_ok_);
}
@ -868,8 +870,7 @@ int uv_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
}
int uv_accept(uv_tcp_t* server, uv_tcp_t* client,
uv_close_cb close_cb, void* data) {
int uv_accept(uv_tcp_t* server, uv_tcp_t* client) {
int rv = 0;
if (server->accept_socket == INVALID_SOCKET) {
@ -877,7 +878,7 @@ int uv_accept(uv_tcp_t* server, uv_tcp_t* client,
return -1;
}
if (uv_tcp_init_socket(client, close_cb, data, server->accept_socket) == -1) {
if (uv_tcp_init_socket(client, server->accept_socket) == -1) {
closesocket(server->accept_socket);
rv = -1;
}
@ -1093,12 +1094,16 @@ static void uv_tcp_return_req(uv_tcp_t* handle, uv_req_t* req) {
case UV_READ:
if (req->error.code != UV_OK) {
/* An error occurred doing the 0-read. */
if (!(handle->flags & UV_HANDLE_READING)) {
break;
}
/* Stop reading and report error. */
handle->flags &= ~UV_HANDLE_READING;
uv_last_error_ = req->error;
buf.base = 0;
buf.len = 0;
((uv_read_cb)handle->read_cb)(handle, -1, buf);
handle->read_cb(handle, -1, buf);
break;
}
@ -1116,7 +1121,7 @@ static void uv_tcp_return_req(uv_tcp_t* handle, uv_req_t* req) {
NULL) != SOCKET_ERROR) {
if (bytes > 0) {
/* Successful read */
((uv_read_cb)handle->read_cb)(handle, bytes, buf);
handle->read_cb(handle, bytes, buf);
/* Read again only if bytes == buf.len */
if (bytes < buf.len) {
break;
@ -1127,7 +1132,7 @@ static void uv_tcp_return_req(uv_tcp_t* handle, uv_req_t* req) {
handle->flags |= UV_HANDLE_EOF;
uv_last_error_.code = UV_EOF;
uv_last_error_.sys_errno_ = ERROR_SUCCESS;
((uv_read_cb)handle->read_cb)(handle, -1, buf);
handle->read_cb(handle, -1, buf);
break;
}
} else {
@ -1135,18 +1140,17 @@ static void uv_tcp_return_req(uv_tcp_t* handle, uv_req_t* req) {
if (err == WSAEWOULDBLOCK) {
/* Read buffer was completely empty, report a 0-byte read. */
uv_set_sys_error(WSAEWOULDBLOCK);
((uv_read_cb)handle->read_cb)(handle, 0, buf);
handle->read_cb(handle, 0, buf);
} else {
/* Ouch! serious error. */
uv_set_sys_error(err);
((uv_read_cb)handle->read_cb)(handle, -1, buf);
handle->read_cb(handle, -1, buf);
}
break;
}
}
/* Post another 0-read if still reading and not closing. */
if (!(handle->flags & UV_HANDLE_CLOSING) &&
handle->flags & UV_HANDLE_READING) {
if (handle->flags & UV_HANDLE_READING) {
uv_queue_read(handle);
}
break;
@ -1157,10 +1161,13 @@ static void uv_tcp_return_req(uv_tcp_t* handle, uv_req_t* req) {
/* accepting connections and report this error to the connection */
/* callback. */
if (handle->accept_socket == INVALID_SOCKET) {
if (!(handle->flags & UV_HANDLE_LISTENING)) {
break;
}
handle->flags &= ~UV_HANDLE_LISTENING;
if (handle->connection_cb) {
uv_last_error_ = req->error;
((uv_connection_cb)handle->connection_cb)(handle, -1);
handle->connection_cb(handle, -1);
}
break;
}
@ -1173,14 +1180,14 @@ static void uv_tcp_return_req(uv_tcp_t* handle, uv_req_t* req) {
sizeof(handle->socket)) == 0) {
/* Accept and SO_UPDATE_ACCEPT_CONTEXT were successful. */
if (handle->connection_cb) {
((uv_connection_cb)handle->connection_cb)(handle, 0);
handle->connection_cb(handle, 0);
}
} else {
/* Error related to accepted socket is ignored because the server */
/* socket may still be healthy. If the server socket is broken
/* uv_queue_accept will detect it. */
closesocket(handle->accept_socket);
if (!(handle->flags & UV_HANDLE_CLOSING)) {
if (handle->flags & UV_HANDLE_LISTENING) {
uv_queue_accept(handle);
}
}
@ -1239,10 +1246,11 @@ static int uv_timer_compare(uv_timer_t* a, uv_timer_t* b) {
RB_GENERATE_STATIC(uv_timer_tree_s, uv_timer_s, tree_entry, uv_timer_compare);
int uv_timer_init(uv_timer_t* handle, uv_close_cb close_cb, void* data) {
int uv_timer_init(uv_timer_t* handle) {
uv_counters()->handle_init++;
uv_counters()->timer_init++;
handle->type = UV_TIMER;
handle->close_cb = (void*) close_cb;
handle->data = data;
handle->flags = 0;
handle->error = uv_ok_;
handle->timer_cb = NULL;
@ -1337,9 +1345,7 @@ int64_t uv_now() {
}
int uv_loop_init(uv_handle_t* handle, uv_close_cb close_cb, void* data) {
handle->close_cb = (void*) close_cb;
handle->data = data;
int uv_loop_init(uv_handle_t* handle) {
handle->flags = 0;
handle->error = uv_ok_;
@ -1410,26 +1416,32 @@ static void uv_loop_invoke(uv_handle_t* list) {
handle = uv_next_loop_handle_;
uv_next_loop_handle_ = handle->loop_next;
((uv_loop_cb)handle->loop_cb)(handle, 0);
handle->loop_cb(handle, 0);
}
}
int uv_prepare_init(uv_prepare_t* handle, uv_close_cb close_cb, void* data) {
int uv_prepare_init(uv_prepare_t* handle) {
uv_counters()->handle_init++;
uv_counters()->prepare_init++;
handle->type = UV_PREPARE;
return uv_loop_init((uv_handle_t*)handle, close_cb, data);
return uv_loop_init((uv_handle_t*)handle);
}
int uv_check_init(uv_check_t* handle, uv_close_cb close_cb, void* data) {
int uv_check_init(uv_check_t* handle) {
uv_counters()->handle_init++;
uv_counters()->check_init++;
handle->type = UV_CHECK;
return uv_loop_init((uv_handle_t*)handle, close_cb, data);
return uv_loop_init((uv_handle_t*)handle);
}
int uv_idle_init(uv_idle_t* handle, uv_close_cb close_cb, void* data) {
int uv_idle_init(uv_idle_t* handle) {
uv_counters()->handle_init++;
uv_counters()->idle_init++;
handle->type = UV_IDLE;
return uv_loop_init((uv_handle_t*)handle, close_cb, data);
return uv_loop_init((uv_handle_t*)handle);
}
@ -1483,13 +1495,13 @@ int uv_is_active(uv_handle_t* handle) {
}
int uv_async_init(uv_async_t* handle, uv_async_cb async_cb,
uv_close_cb close_cb, void* data) {
int uv_async_init(uv_async_t* handle, uv_async_cb async_cb) {
uv_req_t* req;
uv_counters()->handle_init++;
uv_counters()->async_init++;
handle->type = UV_ASYNC;
handle->close_cb = (void*) close_cb;
handle->data = data;
handle->flags = 0;
handle->async_sent = 0;
handle->error = uv_ok_;
@ -1587,7 +1599,7 @@ static void uv_process_timers() {
timer->flags &= ~UV_HANDLE_ACTIVE;
}
((uv_loop_cb) timer->timer_cb)((uv_handle_t*)timer, 0);
timer->timer_cb((uv_handle_t*) timer, 0);
}
}

deps/uv/uv-win.h (8 changed lines)

@ -55,13 +55,13 @@ typedef struct uv_buf_t {
#define uv_tcp_connection_fields \
uv_alloc_cb alloc_cb; \
void* read_cb; \
uv_read_cb read_cb; \
struct uv_req_s read_req; \
unsigned int write_reqs_pending; \
uv_req_t* shutdown_req;
#define uv_tcp_server_fields \
void *connection_cb; \
uv_connection_cb connection_cb; \
SOCKET accept_socket; \
struct uv_req_s accept_req; \
char accept_buffer[sizeof(struct sockaddr_storage) * 2 + 32];
@ -81,12 +81,12 @@ typedef struct uv_buf_t {
RB_ENTRY(uv_timer_s) tree_entry; \
int64_t due; \
int64_t repeat; \
void* timer_cb;
uv_loop_cb timer_cb;
#define UV_LOOP_PRIVATE_FIELDS \
uv_handle_t* loop_prev; \
uv_handle_t* loop_next; \
void* loop_cb;
uv_loop_cb loop_cb;
#define UV_ASYNC_PRIVATE_FIELDS \
struct uv_req_s async_req; \

deps/uv/uv.h (44 changed lines)

@ -57,15 +57,15 @@ typedef struct uv_req_s uv_req_t;
* user.
*/
typedef uv_buf_t (*uv_alloc_cb)(uv_tcp_t* tcp, size_t suggested_size);
typedef void (*uv_read_cb)(uv_tcp_t* tcp, int nread, uv_buf_t buf);
typedef void (*uv_read_cb)(uv_tcp_t* tcp, ssize_t nread, uv_buf_t buf);
typedef void (*uv_write_cb)(uv_req_t* req, int status);
typedef void (*uv_connect_cb)(uv_req_t* req, int status);
typedef void (*uv_shutdown_cb)(uv_req_t* req, int status);
typedef void (*uv_connection_cb)(uv_tcp_t* server, int status);
typedef void (*uv_close_cb)(uv_handle_t* handle, int status);
typedef void (*uv_close_cb)(uv_handle_t* handle);
/* TODO: do loop_cb and async_cb really need a status argument? */
typedef void (*uv_loop_cb)(uv_handle_t* handle, int status);
typedef void (*uv_async_cb)(uv_handle_t* handle, int stats);
typedef void (*uv_async_cb)(uv_handle_t* handle, int status);
/* Expand this list if necessary. */
@ -180,7 +180,7 @@ int uv_is_active(uv_handle_t* handle);
* Request handle to be closed. close_cb will be called asynchronously after
* this call. This MUST be called on each handle before memory is released.
*/
int uv_close(uv_handle_t* handle);
int uv_close(uv_handle_t* handle, uv_close_cb close_cb);
/*
@ -194,7 +194,7 @@ struct uv_tcp_s {
UV_TCP_PRIVATE_FIELDS
};
int uv_tcp_init(uv_tcp_t* handle, uv_close_cb close_cb, void* data);
int uv_tcp_init(uv_tcp_t* handle);
int uv_bind(uv_tcp_t* handle, struct sockaddr_in);
@ -204,9 +204,12 @@ int uv_shutdown(uv_req_t* req);
int uv_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb);
/* Call this after connection_cb. client does not need to be initialized. */
int uv_accept(uv_tcp_t* server, uv_tcp_t* client,
uv_close_cb close_cb, void* data);
/* This call is used in conjunction with uv_listen() to accept incoming TCP
* connections. Call uv_accept after receiving a uv_connection_cb to accept
* the connection. Before calling uv_accept use uv_tcp_init() must be
* called on the client. Non-zero return value indicates an error.
*/
int uv_accept(uv_tcp_t* server, uv_tcp_t* client);
/* Read data from an incoming stream. The callback will be made several
* several times until there is no more data to read or uv_read_stop is
@ -234,7 +237,7 @@ struct uv_prepare_s {
UV_PREPARE_PRIVATE_FIELDS
};
int uv_prepare_init(uv_prepare_t* prepare, uv_close_cb close_cb, void* data);
int uv_prepare_init(uv_prepare_t* prepare);
int uv_prepare_start(uv_prepare_t* prepare, uv_loop_cb cb);
@ -251,7 +254,7 @@ struct uv_check_s {
UV_CHECK_PRIVATE_FIELDS
};
int uv_check_init(uv_check_t* check, uv_close_cb close_cb, void* data);
int uv_check_init(uv_check_t* check);
int uv_check_start(uv_check_t* check, uv_loop_cb cb);
@ -269,7 +272,7 @@ struct uv_idle_s {
UV_IDLE_PRIVATE_FIELDS
};
int uv_idle_init(uv_idle_t* idle, uv_close_cb close_cb, void* data);
int uv_idle_init(uv_idle_t* idle);
int uv_idle_start(uv_idle_t* idle, uv_loop_cb cb);
@ -289,8 +292,7 @@ typedef struct {
UV_ASYNC_PRIVATE_FIELDS
} uv_async_t;
int uv_async_init(uv_async_t* async, uv_async_cb async_cb,
uv_close_cb close_cb, void* data);
int uv_async_init(uv_async_t* async, uv_async_cb async_cb);
int uv_async_send(uv_async_t* async);
@ -304,7 +306,7 @@ struct uv_timer_s {
UV_TIMER_PRIVATE_FIELDS
};
int uv_timer_init(uv_timer_t* timer, uv_close_cb close_cb, void* data);
int uv_timer_init(uv_timer_t* timer);
int uv_timer_start(uv_timer_t* timer, uv_loop_cb cb, int64_t timeout, int64_t repeat);
@ -368,6 +370,20 @@ union uv_any_handle {
uv_timer_t timer;
};
/* Diagnostic counters */
typedef struct {
uint64_t req_init;
uint64_t handle_init;
uint64_t tcp_init;
uint64_t prepare_init;
uint64_t check_init;
uint64_t idle_init;
uint64_t async_init;
uint64_t timer_init;
} uv_counters_t;
uv_counters_t* const uv_counters();
#ifdef __cplusplus
}
#endif
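
The new diagnostic counters are cumulative init totals, which the tests use to assert (for example) that uv_accept() no longer re-initializes handles. A small sketch of reading them:

    #include <stdio.h>
    #include "uv.h"

    static void dump_counters(void) {
      uv_counters_t* c = uv_counters();
      fprintf(stderr, "handles %llu (tcp %llu, timer %llu), reqs %llu\n",
              (unsigned long long) c->handle_init,
              (unsigned long long) c->tcp_init,
              (unsigned long long) c->timer_init,
              (unsigned long long) c->req_init);
    }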

src/node.cc (20 changed lines)

@ -2384,36 +2384,36 @@ char** Init(int argc, char *argv[]) {
wsa_init();
#endif // __MINGW32__
uv_prepare_init(&node::prepare_tick_watcher, NULL, NULL);
uv_prepare_init(&node::prepare_tick_watcher);
uv_prepare_start(&node::prepare_tick_watcher, PrepareTick);
uv_unref();
uv_check_init(&node::check_tick_watcher, NULL, NULL);
uv_check_init(&node::check_tick_watcher);
uv_check_start(&node::check_tick_watcher, node::CheckTick);
uv_unref();
uv_idle_init(&node::tick_spinner, NULL, NULL);
uv_idle_init(&node::tick_spinner);
uv_unref();
uv_check_init(&node::gc_check, NULL, NULL);
uv_check_init(&node::gc_check);
uv_check_start(&node::gc_check, node::Check);
uv_unref();
uv_idle_init(&node::gc_idle, NULL, NULL);
uv_idle_init(&node::gc_idle);
uv_unref();
uv_timer_init(&node::gc_timer, NULL, NULL);
uv_timer_init(&node::gc_timer);
uv_unref();
// Setup the EIO thread pool. It requires 3, yes 3, watchers.
{
uv_idle_init(&node::eio_poller, NULL, NULL);
uv_idle_init(&node::eio_poller);
uv_idle_start(&eio_poller, node::DoPoll);
uv_async_init(&node::eio_want_poll_notifier, node::WantPollNotifier, NULL, NULL);
uv_async_init(&node::eio_want_poll_notifier, node::WantPollNotifier);
uv_unref();
uv_async_init(&node::eio_done_poll_notifier, node::DonePollNotifier, NULL, NULL);
uv_async_init(&node::eio_done_poll_notifier, node::DonePollNotifier);
uv_unref();
eio_init(node::EIOWantPoll, node::EIODonePoll);
@ -2432,7 +2432,7 @@ char** Init(int argc, char *argv[]) {
// main thread to execute a random bit of javascript - which will give V8
// control so it can handle whatever new message had been received on the
// debug thread.
uv_async_init(&node::debug_watcher, node::DebugMessageCallback, NULL, NULL);
uv_async_init(&node::debug_watcher, node::DebugMessageCallback);
// unref it so that we exit the event loop despite it being active.
uv_unref();

src/timer_wrap.cc (7 changed lines)

@ -85,7 +85,8 @@ class TimerWrap {
TimerWrap(Handle<Object> object) {
active_ = false;
int r = uv_timer_init(&handle_, OnClose, this);
int r = uv_timer_init(&handle_);
handle_.data = this;
assert(r == 0); // How do we proxy this error up to javascript?
// Suggestion: uv_timer_init() returns void.
assert(object_.IsEmpty());
@ -122,7 +123,7 @@ class TimerWrap {
}
// Free the C++ object on the close callback.
static void OnClose(uv_handle_t* handle, int status) {
static void OnClose(uv_handle_t* handle) {
TimerWrap* wrap = static_cast<TimerWrap*>(handle->data);
delete wrap;
}
@ -203,7 +204,7 @@ class TimerWrap {
UNWRAP
int r = uv_close((uv_handle_t*) &wrap->handle_);
int r = uv_close((uv_handle_t*) &wrap->handle_, OnClose);
if (r) SetErrno(uv_last_error().code);

wscript (1 changed line)

@ -442,7 +442,6 @@ def configure(conf):
# LFS
conf.env.append_value('CPPFLAGS', '-D_LARGEFILE_SOURCE')
conf.env.append_value('CPPFLAGS', '-D_FILE_OFFSET_BITS=64')
conf.env.append_value('CPPFLAGS', '-DEV_MULTIPLICITY=0')
# Makes select on windows support more than 64 FDs
if sys.platform.startswith("win32"):
