Browse Source

Upgrade libuv to e5f513c

v0.7.4-release
Ryan Dahl 13 years ago
parent
commit
bf6b5299b4
  1. 1
      deps/uv/config-mingw.mk
  2. 159
      deps/uv/doc/desired-api.md
  3. 5
      deps/uv/src/eio/eio.c
  4. 4
      deps/uv/src/win/pipe.c
  5. 97
      deps/uv/test/benchmark-pound.c

1
deps/uv/config-mingw.mk

@ -46,7 +46,6 @@ src/uv-common.o: src/uv-common.c include/uv.h include/uv-win.h
$(CC) $(CFLAGS) -c src/uv-common.c -o src/uv-common.o
EIO_CPPFLAGS += $(CPPFLAGS)
EIO_CPPFLAGS += -DEIO_CONFIG_H=\"$(EIO_CONFIG)\"
EIO_CPPFLAGS += -DEIO_STACKSIZE=65536
EIO_CPPFLAGS += -D_GNU_SOURCE

159
deps/uv/doc/desired-api.md

@ -1,159 +0,0 @@
Warning: this is not actual API but desired API.
# `uv_handle_t`
This is the abstract base class of all types of handles. All handles have in
common:
* When handles are initialized, the reference count to the event loop is
increased by one.
* The user owns the `uv_handle_t` memory and is in charge of freeing it.
* In order to free resources associated with a handle, one must `uv_close()`
and wait for the `uv_close_cb` callback. After the close callback has been
made, the user is allowed to free the `uv_handle_t` object.
* The `uv_close_cb` is always made directly off the event loop. That is, it
is not called from `uv_close()`.
# `uv_tcp_server_t`
A TCP server class that is a subclass of `uv_handle_t`. This can be bound to
an address and begin accepting new TCP sockets.
int uv_bind4(uv_tcp_server_t* tcp_server, struct sockaddr_in* address);
int uv_bind6(uv_tcp_server_t* tcp_server, struct sockaddr_in6* address);
Binds the TCP server to an address. The `address` can be created with
`uv_ip4_addr()`. Call this before `uv_listen()`
Returns zero on success, -1 on failure. Errors in order of least-seriousness:
* `UV_EADDRINUSE` There is already another socket bound to the specified
address.
* `UV_EADDRNOTAVAIL` The `address` parameter is an IP address that is not
available on the local machine.
* `UV_EINVAL` The server is already bound to an address.
* `UV_EFAULT` Memory of `address` parameter is unintelligible.
int uv_listen(uv_tcp_server_t*, int backlog, uv_connection_cb cb);
Begins listening for connections. The accept callback is level-triggered.
int uv_accept(uv_tcp_server_t* server,
uv_tcp_t* client);
Accepts a connection. This should be called after the accept callback is
made. The `client` parameter should be uninitialized memory; `uv_accept` is
used instead of `uv_tcp_init` for server-side `uv_tcp_t` initialization.
Return value 0 indicates success, -1 failure. Possible errors:
* `UV_EAGAIN` There are no connections. Wait for the `uv_connection_cb` callback
to be called again.
* `UV_EFAULT` The memory of either `server` or `client` is unintelligible.
# `uv_stream_t`
An abstract subclass of `uv_handle_t`. Streams represent something that
reads and/or writes data. Streams can be half or full-duplex. TCP sockets
are streams, files are streams with offsets.
int uv_read_start(uv_stream_t* stream,
uv_alloc_cb alloc_cb,
uv_read_cb read_cb);
Starts the stream reading continuously. The `alloc_cb` is used to allow the
user to implement various means of supplying the stream with buffers to
fill. The `read_cb` returns buffers to the user filled with data.
Sometimes the buffers returned to the user do not contain data. This does
not indicate EOF as in other systems. EOF is made via the `uv_eof_cb` which
can be set like this `uv_set_eof_cb(stream, eof_cb);`
int uv_read_stop(uv_stream_t* stream);
Stops reading from the stream.
int uv_write_req_init(uv_write_req_t*,
uv_stream_t*,
uv_buf_t bufs[],
int bufcnt);
Initiates a write request on a stream.
int uv_shutdown_req_init(uv_shutdown_req_t*, uv_stream_t*)
Initiates a shutdown of outgoing data once the write queue drains.
# `uv_tcp_t`
The TCP handle class represents one endpoint of a duplex TCP stream.
`uv_tcp_t` is a subclass of `uv_stream_t`. A TCP handle can represent a
client side connection (one that has been used with `uv_connect_req_init`)
or a server-side connection (one that was initialized with `uv_accept`)
int uv_connect_req_init(uv_connect_req_t* req,
uv_tcp_t* socket,
struct sockaddr* addr);
Initiates a request to open a connection.
# `uv_req_t`
Abstract class represents an asynchronous request. This is a subclass of `uv_handle_t`.
# `uv_connect_req_t`
Subclass of `uv_req_t`. Represents a request for a TCP connection. Operates
on `uv_tcp_t` handles. Like other types of requests the `close_cb` indicates
completion of the request.
int uv_connect_req_init(uv_connect_req_t* req,
uv_tcp_t* socket,
struct sockaddr* addr);
Initializes the connection request. Returning 0 indicates success, -1 if
there was an error. The following values can be retrieved from
`uv_last_error` in the case of an error:
* ???
# `uv_shutdown_req_t`
Subclass of `uv_req_t`. Represents an ongoing shutdown request. Once the
write queue of the parent `uv_stream_t` is drained, the outbound data
channel is shutdown. Once a shutdown request is initiated on a stream, the
stream will allow no more writes.
int uv_shutdown_req_init(uv_shutdown_req_t*,
uv_stream_t* parent);
Initializes the shutdown request.
# `uv_write_req_t`
int uv_write_req_init(uv_write_req_t*,
uv_stream_t*,
uv_buf_t bufs[],
int bufcnt);
Initiates a write request on a stream.

5
deps/uv/src/eio/eio.c

@ -110,6 +110,8 @@ static void eio_destroy (eio_req *req);
#ifdef _WIN32
#include <direct.h>
#undef PAGESIZE
#define PAGESIZE 4096 /* GetSystemInfo? */
@ -140,6 +142,9 @@ static void eio_destroy (eio_req *req);
#define statvfs(path,buf) EIO_ENOSYS ()
#define fstatvfs(fd,buf) EIO_ENOSYS ()
#define getcwd(buf,s) _getcwd(buf, s)
#define rmdir(path) _rmdir(path)
/* rename() uses MoveFile, which fails to overwrite */
#define rename(old,neu) eio__rename (old, neu)

4
deps/uv/src/win/pipe.c

@ -317,8 +317,6 @@ static DWORD WINAPI pipe_connect_thread_proc(void* parameter) {
uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
}
handle->reqs_pending++;
return 0;
}
@ -363,6 +361,8 @@ int uv_pipe_connect(uv_connect_t* req, uv_pipe_t* handle,
goto error;
}
handle->reqs_pending++;
return 0;
}

97
deps/uv/test/benchmark-pound.c

@ -22,6 +22,11 @@
#include "task.h"
#include "uv.h"
/* Update this if you're going to run > 1000 concurrent requests. */
#define MAX_CONNS 1000
#define NANOSEC ((uint64_t)10e8)
/* Base class for tcp_conn_rec and pipe_conn_rec.
* The ordering of fields matters!
*/
@ -45,8 +50,13 @@ typedef struct {
static char buffer[] = "QS";
static int64_t start_time, end_time;
static int closed_streams, concurrency;
static tcp_conn_rec tcp_conns[MAX_CONNS];
static pipe_conn_rec pipe_conns[MAX_CONNS];
static uint64_t start_time;
static uint64_t end_time;
static int closed_streams;
static int conns_failed;
typedef void *(*setup_fn)(int num, void* arg);
typedef int (*connect_fn)(int num, void* handles, void* arg);
@ -71,6 +81,12 @@ static void connect_cb(uv_connect_t* req, int status) {
uv_buf_t buf;
int r;
if (status != 0) {
uv_close((uv_handle_t*)req->handle, close_cb);
conns_failed++;
return;
}
ASSERT(req != NULL);
ASSERT(status == 0);
@ -98,63 +114,34 @@ static void read_cb(uv_stream_t* stream, ssize_t nread, uv_buf_t buf) {
static void close_cb(uv_handle_t* handle) {
ASSERT(handle != NULL);
closed_streams++;
if (closed_streams == concurrency) {
uv_update_time();
end_time = uv_now();
}
}
static void* tcp_do_setup(int num, void* arg) {
tcp_conn_rec* conns;
tcp_conn_rec* pe;
tcp_conn_rec* p;
int r;
concurrency = num;
closed_streams = 0;
conns = calloc(num, sizeof(tcp_conn_rec));
ASSERT(conns != NULL);
for (p = conns, pe = p + num; p < pe; p++) {
for (p = tcp_conns, pe = p + num; p < pe; p++) {
r = uv_tcp_init(&p->stream);
ASSERT(r == 0);
p->stream.data = p;
p->conn_req.data = p;
p->write_req.data = p;
p->conn_req.handle = (uv_stream_t*)&p->stream;
p->write_req.handle = (uv_stream_t*)&p->stream;
}
return conns;
return tcp_conns;
}
static void* pipe_do_setup(int num, void* arg) {
pipe_conn_rec* conns;
pipe_conn_rec* pe;
pipe_conn_rec* p;
int r;
concurrency = num;
closed_streams = 0;
conns = calloc(num, sizeof(pipe_conn_rec));
ASSERT(conns != NULL);
for (p = conns, pe = p + num; p < pe; p++) {
for (p = pipe_conns, pe = p + num; p < pe; p++) {
r = uv_pipe_init(&p->stream);
ASSERT(r == 0);
p->stream.data = p;
p->conn_req.data = p;
p->write_req.data = p;
p->conn_req.handle = (uv_stream_t*)&p->stream;
p->write_req.handle = (uv_stream_t*)&p->stream;
}
return conns;
return pipe_conns;
}
@ -165,9 +152,11 @@ static int tcp_do_connect(int num, void* conns, void* arg) {
int r;
addr = uv_ip4_addr("127.0.0.1", TEST_PORT);
for (p = conns, pe = p + num; p < pe; p++) {
for (p = tcp_conns, pe = p + num; p < pe; p++) {
r = uv_tcp_connect(&p->conn_req, &p->stream, addr, connect_cb);
ASSERT(r == 0);
p->conn_req.data = p;
}
return 0;
@ -179,9 +168,11 @@ static int pipe_do_connect(int num, void* conns, void* arg) {
pipe_conn_rec* p;
int r;
for (p = conns, pe = p + num; p < pe; p++) {
for (p = pipe_conns, pe = p + num; p < pe; p++) {
r = uv_pipe_connect(&p->conn_req, &p->stream, TEST_PIPENAME, connect_cb);
ASSERT(r == 0);
p->conn_req.data = p;
}
return 0;
@ -193,25 +184,35 @@ static int pound_it(int concurrency,
setup_fn do_setup,
connect_fn do_connect,
void* arg) {
double secs;
void* state;
int r;
uv_init();
state = do_setup(concurrency, arg);
ASSERT(state != NULL);
/* Run benchmark for at least five seconds. */
start_time = uv_hrtime();
do {
state = do_setup(concurrency, arg);
ASSERT(state != NULL);
uv_update_time();
start_time = uv_now();
r = do_connect(concurrency, state, arg);
ASSERT(!r);
r = do_connect(concurrency, state, arg);
ASSERT(!r);
uv_run();
uv_run();
end_time = uv_hrtime();
}
while ((end_time - start_time) < 5 * NANOSEC);
LOGF("%s-conn-pound-%d: %.0f accepts/s\n",
type, concurrency,
(double) concurrency / (double) (end_time - start_time) * 1000.0);
/* Number of fractional seconds it took to run the benchmark. */
secs = (double)(end_time - start_time) / NANOSEC;
LOGF("%s-conn-pound-%d: %.0f accepts/s (%d failed)\n",
type,
concurrency,
closed_streams / secs,
conns_failed);
return 0;
}
@ -234,4 +235,4 @@ BENCHMARK_IMPL(pipe_pound_100) {
BENCHMARK_IMPL(pipe_pound_1000) {
return pound_it(1000, "pipe", pipe_do_setup, pipe_do_connect, NULL);
}
}

Loading…
Cancel
Save