Browse Source

deps: update libuv to 0.10.37

Fixes: https://github.com/nodejs/node/issues/7199
Refs: https://github.com/nodejs/node/pull/2723
PR-URL: https://github.com/nodejs/node/pull/7293
Reviewed-By: Rod Vagg <rod@vagg.org>
v0.10
Saúl Ibarra Corretgé 8 years ago
committed by Rod Vagg
parent
commit
3374f57973
  1. 1
      deps/uv/AUTHORS
  2. 27
      deps/uv/ChangeLog
  3. 11
      deps/uv/README.md
  4. 2
      deps/uv/config-unix.mk
  5. 14
      deps/uv/include/uv-private/ngx-queue.h
  6. 18
      deps/uv/include/uv-private/uv-win.h
  7. 8
      deps/uv/src/unix/async.c
  8. 8
      deps/uv/src/unix/darwin.c
  9. 1
      deps/uv/src/unix/error.c
  10. 6
      deps/uv/src/unix/fsevents.c
  11. 2
      deps/uv/src/unix/linux-core.c
  12. 9
      deps/uv/src/unix/linux-inotify.c
  13. 7
      deps/uv/src/unix/loop-watcher.c
  14. 2
      deps/uv/src/unix/signal.c
  15. 33
      deps/uv/src/unix/thread.c
  16. 7
      deps/uv/src/unix/threadpool.c
  17. 9
      deps/uv/src/uv-common.c
  18. 2
      deps/uv/src/version.c
  19. 255
      deps/uv/src/win/thread.c
  20. 28
      deps/uv/src/win/winapi.c
  21. 28
      deps/uv/src/win/winapi.h
  22. 13
      deps/uv/test/test-fs.c
  23. 2
      deps/uv/test/test-list.h
  24. 59
      deps/uv/test/test-mutexes.c
  25. 6
      deps/uv/test/test-tcp-close-while-connecting.c
  26. 2
      deps/uv/test/test-tcp-connect-timeout.c
  27. 2
      deps/uv/test/test-udp-multicast-join.c
  28. 6
      deps/uv/test/test-udp-multicast-ttl.c
  29. 4
      deps/uv/vcbuild.bat

1
deps/uv/AUTHORS

@ -135,3 +135,4 @@ Helge Deller <deller@gmx.de>
Logan Rosen <loganrosen@gmail.com>
Kenneth Perry <thothonegan@gmail.com>
Michael Penick <michael.penick@datastax.com>
Stephen von Takach <steve@advancedcontrol.com.au>

27
deps/uv/ChangeLog

@ -1,4 +1,29 @@
2015.02.27, Version 0.10.36 (Stable)
2016.06.14, Version 0.10.37 (Stable)
Changes since version 0.10.36:
* build: update the location of gyp (Stephen von Takach)
* linux: fix epoll_pwait() fallback on arm64 (Ben Noordhuis)
* test: fix fs_chown when running as root (Ben Noordhuis)
* tests: skip some tests when network is unreachable (Luca Bruno)
* unix: do not discard environmental LDFLAGS (Luca Bruno)
* src: replace ngx_queue_split with ngx_queue_move (Ben Noordhuis)
* unix: use ngx_queue_move when iterating over lists (Ben Noordhuis)
* win: fix unsavory rwlock fallback implementation (Bert Belder)
* unix: map ENFILE errno (Saúl Ibarra Corretgé)
* doc: add note indicating branch status (Saúl Ibarra Corretgé)
2015.02.27, Version 0.10.36 (Stable), cc4d42a89a2a0ae0ff8e14321de086eba3c3b4ca
Changes since version 0.10.35:

11
deps/uv/README.md

@ -6,6 +6,9 @@ eventually contain all platform differences in this library.
http://nodejs.org/
**This branch only receives security fixes and will be EOL'd by the end of 2016,
please switch to version v1.x**
## Features
* Non-blocking TCP sockets
@ -81,13 +84,7 @@ To have GYP generate build script for another system, make sure that
you have Python 2.6 or 2.7 installed, then checkout GYP into the
project tree manually:
mkdir -p build
svn co http://gyp.googlecode.com/svn/trunk build/gyp
Or:
mkdir -p build
git clone https://git.chromium.org/external/gyp.git build/gyp
git clone https://chromium.googlesource.com/external/gyp.git build/gyp
Unix users run

2
deps/uv/config-unix.mk

@ -22,7 +22,7 @@ E=
CSTDFLAG=--std=c89 -pedantic -Wall -Wextra -Wno-unused-parameter
CFLAGS += -g
CPPFLAGS += -I$(SRCDIR)/src
LDFLAGS=-lm -pthread
LDFLAGS += -lm -pthread
CPPFLAGS += -D_LARGEFILE_SOURCE
CPPFLAGS += -D_FILE_OFFSET_BITS=64

14
deps/uv/include/uv-private/ngx-queue.h

@ -106,6 +106,17 @@ struct ngx_queue_s {
while (0)
/* Move every element of queue h onto queue n, leaving h empty; when h
 * is already empty, n is simply (re)initialized.
 * NOTE(review): the non-empty branch splits h at its head via
 * ngx_queue_split(h, q, n), which is expected to transfer the entire
 * list into n — confirm against ngx_queue_split's definition, which is
 * outside this hunk. */
#define ngx_queue_move(h, n) \
do { \
if (ngx_queue_empty(h)) \
ngx_queue_init(n); \
else { \
ngx_queue_t* q = ngx_queue_head(h); \
ngx_queue_split(h, q, n); \
} \
} \
while (0)
#define ngx_queue_add(h, n) \
do { \
(h)->prev->next = (n)->next; \
@ -120,6 +131,9 @@ struct ngx_queue_s {
((type *) ((unsigned char *) q - offsetof(type, link)))
/* Important note: mutating the list while ngx_queue_foreach is
* iterating over its elements results in undefined behavior.
*/
#define ngx_queue_foreach(q, h) \
for ((q) = ngx_queue_head(h); \
(q) != ngx_queue_sentinel(h) && !ngx_queue_empty(h); \

18
deps/uv/include/uv-private/uv-win.h

@ -235,14 +235,20 @@ typedef union {
} uv_cond_t;
typedef union {
/* srwlock_ has type SRWLOCK, but not all toolchains define this type in */
/* windows.h. */
SRWLOCK srwlock_;
struct {
uv_mutex_t read_mutex_;
uv_mutex_t write_mutex_;
unsigned int num_readers_;
} fallback_;
CRITICAL_SECTION num_readers_lock_;
HANDLE write_semaphore_;
} state_;
/* TODO: remove me in v2.x. */
struct {
SRWLOCK unused_;
} unused1_;
/* TODO: remove me in v2.x. */
struct {
uv_mutex_t unused1_;
uv_mutex_t unused2_;
} unused2_;
} uv_rwlock_t;
typedef struct {

8
deps/uv/src/unix/async.c

@ -74,12 +74,18 @@ void uv__async_close(uv_async_t* handle) {
static void uv__async_event(uv_loop_t* loop,
struct uv__async* w,
unsigned int nevents) {
ngx_queue_t queue;
ngx_queue_t* q;
uv_async_t* h;
ngx_queue_foreach(q, &loop->async_handles) {
ngx_queue_move(&loop->async_handles, &queue);
while (!ngx_queue_empty(&queue)) {
q = ngx_queue_head(&queue);
h = ngx_queue_data(q, uv_async_t, queue);
ngx_queue_remove(q);
ngx_queue_insert_tail(&loop->async_handles, q);
if (cmpxchgi(&h->pending, 1, 0) == 0)
continue;

8
deps/uv/src/unix/darwin.c

@ -133,15 +133,12 @@ static void uv__cf_loop_cb(void* arg) {
loop = arg;
uv_mutex_lock(&loop->cf_mutex);
ngx_queue_init(&split_head);
if (!ngx_queue_empty(&loop->cf_signals)) {
ngx_queue_t* split_pos = ngx_queue_next(&loop->cf_signals);
ngx_queue_split(&loop->cf_signals, split_pos, &split_head);
}
ngx_queue_move(&loop->cf_signals, &split_head);
uv_mutex_unlock(&loop->cf_mutex);
while (!ngx_queue_empty(&split_head)) {
item = ngx_queue_head(&split_head);
ngx_queue_remove(item);
s = ngx_queue_data(item, uv__cf_loop_signal_t, member);
@ -151,7 +148,6 @@ static void uv__cf_loop_cb(void* arg) {
else
s->cb(s->arg);
ngx_queue_remove(item);
free(s);
}
}

1
deps/uv/src/unix/error.c

@ -110,6 +110,7 @@ uv_err_code uv_translate_sys_error(int sys_errno) {
case ERANGE: return UV_ERANGE;
case ENXIO: return UV_ENXIO;
case EMLINK: return UV_EMLINK;
case ENFILE: return UV_ENFILE;
default: return UV_UNKNOWN;
}
UNREACHABLE();

6
deps/uv/src/unix/fsevents.c

@ -55,11 +55,7 @@ struct uv__fsevents_event_s {
ngx_queue_t split_head; \
uv__fsevents_event_t* event; \
uv_mutex_lock(&(handle)->cf_mutex); \
ngx_queue_init(&split_head); \
if (!ngx_queue_empty(&(handle)->cf_events)) { \
ngx_queue_t* split_pos = ngx_queue_next(&(handle)->cf_events); \
ngx_queue_split(&(handle)->cf_events, split_pos, &split_head); \
} \
ngx_queue_move(&(handle)->cf_events, &split_head); \
uv_mutex_unlock(&(handle)->cf_mutex); \
while (!ngx_queue_empty(&split_head)) { \
curr = ngx_queue_head(&split_head); \

2
deps/uv/src/unix/linux-core.c

@ -199,7 +199,7 @@ void uv__io_poll(uv_loop_t* loop, int timeout) {
if (pthread_sigmask(SIG_BLOCK, &sigset, NULL))
abort();
if (sigmask != 0 && no_epoll_pwait == 0) {
if (no_epoll_wait != 0 || (sigmask != 0 && no_epoll_pwait == 0)) {
nfds = uv__epoll_pwait(loop->backend_fd,
events,
ARRAY_SIZE(events),

9
deps/uv/src/unix/linux-inotify.c

@ -119,6 +119,7 @@ static void uv__inotify_read(uv_loop_t* loop,
const struct uv__inotify_event* e;
struct watcher_list* w;
uv_fs_event_t* h;
ngx_queue_t queue;
ngx_queue_t* q;
const char* path;
ssize_t size;
@ -158,8 +159,14 @@ static void uv__inotify_read(uv_loop_t* loop,
*/
path = e->len ? (const char*) (e + 1) : basename_r(w->path);
ngx_queue_foreach(q, &w->watchers) {
ngx_queue_move(&w->watchers, &queue);
while (!ngx_queue_empty(&queue)) {
q = ngx_queue_head(&queue);
h = ngx_queue_data(q, uv_fs_event_t, watchers);
ngx_queue_remove(q);
ngx_queue_insert_tail(&w->watchers, q);
h->cb(h, path, events, 0);
}
}

7
deps/uv/src/unix/loop-watcher.c

@ -48,9 +48,14 @@
\
void uv__run_##name(uv_loop_t* loop) { \
uv_##name##_t* h; \
ngx_queue_t queue; \
ngx_queue_t* q; \
ngx_queue_foreach(q, &loop->name##_handles) { \
ngx_queue_move(&loop->name##_handles, &queue); \
while (!ngx_queue_empty(&queue)) { \
q = ngx_queue_head(&queue); \
h = ngx_queue_data(q, uv_##name##_t, queue); \
ngx_queue_remove(q); \
ngx_queue_insert_tail(&loop->name##_handles, q); \
h->name##_cb(h, 0); \
} \
} \

2
deps/uv/src/unix/signal.c

@ -231,6 +231,8 @@ void uv__signal_loop_cleanup(uv_loop_t* loop) {
/* Stop all the signal watchers that are still attached to this loop. This
* ensures that the (shared) signal tree doesn't contain any invalid
* entries, and that signal handlers are removed when appropriate.
* It's safe to use ngx_queue_foreach here because the handles and the handle
* queue are not modified by uv__signal_stop().
*/
ngx_queue_foreach(q, &loop->handle_queue) {
uv_handle_t* handle = ngx_queue_data(q, uv_handle_t, handle_queue);

33
deps/uv/src/unix/thread.c

@ -84,13 +84,12 @@ int uv_mutex_trylock(uv_mutex_t* mutex) {
r = pthread_mutex_trylock(mutex);
if (r && r != EBUSY && r != EAGAIN)
abort();
if (r)
if (r) {
if (r != EBUSY && r != EAGAIN)
abort();
return -1;
else
return 0;
}
return 0;
}
@ -125,13 +124,12 @@ int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
r = pthread_rwlock_tryrdlock(rwlock);
if (r && r != EBUSY && r != EAGAIN)
abort();
if (r)
if (r) {
if (r != EBUSY && r != EAGAIN)
abort();
return -1;
else
return 0;
}
return 0;
}
@ -152,13 +150,12 @@ int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
r = pthread_rwlock_trywrlock(rwlock);
if (r && r != EBUSY && r != EAGAIN)
abort();
if (r)
if (r) {
if (r != EBUSY && r != EAGAIN)
abort();
return -1;
else
return 0;
}
return 0;
}

7
deps/uv/src/unix/threadpool.c

@ -202,13 +202,8 @@ void uv__work_done(uv_async_t* handle, int status) {
int err;
loop = container_of(handle, uv_loop_t, wq_async);
ngx_queue_init(&wq);
uv_mutex_lock(&loop->wq_mutex);
if (!ngx_queue_empty(&loop->wq)) {
q = ngx_queue_head(&loop->wq);
ngx_queue_split(&loop->wq, q, &wq);
}
ngx_queue_move(&loop->wq, &wq);
uv_mutex_unlock(&loop->wq_mutex);
while (!ngx_queue_empty(&wq)) {

9
deps/uv/src/uv-common.c

@ -362,11 +362,18 @@ unsigned long uv_thread_self(void) {
void uv_walk(uv_loop_t* loop, uv_walk_cb walk_cb, void* arg) {
ngx_queue_t queue;
ngx_queue_t* q;
uv_handle_t* h;
ngx_queue_foreach(q, &loop->handle_queue) {
ngx_queue_move(&loop->handle_queue, &queue);
while (!ngx_queue_empty(&queue)) {
q = ngx_queue_head(&queue);
h = ngx_queue_data(q, uv_handle_t, handle_queue);
ngx_queue_remove(q);
ngx_queue_insert_tail(&loop->handle_queue, q);
if (h->flags & UV__HANDLE_INTERNAL) continue;
walk_cb(h, arg);
}

2
deps/uv/src/version.c

@ -34,7 +34,7 @@
#define UV_VERSION_MAJOR 0
#define UV_VERSION_MINOR 10
#define UV_VERSION_PATCH 36
#define UV_VERSION_PATCH 37
#define UV_VERSION_IS_RELEASE 1

255
deps/uv/src/win/thread.c

@ -26,7 +26,6 @@
#include "internal.h"
#define HAVE_SRWLOCK_API() (pTryAcquireSRWLockShared != NULL)
#define HAVE_CONDVAR_API() (pInitializeConditionVariable != NULL)
#ifdef _MSC_VER /* msvc */
@ -38,25 +37,6 @@
#endif
inline static int uv__rwlock_srwlock_init(uv_rwlock_t* rwlock);
inline static void uv__rwlock_srwlock_destroy(uv_rwlock_t* rwlock);
inline static void uv__rwlock_srwlock_rdlock(uv_rwlock_t* rwlock);
inline static int uv__rwlock_srwlock_tryrdlock(uv_rwlock_t* rwlock);
inline static void uv__rwlock_srwlock_rdunlock(uv_rwlock_t* rwlock);
inline static void uv__rwlock_srwlock_wrlock(uv_rwlock_t* rwlock);
inline static int uv__rwlock_srwlock_trywrlock(uv_rwlock_t* rwlock);
inline static void uv__rwlock_srwlock_wrunlock(uv_rwlock_t* rwlock);
inline static int uv__rwlock_fallback_init(uv_rwlock_t* rwlock);
inline static void uv__rwlock_fallback_destroy(uv_rwlock_t* rwlock);
inline static void uv__rwlock_fallback_rdlock(uv_rwlock_t* rwlock);
inline static int uv__rwlock_fallback_tryrdlock(uv_rwlock_t* rwlock);
inline static void uv__rwlock_fallback_rdunlock(uv_rwlock_t* rwlock);
inline static void uv__rwlock_fallback_wrlock(uv_rwlock_t* rwlock);
inline static int uv__rwlock_fallback_trywrlock(uv_rwlock_t* rwlock);
inline static void uv__rwlock_fallback_wrunlock(uv_rwlock_t* rwlock);
inline static int uv_cond_fallback_init(uv_cond_t* cond);
inline static void uv_cond_fallback_destroy(uv_cond_t* cond);
inline static void uv_cond_fallback_signal(uv_cond_t* cond);
@ -158,68 +138,112 @@ void uv_mutex_unlock(uv_mutex_t* mutex) {
int uv_rwlock_init(uv_rwlock_t* rwlock) {
uv__once_init();
/* Initialize the semaphore that acts as the write lock. */
HANDLE handle = CreateSemaphoreW(NULL, 1, 1, NULL);
if (handle == NULL)
return -1;
rwlock->state_.write_semaphore_ = handle;
if (HAVE_SRWLOCK_API())
return uv__rwlock_srwlock_init(rwlock);
else
return uv__rwlock_fallback_init(rwlock);
/* Initialize the critical section protecting the reader count. */
InitializeCriticalSection(&rwlock->state_.num_readers_lock_);
/* Initialize the reader count. */
rwlock->state_.num_readers_ = 0;
return 0;
}
void uv_rwlock_destroy(uv_rwlock_t* rwlock) {
if (HAVE_SRWLOCK_API())
uv__rwlock_srwlock_destroy(rwlock);
else
uv__rwlock_fallback_destroy(rwlock);
DeleteCriticalSection(&rwlock->state_.num_readers_lock_);
CloseHandle(rwlock->state_.write_semaphore_);
}
void uv_rwlock_rdlock(uv_rwlock_t* rwlock) {
if (HAVE_SRWLOCK_API())
uv__rwlock_srwlock_rdlock(rwlock);
else
uv__rwlock_fallback_rdlock(rwlock);
/* Acquire the lock that protects the reader count. */
EnterCriticalSection(&rwlock->state_.num_readers_lock_);
/* Increase the reader count, and lock for write if this is the first
* reader.
*/
if (++rwlock->state_.num_readers_ == 1) {
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
if (r != WAIT_OBJECT_0)
uv_fatal_error(GetLastError(), "WaitForSingleObject");
}
/* Release the lock that protects the reader count. */
LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
}
int uv_rwlock_tryrdlock(uv_rwlock_t* rwlock) {
if (HAVE_SRWLOCK_API())
return uv__rwlock_srwlock_tryrdlock(rwlock);
else
return uv__rwlock_fallback_tryrdlock(rwlock);
int err;
if (!TryEnterCriticalSection(&rwlock->state_.num_readers_lock_))
return -1;
err = 0;
if (rwlock->state_.num_readers_ == 0) {
/* Currently there are no other readers, which means that the write lock
* needs to be acquired.
*/
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0);
if (r == WAIT_OBJECT_0)
rwlock->state_.num_readers_++;
else if (r == WAIT_TIMEOUT)
err = -1;
else if (r == WAIT_FAILED)
uv_fatal_error(GetLastError(), "WaitForSingleObject");
} else {
/* The write lock has already been acquired because there are other
* active readers.
*/
rwlock->state_.num_readers_++;
}
LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
return err;
}
void uv_rwlock_rdunlock(uv_rwlock_t* rwlock) {
if (HAVE_SRWLOCK_API())
uv__rwlock_srwlock_rdunlock(rwlock);
else
uv__rwlock_fallback_rdunlock(rwlock);
EnterCriticalSection(&rwlock->state_.num_readers_lock_);
if (--rwlock->state_.num_readers_ == 0) {
if (!ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL))
uv_fatal_error(GetLastError(), "ReleaseSemaphore");
}
LeaveCriticalSection(&rwlock->state_.num_readers_lock_);
}
void uv_rwlock_wrlock(uv_rwlock_t* rwlock) {
if (HAVE_SRWLOCK_API())
uv__rwlock_srwlock_wrlock(rwlock);
else
uv__rwlock_fallback_wrlock(rwlock);
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, INFINITE);
if (r != WAIT_OBJECT_0)
uv_fatal_error(GetLastError(), "WaitForSingleObject");
}
int uv_rwlock_trywrlock(uv_rwlock_t* rwlock) {
if (HAVE_SRWLOCK_API())
return uv__rwlock_srwlock_trywrlock(rwlock);
DWORD r = WaitForSingleObject(rwlock->state_.write_semaphore_, 0);
if (r == WAIT_OBJECT_0)
return 0;
else if (r == WAIT_TIMEOUT)
return -1;
else
return uv__rwlock_fallback_trywrlock(rwlock);
uv_fatal_error(GetLastError(), "WaitForSingleObject");
return -1;
}
void uv_rwlock_wrunlock(uv_rwlock_t* rwlock) {
if (HAVE_SRWLOCK_API())
uv__rwlock_srwlock_wrunlock(rwlock);
else
uv__rwlock_fallback_wrunlock(rwlock);
if (!ReleaseSemaphore(rwlock->state_.write_semaphore_, 1, NULL))
uv_fatal_error(GetLastError(), "ReleaseSemaphore");
}
@ -261,133 +285,6 @@ int uv_sem_trywait(uv_sem_t* sem) {
}
inline static int uv__rwlock_srwlock_init(uv_rwlock_t* rwlock) {
pInitializeSRWLock(&rwlock->srwlock_);
return 0;
}
inline static void uv__rwlock_srwlock_destroy(uv_rwlock_t* rwlock) {
(void) rwlock;
}
inline static void uv__rwlock_srwlock_rdlock(uv_rwlock_t* rwlock) {
pAcquireSRWLockShared(&rwlock->srwlock_);
}
inline static int uv__rwlock_srwlock_tryrdlock(uv_rwlock_t* rwlock) {
if (pTryAcquireSRWLockShared(&rwlock->srwlock_))
return 0;
else
return -1;
}
inline static void uv__rwlock_srwlock_rdunlock(uv_rwlock_t* rwlock) {
pReleaseSRWLockShared(&rwlock->srwlock_);
}
inline static void uv__rwlock_srwlock_wrlock(uv_rwlock_t* rwlock) {
pAcquireSRWLockExclusive(&rwlock->srwlock_);
}
inline static int uv__rwlock_srwlock_trywrlock(uv_rwlock_t* rwlock) {
if (pTryAcquireSRWLockExclusive(&rwlock->srwlock_))
return 0;
else
return -1;
}
inline static void uv__rwlock_srwlock_wrunlock(uv_rwlock_t* rwlock) {
pReleaseSRWLockExclusive(&rwlock->srwlock_);
}
inline static int uv__rwlock_fallback_init(uv_rwlock_t* rwlock) {
if (uv_mutex_init(&rwlock->fallback_.read_mutex_))
return -1;
if (uv_mutex_init(&rwlock->fallback_.write_mutex_)) {
uv_mutex_destroy(&rwlock->fallback_.read_mutex_);
return -1;
}
rwlock->fallback_.num_readers_ = 0;
return 0;
}
inline static void uv__rwlock_fallback_destroy(uv_rwlock_t* rwlock) {
uv_mutex_destroy(&rwlock->fallback_.read_mutex_);
uv_mutex_destroy(&rwlock->fallback_.write_mutex_);
}
inline static void uv__rwlock_fallback_rdlock(uv_rwlock_t* rwlock) {
uv_mutex_lock(&rwlock->fallback_.read_mutex_);
if (++rwlock->fallback_.num_readers_ == 1)
uv_mutex_lock(&rwlock->fallback_.write_mutex_);
uv_mutex_unlock(&rwlock->fallback_.read_mutex_);
}
inline static int uv__rwlock_fallback_tryrdlock(uv_rwlock_t* rwlock) {
int ret;
ret = -1;
if (uv_mutex_trylock(&rwlock->fallback_.read_mutex_))
goto out;
if (rwlock->fallback_.num_readers_ == 0)
ret = uv_mutex_trylock(&rwlock->fallback_.write_mutex_);
else
ret = 0;
if (ret == 0)
rwlock->fallback_.num_readers_++;
uv_mutex_unlock(&rwlock->fallback_.read_mutex_);
out:
return ret;
}
inline static void uv__rwlock_fallback_rdunlock(uv_rwlock_t* rwlock) {
uv_mutex_lock(&rwlock->fallback_.read_mutex_);
if (--rwlock->fallback_.num_readers_ == 0)
uv_mutex_unlock(&rwlock->fallback_.write_mutex_);
uv_mutex_unlock(&rwlock->fallback_.read_mutex_);
}
inline static void uv__rwlock_fallback_wrlock(uv_rwlock_t* rwlock) {
uv_mutex_lock(&rwlock->fallback_.write_mutex_);
}
inline static int uv__rwlock_fallback_trywrlock(uv_rwlock_t* rwlock) {
return uv_mutex_trylock(&rwlock->fallback_.write_mutex_);
}
inline static void uv__rwlock_fallback_wrunlock(uv_rwlock_t* rwlock) {
uv_mutex_unlock(&rwlock->fallback_.write_mutex_);
}
/* This condition variable implementation is based on the SetEvent solution
* (section 3.2) at http://www.cs.wustl.edu/~schmidt/win32-cv-1.html
* We could not use the SignalObjectAndWait solution (section 3.4) because

28
deps/uv/src/win/winapi.c

@ -38,13 +38,6 @@ sGetQueuedCompletionStatusEx pGetQueuedCompletionStatusEx;
sSetFileCompletionNotificationModes pSetFileCompletionNotificationModes;
sCreateSymbolicLinkW pCreateSymbolicLinkW;
sCancelIoEx pCancelIoEx;
sInitializeSRWLock pInitializeSRWLock;
sAcquireSRWLockShared pAcquireSRWLockShared;
sAcquireSRWLockExclusive pAcquireSRWLockExclusive;
sTryAcquireSRWLockShared pTryAcquireSRWLockShared;
sTryAcquireSRWLockExclusive pTryAcquireSRWLockExclusive;
sReleaseSRWLockShared pReleaseSRWLockShared;
sReleaseSRWLockExclusive pReleaseSRWLockExclusive;
sInitializeConditionVariable pInitializeConditionVariable;
sSleepConditionVariableCS pSleepConditionVariableCS;
sSleepConditionVariableSRW pSleepConditionVariableSRW;
@ -114,27 +107,6 @@ void uv_winapi_init() {
pCancelIoEx = (sCancelIoEx)
GetProcAddress(kernel32_module, "CancelIoEx");
pInitializeSRWLock = (sInitializeSRWLock)
GetProcAddress(kernel32_module, "InitializeSRWLock");
pAcquireSRWLockShared = (sAcquireSRWLockShared)
GetProcAddress(kernel32_module, "AcquireSRWLockShared");
pAcquireSRWLockExclusive = (sAcquireSRWLockExclusive)
GetProcAddress(kernel32_module, "AcquireSRWLockExclusive");
pTryAcquireSRWLockShared = (sTryAcquireSRWLockShared)
GetProcAddress(kernel32_module, "TryAcquireSRWLockShared");
pTryAcquireSRWLockExclusive = (sTryAcquireSRWLockExclusive)
GetProcAddress(kernel32_module, "TryAcquireSRWLockExclusive");
pReleaseSRWLockShared = (sReleaseSRWLockShared)
GetProcAddress(kernel32_module, "ReleaseSRWLockShared");
pReleaseSRWLockExclusive = (sReleaseSRWLockExclusive)
GetProcAddress(kernel32_module, "ReleaseSRWLockExclusive");
pInitializeConditionVariable = (sInitializeConditionVariable)
GetProcAddress(kernel32_module, "InitializeConditionVariable");

28
deps/uv/src/win/winapi.h

@ -4405,27 +4405,6 @@ typedef BOOL (WINAPI* sCancelIoEx)
(HANDLE hFile,
LPOVERLAPPED lpOverlapped);
typedef VOID (WINAPI* sInitializeSRWLock)
(PSRWLOCK SRWLock);
typedef VOID (WINAPI* sAcquireSRWLockShared)
(PSRWLOCK SRWLock);
typedef VOID (WINAPI* sAcquireSRWLockExclusive)
(PSRWLOCK SRWLock);
typedef BOOL (WINAPI* sTryAcquireSRWLockShared)
(PSRWLOCK SRWLock);
typedef BOOL (WINAPI* sTryAcquireSRWLockExclusive)
(PSRWLOCK SRWLock);
typedef VOID (WINAPI* sReleaseSRWLockShared)
(PSRWLOCK SRWLock);
typedef VOID (WINAPI* sReleaseSRWLockExclusive)
(PSRWLOCK SRWLock);
typedef VOID (WINAPI* sInitializeConditionVariable)
(PCONDITION_VARIABLE ConditionVariable);
@ -4460,13 +4439,6 @@ extern sGetQueuedCompletionStatusEx pGetQueuedCompletionStatusEx;
extern sSetFileCompletionNotificationModes pSetFileCompletionNotificationModes;
extern sCreateSymbolicLinkW pCreateSymbolicLinkW;
extern sCancelIoEx pCancelIoEx;
extern sInitializeSRWLock pInitializeSRWLock;
extern sAcquireSRWLockShared pAcquireSRWLockShared;
extern sAcquireSRWLockExclusive pAcquireSRWLockExclusive;
extern sTryAcquireSRWLockShared pTryAcquireSRWLockShared;
extern sTryAcquireSRWLockExclusive pTryAcquireSRWLockExclusive;
extern sReleaseSRWLockShared pReleaseSRWLockShared;
extern sReleaseSRWLockExclusive pReleaseSRWLockExclusive;
extern sInitializeConditionVariable pInitializeConditionVariable;
extern sSleepConditionVariableCS pSleepConditionVariableCS;
extern sSleepConditionVariableSRW pSleepConditionVariableSRW;

13
deps/uv/test/test-fs.c

@ -196,9 +196,16 @@ static void chown_root_cb(uv_fs_t* req) {
/* On windows, chown is a no-op and always succeeds. */
ASSERT(req->result == 0);
#else
/* On unix, chown'ing the root directory is not allowed. */
ASSERT(req->result == -1);
ASSERT(req->errorno == UV_EPERM);
/* On unix, chown'ing the root directory is not allowed -
* unless you're root, of course.
*/
if (geteuid() == 0) {
ASSERT(req->result == 0);
}
else {
ASSERT(req->result == -1);
ASSERT(req->errorno == UV_EPERM);
}
#endif
chown_cb_count++;
uv_fs_req_cleanup(req);

2
deps/uv/test/test-list.h

@ -211,6 +211,7 @@ TEST_DECLARE (threadpool_cancel_fs)
TEST_DECLARE (threadpool_cancel_single)
TEST_DECLARE (thread_mutex)
TEST_DECLARE (thread_rwlock)
TEST_DECLARE (thread_rwlock_trylock)
TEST_DECLARE (thread_create)
TEST_DECLARE (strlcpy)
TEST_DECLARE (strlcat)
@ -522,6 +523,7 @@ TASK_LIST_START
TEST_ENTRY (threadpool_cancel_single)
TEST_ENTRY (thread_mutex)
TEST_ENTRY (thread_rwlock)
TEST_ENTRY (thread_rwlock_trylock)
TEST_ENTRY (thread_create)
TEST_ENTRY (strlcpy)
TEST_ENTRY (strlcat)

59
deps/uv/test/test-mutexes.c

@ -61,3 +61,62 @@ TEST_IMPL(thread_rwlock) {
return 0;
}
/* Exercise the non-blocking rwlock acquisition paths: while the write
 * lock is held, both try-acquires must fail; multiple read locks may be
 * held at once but must keep a try-writer out; once every lock is
 * released, the lock must be acquirable again. Each comment below
 * states the lock state expected at that point. Returns 0 on success;
 * any failed ASSERT fails the test. */
TEST_IMPL(thread_rwlock_trylock) {
uv_rwlock_t rwlock;
int r;
r = uv_rwlock_init(&rwlock);
ASSERT(r == 0);
/* No locks held. */
r = uv_rwlock_trywrlock(&rwlock);
ASSERT(r == 0);
/* Write lock held. */
r = uv_rwlock_tryrdlock(&rwlock);
ASSERT(r == -1);
r = uv_rwlock_trywrlock(&rwlock);
ASSERT(r == -1);
uv_rwlock_wrunlock(&rwlock);
/* No locks held. */
r = uv_rwlock_tryrdlock(&rwlock);
ASSERT(r == 0);
/* One read lock held. */
r = uv_rwlock_tryrdlock(&rwlock);
ASSERT(r == 0);
/* Two read locks held. */
r = uv_rwlock_trywrlock(&rwlock);
ASSERT(r == -1);
uv_rwlock_rdunlock(&rwlock);
/* One read lock held. */
uv_rwlock_rdunlock(&rwlock);
/* No read locks held. */
r = uv_rwlock_trywrlock(&rwlock);
ASSERT(r == 0);
/* Write lock held. */
uv_rwlock_wrunlock(&rwlock);
/* No locks held. */
uv_rwlock_destroy(&rwlock);
return 0;
}

6
deps/uv/test/test-tcp-close-while-connecting.c

@ -60,12 +60,16 @@ TEST_IMPL(tcp_close_while_connecting) {
uv_connect_t connect_req;
struct sockaddr_in addr;
uv_loop_t* loop;
int r;
addr = uv_ip4_addr("1.2.3.4", TEST_PORT);
loop = uv_default_loop();
ASSERT(0 == uv_tcp_init(loop, &tcp_handle));
ASSERT(0 == uv_tcp_connect(&connect_req, &tcp_handle, addr, connect_cb));
r = uv_tcp_connect(&connect_req, &tcp_handle, addr, connect_cb);
if (r == -1 && uv_last_error(uv_default_loop()).code == UV_ENETUNREACH)
RETURN_SKIP("Network unreachable.");
ASSERT(r == 0);
ASSERT(0 == uv_timer_init(loop, &timer1_handle));
ASSERT(0 == uv_timer_start(&timer1_handle, timer1_cb, 50, 0));
ASSERT(0 == uv_timer_init(loop, &timer2_handle));

2
deps/uv/test/test-tcp-connect-timeout.c

@ -76,6 +76,8 @@ TEST_IMPL(tcp_connect_timeout) {
ASSERT(r == 0);
r = uv_tcp_connect(&connect_req, &conn, addr, connect_cb);
if (r == -1 && uv_last_error(uv_default_loop()).code == UV_ENETUNREACH)
RETURN_SKIP("Network unreachable.");
ASSERT(r == 0);
r = uv_run(uv_default_loop(), UV_RUN_DEFAULT);

2
deps/uv/test/test-udp-multicast-join.c

@ -113,6 +113,8 @@ TEST_IMPL(udp_multicast_join) {
/* join the multicast channel */
r = uv_udp_set_membership(&client, "239.255.0.1", NULL, UV_JOIN_GROUP);
if (r == -1 && uv_last_error(uv_default_loop()).code == UV_ENODEV)
RETURN_SKIP("No multicast support.");
ASSERT(r == 0);
r = uv_udp_recv_start(&client, alloc_cb, cl_recv_cb);

6
deps/uv/test/test-udp-multicast-ttl.c

@ -44,7 +44,11 @@ static void close_cb(uv_handle_t* handle) {
static void sv_send_cb(uv_udp_send_t* req, int status) {
ASSERT(req != NULL);
ASSERT(status == 0);
if (status == -1) {
ASSERT(uv_last_error(uv_default_loop()).code == UV_ENETUNREACH);
} else {
ASSERT(status == 0);
}
CHECK_HANDLE(req->handle);
sv_send_cb_called++;

4
deps/uv/vcbuild.bat

@ -90,8 +90,8 @@ if defined noprojgen goto msbuild
@rem Generate the VS project.
if exist build\gyp goto have_gyp
echo git clone https://git.chromium.org/external/gyp.git build/gyp
git clone https://git.chromium.org/external/gyp.git build/gyp
echo git clone https://chromium.googlesource.com/external/gyp build/gyp
git clone https://chromium.googlesource.com/external/gyp build/gyp
if errorlevel 1 goto gyp_install_failed
goto have_gyp

Loading…
Cancel
Save