@@ -199,6 +199,11 @@ void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
     assert(!(handle->flags & UV_HANDLE_CLOSED));
     handle->flags |= UV_HANDLE_CLOSED;
 
+    if (!(handle->flags & UV_HANDLE_TCP_SOCKET_CLOSED)) {
+      closesocket(handle->socket);
+      handle->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
+    }
+
     if (!(handle->flags & UV_HANDLE_CONNECTION) && handle->accept_reqs) {
       if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
         for (i = 0; i < uv_simultaneous_server_accepts; i++) {
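
Note: uv_tcp_close(), added at the bottom of this patch, may close the socket itself or defer the close with CancelIo, so the endgame close is now guarded by UV_HANDLE_TCP_SOCKET_CLOSED to guarantee closesocket() runs exactly once per socket.
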
@@ -218,6 +223,18 @@ void uv_tcp_endgame(uv_loop_t* loop, uv_tcp_t* handle) {
       handle->accept_reqs = NULL;
     }
 
+    if (handle->flags & UV_HANDLE_CONNECTION &&
+        handle->flags & UV_HANDLE_EMULATE_IOCP) {
+      if (handle->read_req.wait_handle != INVALID_HANDLE_VALUE) {
+        UnregisterWait(handle->read_req.wait_handle);
+        handle->read_req.wait_handle = INVALID_HANDLE_VALUE;
+      }
+      if (handle->read_req.event_handle) {
+        CloseHandle(handle->read_req.event_handle);
+        handle->read_req.event_handle = NULL;
+      }
+    }
+
     if (handle->close_cb) {
       handle->close_cb((uv_handle_t*)handle);
     }
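
Note: UV_HANDLE_EMULATE_IOCP marks sockets whose completions cannot be delivered through the completion port directly (elsewhere in tcp.c it is tied to non-IFS LSPs being stacked on the socket). On such handles each request carries an auxiliary event handle and a thread-pool wait registration; both are torn down here before the handle is destroyed, so the wait callback cannot fire against freed memory.
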
@@ -341,7 +358,7 @@ static void uv_tcp_queue_accept(uv_tcp_t* handle, uv_tcp_accept_t* req) {
   /* Prepare the overlapped structure. */
   memset(&(req->overlapped), 0, sizeof(req->overlapped));
   if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
-    req->overlapped.hEvent = (HANDLE) ((DWORD) req->event_handle | 1);
+    req->overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
   }
 
   success = handle->func_acceptex(handle->socket,
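
Note: two distinct things meet in the hEvent line. The bug fix: casting a HANDLE through DWORD truncates it to 32 bits on 64-bit Windows, so the pointer-sized ULONG_PTR is the correct intermediate type. The `| 1`: Winsock documents that when the low-order bit of OVERLAPPED.hEvent is set, the completion packet is not queued to the completion port, which lets libuv signal the event and post the completion itself. A hypothetical helper pair makes the trick explicit (illustration only; uv__tag_event and uv__untag_event are not part of this patch, types come from <windows.h>):

    static HANDLE uv__tag_event(HANDLE event_handle) {
      /* NT handle values are multiples of 4, so bit 0 is free to carry
       * the "don't post this completion to the IOCP port" flag. */
      return (HANDLE) ((ULONG_PTR) event_handle | 1);
    }

    static HANDLE uv__untag_event(HANDLE tagged) {
      /* Recover the raw event handle, e.g. for a wait registration. */
      return (HANDLE) ((ULONG_PTR) tagged & ~(ULONG_PTR) 1);
    }
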
@@ -415,6 +432,13 @@ static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
     buf.len = 0;
   }
 
+  /* Prepare the overlapped structure. */
+  memset(&(req->overlapped), 0, sizeof(req->overlapped));
+  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+    assert(req->event_handle);
+    req->overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
+  }
+
   flags = 0;
   result = WSARecv(handle->socket,
                    (WSABUF*)&buf,
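
Note: the read path gets the same preparation as the accept path above: the OVERLAPPED must be zeroed before every WSARecv, and under emulation the read request's event is attached with its low bit set. The assert records the invariant that uv_tcp_read_start (changed below) creates read_req.event_handle before any read can be queued.
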
@@ -434,6 +458,14 @@ static void uv_tcp_queue_read(uv_loop_t* loop, uv_tcp_t* handle) {
     /* The req will be processed with IOCP. */
     handle->flags |= UV_HANDLE_READ_PENDING;
     handle->reqs_pending++;
+    if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
+        req->wait_handle == INVALID_HANDLE_VALUE &&
+        !RegisterWaitForSingleObject(&req->wait_handle,
+          req->overlapped.hEvent, post_completion, (void*) req,
+          INFINITE, WT_EXECUTEINWAITTHREAD)) {
+      SET_REQ_ERROR(req, GetLastError());
+      uv_insert_pending_req(loop, (uv_req_t*)req);
+    }
   } else {
     /* Make this req pending reporting an error. */
     SET_REQ_ERROR(req, WSAGetLastError());
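
Note: when WSARecv goes asynchronous on an emulated handle, a wait is registered on the event so a thread-pool thread can forward the finished operation to the loop's port; WT_EXECUTEINWAITTHREAD runs the callback on the wait thread itself, which is fine for such a short callback. post_completion is defined earlier in tcp.c; judging by how it is registered here, its shape is roughly the following sketch (consult the real definition before relying on details):

    static void CALLBACK post_completion(void* context, BOOLEAN timed_out) {
      uv_req_t* req = (uv_req_t*) context;       /* registered as (void*) req */
      uv_tcp_t* handle = (uv_tcp_t*) req->data;
      assert(!timed_out);                        /* the wait uses INFINITE */

      /* Hand the finished OVERLAPPED to the loop's completion port. */
      if (!PostQueuedCompletionStatus(handle->loop->iocp,
                                      req->overlapped.InternalHigh,
                                      0,
                                      &req->overlapped)) {
        uv_fatal_error(GetLastError(), "PostQueuedCompletionStatus");
      }
    }

If the registration itself fails, the request is marked with the error and inserted as a pending req, so the failure surfaces through the normal completion path.
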
@@ -466,7 +498,7 @@ int uv_tcp_listen(uv_tcp_t* handle, int backlog, uv_connection_cb cb) {
     }
   }
 
-  if (!(handle->flags & UV_HANDLE_SHARED_TCP_SERVER) &&
+  if (!(handle->flags & UV_HANDLE_SHARED_TCP_SOCKET) &&
       listen(handle->socket, backlog) == SOCKET_ERROR) {
     uv__set_sys_error(loop, WSAGetLastError());
     return -1;
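
Note: the rename from UV_HANDLE_SHARED_TCP_SERVER to UV_HANDLE_SHARED_TCP_SOCKET runs through the whole patch; now that established connections can be shared too, a shared socket is not necessarily a server. listen() is skipped for shared sockets because uv_tcp_duplicate_socket (below) already called listen() in the parent process before duplicating the socket.
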
@@ -593,10 +625,18 @@ int uv_tcp_read_start(uv_tcp_t* handle, uv_alloc_cb alloc_cb,
   handle->read_cb = read_cb;
   handle->alloc_cb = alloc_cb;
 
-  /* If reading was stopped and then started again, there could stell be a */
+  /* If reading was stopped and then started again, there could still be a */
   /* read request pending. */
-  if (!(handle->flags & UV_HANDLE_READ_PENDING))
+  if (!(handle->flags & UV_HANDLE_READ_PENDING)) {
+    if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
+        !handle->read_req.event_handle) {
+      handle->read_req.event_handle = CreateEvent(NULL, 0, 0, NULL);
+      if (!handle->read_req.event_handle) {
+        uv_fatal_error(GetLastError(), "CreateEvent");
+      }
+    }
     uv_tcp_queue_read(loop, handle);
+  }
 
   return 0;
 }
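
Note: besides fixing the stell/still typo, this creates the read event lazily on first use. read_req is embedded in the handle and at most one read is outstanding at a time, so a single auto-reset event (both CreateEvent flags zero) is reused for every subsequent read. Failure to create it is treated as fatal.
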
@@ -790,6 +830,16 @@ int uv_tcp_write(uv_loop_t* loop, uv_write_t* req, uv_tcp_t* handle,
   req->cb = cb;
   memset(&req->overlapped, 0, sizeof(req->overlapped));
 
+  /* Prepare the overlapped structure. */
+  memset(&(req->overlapped), 0, sizeof(req->overlapped));
+  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+    req->event_handle = CreateEvent(NULL, 0, 0, NULL);
+    if (!req->event_handle) {
+      uv_fatal_error(GetLastError(), "CreateEvent");
+    }
+    req->overlapped.hEvent = (HANDLE) ((ULONG_PTR) req->event_handle | 1);
+  }
+
   result = WSASend(handle->socket,
                    (WSABUF*)bufs,
                    bufcnt,
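
Note: writes differ from reads in that several write requests can be in flight at once, so each uv_write_t gets its own event instead of sharing one on the handle. One review nit: the pre-existing memset on the line above makes the newly added memset redundant; zeroing twice is harmless, but the older line could be dropped in a follow-up.
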
@@ -812,6 +862,14 @@ int uv_tcp_write(uv_loop_t* loop, uv_write_t* req, uv_tcp_t* handle,
     handle->write_reqs_pending++;
     handle->write_queue_size += req->queued_bytes;
     uv_ref(loop);
+    if (handle->flags & UV_HANDLE_EMULATE_IOCP &&
+        req->wait_handle == INVALID_HANDLE_VALUE &&
+        !RegisterWaitForSingleObject(&req->wait_handle,
+          req->overlapped.hEvent, post_completion, (void*) req,
+          INFINITE, WT_EXECUTEINWAITTHREAD)) {
+      SET_REQ_ERROR(req, GetLastError());
+      uv_insert_pending_req(loop, (uv_req_t*)req);
+    }
   } else {
     /* Send failed due to an error. */
     uv__set_sys_error(loop, WSAGetLastError());
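
Note: the mirror image of the read-path registration, with the same error convention: a failed RegisterWaitForSingleObject marks the request with GetLastError() and hands it to uv_insert_pending_req, so the error is reported through the ordinary write-completion callback rather than a separate path.
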
@@ -945,6 +1003,17 @@ void uv_process_tcp_write_req(uv_loop_t* loop, uv_tcp_t* handle,
   assert(handle->write_queue_size >= req->queued_bytes);
   handle->write_queue_size -= req->queued_bytes;
 
+  if (handle->flags & UV_HANDLE_EMULATE_IOCP) {
+    if (req->wait_handle != INVALID_HANDLE_VALUE) {
+      UnregisterWait(req->wait_handle);
+      req->wait_handle = INVALID_HANDLE_VALUE;
+    }
+    if (req->event_handle) {
+      CloseHandle(req->event_handle);
+      req->event_handle = NULL;
+    }
+  }
+
   if (req->cb) {
     uv__set_sys_error(loop, GET_REQ_SOCK_ERROR(req));
     ((uv_write_cb)req->cb)(req, loop->last_err.code == UV_OK ? 0 : -1);
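
Note: per-write cleanup. Because each write allocated its own event and wait registration in uv_tcp_write, both are released as soon as the write request completes, unlike the long-lived read_req resources, which persist until endgame.
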
@@ -1036,7 +1105,8 @@ void uv_process_tcp_connect_req(uv_loop_t* loop, uv_tcp_t* handle,
 }
 
 
-int uv_tcp_import(uv_tcp_t* tcp, WSAPROTOCOL_INFOW* socket_protocol_info) {
+int uv_tcp_import(uv_tcp_t* tcp, WSAPROTOCOL_INFOW* socket_protocol_info,
+    int tcp_connection) {
   SOCKET socket = WSASocketW(AF_INET,
                              SOCK_STREAM,
                              IPPROTO_IP,
@@ -1050,13 +1120,22 @@ int uv_tcp_import(uv_tcp_t* tcp, WSAPROTOCOL_INFOW* socket_protocol_info) {
   }
 
   tcp->flags |= UV_HANDLE_BOUND;
-  tcp->flags |= UV_HANDLE_SHARED_TCP_SERVER;
+  tcp->flags |= UV_HANDLE_SHARED_TCP_SOCKET;
+
+  if (tcp_connection) {
+    uv_connection_init((uv_stream_t*)tcp);
+  }
 
   if (socket_protocol_info->iAddressFamily == AF_INET6) {
     tcp->flags |= UV_HANDLE_IPV6;
   }
 
-  return uv_tcp_set_socket(tcp->loop, tcp, socket, 1);
+  if (uv_tcp_set_socket(tcp->loop, tcp, socket, 1) != 0) {
+    return -1;
+  }
+
+  tcp->loop->active_tcp_streams++;
+  return 0;
 }
 
 
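
Note: uv_tcp_import now needs to know whether the incoming socket is an established connection; if so, uv_connection_init sets the handle up as a readable/writable stream rather than a listener. The tail also changes from a tail call into a checked call, so active_tcp_streams is incremented only after the socket has been successfully adopted into the loop.
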
@@ -1097,15 +1176,12 @@ int uv_tcp_keepalive(uv_tcp_t* handle, int enable, unsigned int delay) {
 
 int uv_tcp_duplicate_socket(uv_tcp_t* handle, int pid,
     LPWSAPROTOCOL_INFOW protocol_info) {
-  assert(!(handle->flags & UV_HANDLE_CONNECTION));
-
+  if (!(handle->flags & UV_HANDLE_CONNECTION)) {
   /*
    * We're about to share the socket with another process. Because
    * this is a listening socket, we assume that the other process will
    * be accepting connections on it. So, before sharing the socket
    * with another process, we call listen here in the parent process.
-   * This needs to be modified if the socket is shared with
-   * another process for anything other than accepting connections.
    */
 
   if (!(handle->flags & UV_HANDLE_LISTENING)) {
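
Note: the assert becomes a runtime branch because duplicating a connection is now legal; the listen-before-sharing preparation applies only to non-connection (listening) sockets. The two dropped comment lines were a to-do ("This needs to be modified if the socket is shared ... for anything other than accepting connections") that this patch effectively carries out.
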
@@ -1117,8 +1193,7 @@ int uv_tcp_duplicate_socket(uv_tcp_t* handle, int pid,
       uv__set_sys_error(handle->loop, WSAGetLastError());
       return -1;
     }
-
-    handle->flags |= UV_HANDLE_SHARED_TCP_SERVER;
   }
+  }
 
   if (WSADuplicateSocketW(handle->socket, pid, protocol_info)) {
@@ -1126,6 +1201,8 @@ int uv_tcp_duplicate_socket(uv_tcp_t* handle, int pid,
     return -1;
   }
 
+  handle->flags |= UV_HANDLE_SHARED_TCP_SOCKET;
+
   return 0;
 }
 
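
Note: UV_HANDLE_SHARED_TCP_SOCKET is now set after WSADuplicateSocketW succeeds, and for every shared socket rather than only listening ones. That flag is what uv_tcp_listen (above) and the new uv_tcp_close (below) consult to decide whether another process might still hold a duplicate of the socket.
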
@@ -1162,3 +1239,45 @@ int uv_tcp_simultaneous_accepts(uv_tcp_t* handle, int enable) {
 
   return 0;
 }
+
+
+void uv_tcp_close(uv_tcp_t* tcp) {
+  int non_ifs_lsp;
+  int close_socket = 1;
+
+  /*
+   * In order for winsock to do a graceful close there must not be
+   * any pending reads.
+   */
+  if (tcp->flags & UV_HANDLE_READ_PENDING) {
+    /* Just do shutdown on non-shared sockets, which ensures graceful close. */
+    if (!(tcp->flags & UV_HANDLE_SHARED_TCP_SOCKET)) {
+      shutdown(tcp->socket, SD_SEND);
+      tcp->flags |= UV_HANDLE_SHUT;
+    } else {
+      /* Check if we have any non-IFS LSPs stacked on top of TCP */
+      non_ifs_lsp = (tcp->flags & UV_HANDLE_IPV6) ? uv_tcp_non_ifs_lsp_ipv6 :
+                                                    uv_tcp_non_ifs_lsp_ipv4;
+
+      if (!non_ifs_lsp) {
+        /*
+         * Shared socket with no non-IFS LSPs, request to cancel pending I/O.
+         * The socket will be closed inside endgame.
+         */
+        CancelIo((HANDLE)tcp->socket);
+        close_socket = 0;
+      }
+    }
+  }
+
+  tcp->flags &= ~(UV_HANDLE_READING | UV_HANDLE_LISTENING);
+
+  if (close_socket) {
+    closesocket(tcp->socket);
+    tcp->flags |= UV_HANDLE_TCP_SOCKET_CLOSED;
+  }
+
+  if (tcp->reqs_pending == 0) {
+    uv_want_endgame(tcp->loop, (uv_handle_t*)tcp);
+  }
+}
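
Note: uv_tcp_close centralizes the close policy. Winsock performs a graceful close only when no reads are pending, so with a read outstanding the options are: on an exclusively owned socket, shutdown(SD_SEND) starts the graceful teardown; on a shared socket, closesocket() here could yank the socket out from under the process holding the duplicate, so pending I/O is cancelled with CancelIo and the close is deferred to endgame. The exception is a shared socket with a non-IFS LSP stacked on it, which is closed immediately, presumably because cancellation is not dependable through such LSPs. In every case the UV_HANDLE_TCP_SOCKET_CLOSED guard added in the first hunk keeps the eventual closesocket() single.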