From ff539c5bb55c828c538665f07ed6d31d34c4ed22 Mon Sep 17 00:00:00 2001
From: Santiago Gimeno
Date: Tue, 29 Dec 2015 10:21:55 +0100
Subject: [PATCH] cluster: ignore queryServer msgs on disconnection
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

It avoids the creation of unnecessary handles.

This issue is causing intermittent failures in
`test-cluster-disconnect-race` on `FreeBSD` and `OS X`.

The problem is that `worker2.disconnect` is called on the master before
the `queryServer` message is handled, causing the worker to be deleted;
the Server handle is then created afterwards. Later on, when
`removeWorker` is called from the `exit` handler, there are no workers
left, but one handle remains, hence the `AssertionError`.

Add a new `test/sequential/test-cluster-disconnect-leak` test, based on
`test-cluster-disconnect-race`, that creates lots of workers and fails
consistently without this patch.

PR-URL: https://github.com/nodejs/node/pull/4465
Reviewed-By: James M Snell
Reviewed-By: Johan Bergström
Reviewed-By: Rich Trott
---
 lib/cluster.js                        |  3 ++
 .../test-cluster-disconnect-leak.js   | 47 +++++++++++++++++++
 2 files changed, 50 insertions(+)
 create mode 100644 test/sequential/test-cluster-disconnect-leak.js

diff --git a/lib/cluster.js b/lib/cluster.js
index f202f25cdd..8deba88ca2 100644
--- a/lib/cluster.js
+++ b/lib/cluster.js
@@ -440,6 +440,9 @@ function masterInit() {
   }
 
   function queryServer(worker, message) {
+    // Stop processing if worker already disconnecting
+    if (worker.suicide)
+      return;
     var args = [message.address,
                 message.port,
                 message.addressType,
diff --git a/test/sequential/test-cluster-disconnect-leak.js b/test/sequential/test-cluster-disconnect-leak.js
new file mode 100644
index 0000000000..33476dd427
--- /dev/null
+++ b/test/sequential/test-cluster-disconnect-leak.js
@@ -0,0 +1,47 @@
+'use strict';
+// Flags: --expose-internals
+
+const common = require('../common');
+const assert = require('assert');
+const net = require('net');
+const cluster = require('cluster');
+const handles = require('internal/cluster').handles;
+const os = require('os');
+
+if (common.isWindows) {
+  console.log('1..0 # Skipped: This test does not apply to Windows.');
+  return;
+}
+
+cluster.schedulingPolicy = cluster.SCHED_NONE;
+
+if (cluster.isMaster) {
+  const cpus = os.cpus().length;
+  const tries = cpus > 8 ? 128 : cpus * 16;
+
+  const worker1 = cluster.fork();
+  worker1.on('message', common.mustCall(() => {
+    worker1.disconnect();
+    for (let i = 0; i < tries; ++i) {
+      const w = cluster.fork();
+      w.on('online', common.mustCall(w.disconnect));
+    }
+  }));
+
+  cluster.on('exit', common.mustCall((worker, code) => {
+    assert.strictEqual(code, 0, 'worker exited with error');
+  }, tries + 1));
+
+  process.on('exit', () => {
+    assert.deepEqual(Object.keys(cluster.workers), []);
+    assert.strictEqual(Object.keys(handles).length, 0);
+  });
+
+  return;
+}
+
+var server = net.createServer();
+
+server.listen(common.PORT, function() {
+  process.send('listening');
+});
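
A minimal sketch of the race described above, for illustration only (not
part of the patch): it is loosely based on `test-cluster-disconnect-race`
and the new test, and being timing-dependent it may not fail on every run.
The worker's `listen` call sends a `queryServer` message to the master;
if the disconnect wins the race, a master without the `worker.suicide`
guard would still create a Server handle for the already-removed worker.

    'use strict';
    const cluster = require('cluster');
    const net = require('net');

    cluster.schedulingPolicy = cluster.SCHED_NONE;

    if (cluster.isMaster) {
      const worker = cluster.fork();
      // Disconnect as soon as the worker is online, racing the
      // worker's in-flight `queryServer` message to the master.
      worker.on('online', () => worker.disconnect());
    } else {
      // `listen` makes the worker ask the master for a server handle.
      net.createServer().listen(0);
    }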