|
|
|
'use strict';
|
|
|
|
var common = require('../common');
|
|
|
|
var assert = require('assert');
|
|
|
|
var http = require('http');
|
|
|
|
|
|
|
|
// Bookkeeping: rounds completed on the server side (requests) and on
// the client side (responses).
var requests = 0;
var responses = 0;

// Build a payload of N distinct header fields: key0: 0 ... key1999: 1999.
var N = 2000;
var headers = {};
var i;
for (i = 0; i < N; i += 1) {
  headers['key' + i] = i;
}

// Per-round server settings: [maxHeadersCount, expected header count].
// A max of 0 disables the limit, so all N fields arrive, plus the Host
// and Connection headers the client adds automatically.
var maxAndExpected = [ // for server
  [50, 50],
  [1500, 1500],
  [0, N + 2] // Host and Connection
];
var max = maxAndExpected[requests][0];
var expected = maxAndExpected[requests][1];
|
|
|
|
|
|
|
|
var server = http.createServer(function(request, response) {
  // Each request must arrive with exactly the number of headers the
  // current maxHeadersCount setting lets through (0 means unlimited).
  assert.equal(Object.keys(request.headers).length, expected);

  requests += 1;
  if (requests < maxAndExpected.length) {
    // Advance to the next round's limit/expectation pair.
    var next = maxAndExpected[requests];
    max = next[0];
    expected = next[1];
    server.maxHeadersCount = max;
  }

  // Echo the full header payload back so the client side can verify
  // its own maxHeadersCount handling.
  response.writeHead(200, headers);
  response.end();
});

server.maxHeadersCount = max;
|
|
|
|
|
|
|
|
server.listen(common.PORT, function() {
  // Per-round client settings: [maxHeadersCount, expected header count].
  // A max of 0 disables the limit; the response then carries all N
  // fields plus Connection, Date and Transfer-Encoding.
  var maxAndExpected = [ // for client
    [20, 20],
    [1200, 1200],
    [0, N + 3] // Connection, Date and Transfer-Encoding
  ];
  doRequest();

  // Issues one request per round, re-invoking itself until every
  // configured client-side round has completed.
  function doRequest() {
    var max = maxAndExpected[responses][0];
    var expected = maxAndExpected[responses][1];
    var req = http.request({
      port: common.PORT,
      headers: headers
    }, function(res) {
      assert.equal(Object.keys(res.headers).length, expected);
      res.on('end', function() {
        if (++responses < maxAndExpected.length) {
          doRequest();
        } else {
          server.close();
        }
      });
      // 'end' only fires once the response body is consumed; resume()
      // drains it so the listener above runs.
      res.resume();
    });
    req.maxHeadersCount = max;
    req.end();
  }
});
|
|
|
|
|
|
|
|
process.on('exit', function() {
  // Every configured round must have run on both the server side
  // (requests) and the client side (responses).
  [requests, responses].forEach(function(count) {
    assert.equal(count, maxAndExpected.length);
  });
});
|