|
|
|
'use strict';
|
|
|
|
require('../common');
|
stream: Return false from push() more properly
There are cases where a push() call would return true, even though
the thing being pushed was in fact way way larger than the high
water mark, simply because the 'needReadable' was already set, and
would not get unset until nextTick.
In some cases, this could lead to an infinite loop of pushing data
into the buffer, never getting to the 'readable' event which would
unset the needReadable flag.
Fix by splitting up the emitReadable function, so that it always
sets the flag on this tick, even if it defers until nextTick to
actually emit the event.
Also, if we're not ending or already in the process of reading, it
now calls read(0) if we're below the high water mark. Thus, the
highWaterMark value is the intended amount to buffer up to, and it
is smarter about hitting the target.
12 years ago
|
|
|
var assert = require('assert');
|
|
|
|
var stream = require('stream');
|
|
|
|
// Test payload: 20 characters, i.e. four times the highWaterMark of 5
// used below, so a single push() always overshoots the buffer target.
var str = 'asdfasdfasdfasdfasdf';
|
|
|
|
|
|
|
|
var r = new stream.Readable({
|
|
|
|
highWaterMark: 5,
|
|
|
|
encoding: 'utf8'
|
|
|
|
});
|
|
|
|
|
|
|
|
// Bookkeeping checked by the 'exit' handler to prove the test ran fully.
let reads = 0;     // counts the first two _read() invocations
let eofed = false; // set once _read() pushes EOF (null)
let ended = false; // set by the 'end' listener
|
|
|
|
|
stream: There is no _read cb, there is only push
This makes it so that `stream.push(chunk)` is the only way to signal the
end of reading, removing the confusing disparity between the
callback-style _read method, and the fact that most real-world streams
do not have a 1:1 correlation between the "please give me data" event,
and the actual arrival of a chunk of data.
It is still possible, of course, to implement a `CallbackReadable` on
top of this. Simply provide a method like this as the callback:
function readCallback(er, chunk) {
if (er)
stream.emit('error', er);
else
stream.push(chunk);
}
However, *only* fs streams actually would behave in this way, so it
makes not a lot of sense to make TCP, TLS, HTTP, and all the rest have
to bend into this uncomfortable paradigm.
12 years ago
|
|
|
// Feed data in three phases, one per _read() call:
//   0: push asynchronously (setTimeout), so the data arrives on a later tick;
//   1: push synchronously during the read itself — this push must report
//      backpressure (false), since `str` is larger than the highWaterMark;
//   2+: signal EOF exactly once.
r._read = function(n) {
  switch (reads) {
    case 0:
      setTimeout(function() {
        r.push(str);
      });
      reads++;
      break;

    case 1:
      var pushed = r.push(str);
      // Chunk is way over the hwm, so push() must say "stop pushing".
      assert.equal(pushed, false);
      reads++;
      break;

    default:
      // EOF must be pushed exactly once.
      assert(!eofed);
      eofed = true;
      r.push(null);
  }
};
|
|
|
|
|
|
|
|
// Record that the stream finished, so the exit handler can verify it.
function markEnded() {
  ended = true;
}
r.on('end', markEnded);
|
|
|
|
|
|
|
|
// Prime the stream before any 'readable' event has fired. The chunk is
// 20 chars against a highWaterMark of 5, so push() must report
// backpressure even on this very first push.
var ret = r.push(str);
assert(!ret);

// The whole chunk comes back out in a single read()...
var chunk = r.read();
assert.equal(chunk, str);
// ...leaving the internal buffer drained.
assert.equal(r.read(), null);
|
|
|
|
|
|
|
|
r.once('readable', function() {
  // By now both remaining pushes are buffered: the setTimeout push, plus
  // the synchronous one made while this read() drained the buffer below
  // the hwm and re-triggered _read(). A single read() returns them merged.
  chunk = r.read();
  assert.equal(chunk, str + str);

  // Nothing further should be buffered.
  assert.equal(r.read(), null);
});
|
|
|
|
|
|
|
|
// Final verification on process exit: EOF was pushed, 'end' fired, and
// `reads` (incremented only by the first two _read() calls) is exactly 2.
function verifyOnExit() {
  assert(eofed);
  assert(ended);
  assert.equal(reads, 2);
  console.log('ok');
}
process.on('exit', verifyOnExit);
|