
benchmark: use t-test for comparing node versions

The data sampling is done in node and the data processing is done in R.
Only plyr was added as an R dependency and it is fairly standard.

PR-URL: https://github.com/nodejs/node/pull/7094
Reviewed-By: Trevor Norris <trev.norris@gmail.com>
Reviewed-By: Jeremiah Senkpiel <fishrock123@rocketmail.com>
Reviewed-By: Brian White <mscdex@mscdex.net>
Reviewed-By: Anna Henningsen <anna@addaleax.net>
Branch: v7.x
Author: Andreas Madsen
Commit: 855009af7f
3 changed files:
  benchmark/_cli.R      24
  benchmark/compare.R   70
  benchmark/compare.js  229
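
For reference, the glue between the two halves is plain csv, written to stdout by compare.js and read from stdin by compare.R. A couple of hypothetical rows (benchmark name and numbers invented for illustration; note the doubled quotes used for csv escaping):

    "binary", "filename", "configuration", "rate", "time"
    "old", "buffers/buffer-creation.js", "n=1024 type=""fast""", 1689483.6, 0.6059
    "new", "buffers/buffer-creation.js", "n=1024 type=""fast""", 1862043.1, 0.5498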

benchmark/_cli.R
@@ -0,0 +1,24 @@
args = commandArgs(TRUE);

args.options = list();
temp.option.key = NULL;

for (arg in args) {
  # Optional argument declaration (-x or --xyz)
  if (substring(arg, 1, 1) == '-') {
    temp.option.key = substring(arg, 2);
    if (substring(arg, 2, 2) == '-') {
      temp.option.key = substring(arg, 3);
    }

    args.options[[temp.option.key]] = TRUE;
  }
  # Value for the preceding optional argument
  else if (!is.null(temp.option.key)) {
    args.options[[temp.option.key]] = arg;
    temp.option.key = NULL;
  }
}
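
With this parser, an invocation such as `Rscript compare.R --plot output.png` leaves args.options$plot set to "output.png", while a bare `--plot` without a value leaves it as TRUE; compare.R below relies on exactly that to detect a missing filename and print its usage text instead.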

benchmark/compare.R
@@ -0,0 +1,70 @@
#!/usr/bin/env Rscript
library(ggplot2);
library(plyr);

# get __dirname and load ./_cli.R
args = commandArgs(trailingOnly = F);
dirname = dirname(sub("--file=", "", args[grep("--file", args)]));
source(paste0(dirname, '/_cli.R'), chdir=T);

if (!is.null(args.options$help) ||
    (!is.null(args.options$plot) && args.options$plot == TRUE)) {
  stop("usage: cat file.csv | Rscript compare.R
  --help           show this message
  --plot filename  save plot to filename");
}

plot.filename = args.options$plot;

dat = read.csv(file('stdin'));
dat = data.frame(dat);

dat$nameTwoLines = paste0(dat$filename, '\n', dat$configuration);
dat$name = paste0(dat$filename, dat$configuration);

# Create a box plot
if (!is.null(plot.filename)) {
  p = ggplot(data=dat);
  p = p + geom_boxplot(aes(x=nameTwoLines, y=rate, fill=binary));
  p = p + ylab("rate of operations (higher is better)");
  p = p + xlab("benchmark");
  p = p + theme(axis.text.x = element_text(angle = 90, hjust = 1, vjust = 0.5));
  ggsave(plot.filename, p);
}

# Print a table with results
statistics = ddply(dat, "name", function(subdat) {
  # Perform a statistical test to see if there actually is a difference in
  # performance.
  w = t.test(rate ~ binary, data=subdat);

  # Calculate improvement for the "new" binary compared with the "old" binary
  new_mu = mean(subset(subdat, binary == "new")$rate);
  old_mu = mean(subset(subdat, binary == "old")$rate);
  improvement = sprintf("%.2f %%", ((new_mu - old_mu) / old_mu * 100));

  # Add user-friendly stars to the table. There should be at least one star
  # before you can say that there is an improvement.
  significant = '';
  if (w$p.value < 0.001) {
    significant = '***';
  } else if (w$p.value < 0.01) {
    significant = '**';
  } else if (w$p.value < 0.05) {
    significant = '*';
  }

  r = list(
    improvement = improvement,
    significant = significant,
    p.value = w$p.value
  );
  return(data.frame(r));
});

# Set the benchmark names as the row.names to left-align them in the print
row.names(statistics) = statistics$name;
statistics$name = NULL;

options(width = 200);
print(statistics);
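
To make the output concrete, the printed table could look roughly like this for two benchmark configurations (names, rates and p-values invented for illustration; R's t.test defaults to the Welch two-sample test):

                                                  improvement significant      p.value
    buffers/buffer-creation.js n=1024 type="fast"      9.96 %         *** 2.242153e-08
    buffers/buffer-creation.js n=1024 type="slow"     -0.64 %             6.735403e-01

Only rows with at least one star should be read as a real difference; the improvement column on its own can easily be noise.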

benchmark/compare.js
@@ -1,181 +1,86 @@
Old implementation (removed):

'use strict';

var usage = 'node benchmark/compare.js ' +
            '<node-binary1> <node-binary2> ' +
            '[--html] [--red|-r] [--green|-g] ' +
            '[-- <type> [testFilter]]';

var show = 'both';
var nodes = [];
var html = false;
var benchmarks;

for (var i = 2; i < process.argv.length; i++) {
  var arg = process.argv[i];
  switch (arg) {
    case '--red': case '-r':
      show = show === 'green' ? 'both' : 'red';
      break;
    case '--green': case '-g':
      show = show === 'red' ? 'both' : 'green';
      break;
    case '--html':
      html = true;
      break;
    case '-h': case '-?': case '--help':
      console.log(usage);
      process.exit(0);
      break;
    case '--':
      benchmarks = [];
      break;
    default:
      if (Array.isArray(benchmarks))
        benchmarks.push(arg);
      else
        nodes.push(arg);
      break;
  }
}

var start, green, red, reset, end;
if (!html) {
  start = '';
  green = '\u001b[1;32m';
  red = '\u001b[1;31m';
  reset = '\u001b[m';
  end = '';
} else {
  start = '<pre style="background-color:#333;color:#eee">';
  green = '<span style="background-color:#0f0;color:#000">';
  red = '<span style="background-color:#f00;color:#fff">';
  reset = '</span>';
  end = '</pre>';
}

var runBench = process.env.NODE_BENCH || 'bench';

if (nodes.length !== 2)
  return console.error('usage:\n %s', usage);

var spawn = require('child_process').spawn;
var results = {};
var toggle = 1;
var r = (+process.env.NODE_BENCH_RUNS || 1) * 2;

run();

function run() {
  if (--r < 0)
    return compare();
  toggle = ++toggle % 2;

  var node = nodes[toggle];
  console.error('running %s', node);

  var env = {};
  for (var i in process.env)
    env[i] = process.env[i];
  env.NODE = node;

  var out = '';
  var child;
  if (Array.isArray(benchmarks) && benchmarks.length) {
    child = spawn(
      node,
      ['benchmark/run.js'].concat(benchmarks),
      { env: env }
    );
  } else {
    child = spawn('make', [runBench], { env: env });
  }

  child.stdout.setEncoding('utf8');
  child.stdout.on('data', function(c) {
    out += c;
  });

  child.stderr.pipe(process.stderr);

  child.on('close', function(code) {
    if (code) {
      console.error('%s exited with code=%d', node, code);
      process.exit(code);
    } else {
      out.trim().split(/\r?\n/).forEach(function(line) {
        line = line.trim();
        if (!line)
          return;
        var s = line.split(':');
        var num = +s.pop();
        if (!num && num !== 0)
          return;
        line = s.join(':');
        var res = results[line] = results[line] || {};
        res[node] = res[node] || [];
        res[node].push(num);
      });
      run();
    }
  });
}

function compare() {
  // each result is an object with {"foo.js arg=bar":12345,...}
  // compare each thing, and show which node did the best.
  // node[0] is shown in green, node[1] shown in red.
  var maxLen = -Infinity;
  var util = require('util');
  console.log(start);

  Object.keys(results).map(function(bench) {
    var res = results[bench];
    var n0 = avg(res[nodes[0]]);
    var n1 = avg(res[nodes[1]]);

    var pct = ((n0 - n1) / n1 * 100).toFixed(2);

    var g = n0 > n1 ? green : '';
    var r = n0 > n1 ? '' : red;
    var c = r || g;

    if (show === 'green' && !g || show === 'red' && !r)
      return;

    var r0 = util.format(
      '%s%s: %d%s',
      g,
      nodes[0],
      n0.toPrecision(5), g ? reset : ''
    );
    var r1 = util.format(
      '%s%s: %d%s',
      r,
      nodes[1],
      n1.toPrecision(5), r ? reset : ''
    );
    pct = c + pct + '%' + reset;
    var l = util.format('%s: %s %s', bench, r0, r1);
    maxLen = Math.max(l.length + pct.length, maxLen);
    return [l, pct];
  }).filter(function(l) {
    return l;
  }).forEach(function(line) {
    var l = line[0];
    var pct = line[1];
    var dotLen = maxLen - l.length - pct.length + 2;
    var dots = ' ' + new Array(Math.max(0, dotLen)).join('.') + ' ';
    console.log(l + dots + pct);
  });
  console.log(end);
}

function avg(list) {
  if (list.length >= 3) {
    list = list.sort();
    var q = Math.floor(list.length / 4) || 1;
    list = list.slice(q, -q);
  }
  return list.reduce(function(a, b) {
    return a + b;
  }, 0) / list.length;
}

New implementation (added):

'use strict';

const fork = require('child_process').fork;
const path = require('path');
const CLI = require('./_cli.js');

//
// Parse arguments
//
const cli = CLI(`usage: ./node compare.js [options] [--] <category> ...
  Run each benchmark in the <category> directory many times using two different
  node versions. More than one <category> directory can be specified.
  The output is formatted as csv, which can be processed using, for
  example, 'compare.R'.

  --new      ./new-node-binary  new node binary (required)
  --old      ./old-node-binary  old node binary (required)
  --runs     30                 number of samples
  --filter   pattern            string to filter benchmark scripts
  --set      variable=value     set benchmark variable (can be repeated)
`, {
  arrayArgs: ['set']
});

if (!cli.optional.new || !cli.optional.old) {
  cli.abort(cli.usage);
  return;
}

const binaries = ['old', 'new'];
const runs = cli.optional.runs ? parseInt(cli.optional.runs, 10) : 30;
const benchmarks = cli.benchmarks();

if (benchmarks.length === 0) {
  console.error('no benchmarks found');
  process.exit(1);
}

// Create a queue from the benchmarks list such that both node versions are
// tested `runs` times each.
const queue = [];
for (let iter = 0; iter < runs; iter++) {
  for (const filename of benchmarks) {
    for (const binary of binaries) {
      queue.push({ binary, filename, iter });
    }
  }
}

// Print csv header
console.log('"binary", "filename", "configuration", "rate", "time"');

(function recursive(i) {
  const job = queue[i];

  const child = fork(path.resolve(__dirname, job.filename), cli.optional.set, {
    execPath: cli.optional[job.binary]
  });

  child.on('message', function(data) {
    // Construct configuration string, " A=a, B=b, ..."
    let conf = '';
    for (const key of Object.keys(data.conf)) {
      conf += ' ' + key + '=' + JSON.stringify(data.conf[key]);
    }
    conf = conf.slice(1);
    // Escape quotes (") for correct csv formatting
    conf = conf.replace(/"/g, '""');

    console.log(`"${job.binary}", "${job.filename}", "${conf}", ` +
                `${data.rate}, ${data.time}`);
  });

  child.once('close', function(code) {
    if (code) {
      process.exit(code);
      return;
    }

    // If there are more benchmarks, execute the next one
    if (i + 1 < queue.length) {
      recursive(i + 1);
    }
  });
})(0);
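
The new compare.js also depends on a ./_cli.js helper that is not part of this commit. A minimal sketch of it, inferred purely from the call sites above (CLI(usage, { arrayArgs }), cli.usage, cli.optional, cli.abort() and cli.benchmarks()), might look like this; everything beyond that observed surface is an assumption:

'use strict';
// Hypothetical reconstruction of benchmark/_cli.js; the real helper is not
// shown in this diff, so the details here are assumptions.
const fs = require('fs');
const path = require('path');

function CLI(usage, settings) {
  if (!(this instanceof CLI)) return new CLI(usage, settings);

  this.usage = usage;
  this.optional = {};    // parsed --key value pairs
  this.items = [];       // positional arguments (benchmark categories)

  const arrayArgs = (settings && settings.arrayArgs) || [];
  let currentOptional = null;
  let onlyItems = false; // everything after '--' is positional

  for (const arg of process.argv.slice(2)) {
    if (arg === '--') {
      onlyItems = true;
    } else if (!onlyItems && arg.startsWith('--')) {
      currentOptional = arg.slice(2);
    } else if (currentOptional !== null) {
      // value for the preceding --option; repeatable options (e.g. --set)
      // collect into an array
      if (arrayArgs.includes(currentOptional)) {
        this.optional[currentOptional] =
          (this.optional[currentOptional] || []).concat(arg);
      } else {
        this.optional[currentOptional] = arg;
      }
      currentOptional = null;
    } else {
      this.items.push(arg);
    }
  }
}

CLI.prototype.abort = function(msg) {
  console.error(msg);
  process.exit(1);
};

CLI.prototype.benchmarks = function() {
  // Collect .js files from each requested <category> directory, optionally
  // filtered by --filter; skipping _-prefixed helper files is an assumption.
  const benchmarks = [];
  for (const category of this.items) {
    for (const filename of fs.readdirSync(path.resolve(__dirname, category))) {
      if (!filename.endsWith('.js') || filename.startsWith('_')) continue;
      if (this.optional.filter && !filename.includes(this.optional.filter))
        continue;
      benchmarks.push(path.join(category, filename));
    }
  }
  return benchmarks;
};

module.exports = CLI;

With both halves in place, the intended workflow per the usage strings is something like `node benchmark/compare.js --old ./node-old --new ./node-new buffers > compare.csv`, followed by `cat compare.csv | Rscript benchmark/compare.R` (optionally with `--plot out.png`).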
