diff --git a/LICENSE b/LICENSE
index f83efa6c24..7b72de0719 100644
--- a/LICENSE
+++ b/LICENSE
@@ -28,6 +28,11 @@ are:
- HTTP Parser, located at deps/http_parser, is a small C library
copyrighted by Ryan Lienhart Dahl and has a MIT license.
+ - RonnJS, located at tools/ronnjs, is a library that generates man pages
+ and HTML from markdown. RonnJS is released under an MIT-style license
+ and has copyrights from Jérémy Lal, Ryan Tomayko, Dominic Baggott, Ash
+ Berlin, and Joey Mazzarelli.
+
Node's license follows:
diff --git a/Makefile b/Makefile
index ac65b39fad..f0f6e2d7a4 100644
--- a/Makefile
+++ b/Makefile
@@ -44,8 +44,8 @@ benchmark: all
doc: doc/node.1 doc/api.html doc/index.html doc/changelog.html
## HACK to give the ronn-generated page a TOC
-doc/api.html: doc/api.markdown doc/api_header.html doc/api_footer.html
- ronn -f --html doc/api.markdown \
+doc/api.html: all doc/api.markdown doc/api_header.html doc/api_footer.html
+ build/default/node tools/ronnjs/bin/ronn.js --fragment doc/api.markdown \
| sed "s/
\(.*\)<\/h2>/\1<\/h2>/g" \
| cat doc/api_header.html - doc/api_footer.html > doc/api.html
@@ -53,7 +53,7 @@ doc/changelog.html: ChangeLog doc/changelog_header.html doc/changelog_footer.htm
cat doc/changelog_header.html ChangeLog doc/changelog_footer.html > doc/changelog.html
doc/node.1: doc/api.markdown
- ronn --roff doc/api.markdown > doc/node.1
+ build/default/node tools/ronnjs/bin/ronn.js --roff doc/api.markdown > doc/node.1
website-upload: doc
scp doc/* ryan@nodejs.org:~/tinyclouds/node/
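
Both rewritten rules boil down to running tools/ronnjs/bin/ronn.js under the freshly built node binary. A minimal sketch of what the doc/node.1 rule does, driven directly through the Ronn API that the script wraps (lib/ronn.js is added later in this patch; the sketch assumes it runs from the repository root under the node this tree builds):

    // Roughly what `build/default/node tools/ronnjs/bin/ronn.js --roff doc/api.markdown`
    // produces for doc/node.1: parse the markdown source and emit roff.
    var fs = require('fs');
    var ronn = require('./tools/ronnjs/lib/ronn');

    var text = fs.readFileSync('doc/api.markdown', 'utf8');
    var man = new ronn.Ronn(text).roff();   // no --version/--manual/--date, as in the Makefile
    fs.writeFileSync('doc/node.1', man, 'utf8');
    // doc/api.html uses .fragment() instead, piped through sed and cat by the rule above.
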
diff --git a/doc/api_header.html b/doc/api_header.html
index 1b989fe571..7414bd91f0 100644
--- a/doc/api_header.html
+++ b/doc/api_header.html
@@ -300,8 +300,6 @@
-<h1>node(1)</h1>
-<li>node(1)</li>
diff --git a/tools/ronnjs/CHANGES b/tools/ronnjs/CHANGES
new file mode 100644
index 0000000000..581617998d
--- /dev/null
+++ b/tools/ronnjs/CHANGES
@@ -0,0 +1,12 @@
+Ronnjs Changes and Release Notes
+==============================
+
+Version 0.2
+------------------------------
+
+Supports output to html fragment.
+
+Version 0.1
+------------------------------
+
+Initial release.
diff --git a/tools/ronnjs/LICENSE b/tools/ronnjs/LICENSE
new file mode 100644
index 0000000000..f9d7162531
--- /dev/null
+++ b/tools/ronnjs/LICENSE
@@ -0,0 +1,62 @@
+Ronnjs is a javascript port of Ronn, which is an original
+work of Ryan Tomayko.
+
+Copyright: 2009 Ryan Tomayko
+License: MIT
+
+Files: bin/ronn.js, lib/ronn.js
+Copyright: 2010 Jérémy Lal
+License : MIT
+
+Files: lib/ext/markdown.js
+Copyright: 2009-2010 Dominic Baggott, 2009-2010 Ash Berlin
+License: MIT
+
+Files: lib/ext/opts.js
+Copyright: 2010 Joey Mazzarelli <mazzarelli@gmail.com>. All rights reserved.
+License: Simplified BSD License
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+ THIS SOFTWARE IS PROVIDED BY JOEY MAZZARELLI 'AS IS' AND ANY EXPRESS OR IMPLIED
+ WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+ EVENT SHALL JOEY MAZZARELLI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+ OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ The views and conclusions contained in the software and documentation are those
+ of the authors and should not be interpreted as representing official policies,
+ either expressed or implied, of Joey Mazzarelli.
+
+License: MIT
+ Permission is hereby granted, free of charge, to any person
+ obtaining a copy of this software and associated documentation
+ files (the "Software"), to deal in the Software without restriction,
+ including without limitation the rights to use, copy, modify,
+ merge, publish, distribute, sublicense, and/or sell copies of
+ the Software, and to permit persons to whom the Software is furnished
+ to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be
+ included in all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
+ OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
diff --git a/tools/ronnjs/README b/tools/ronnjs/README
new file mode 100644
index 0000000000..87474a4148
--- /dev/null
+++ b/tools/ronnjs/README
@@ -0,0 +1,25 @@
+ronnjs(1) -- markdown to roff converter
+=======================================
+
+## Synopsis
+
+A JavaScript port of [ronn], using [markdown-js] to produce roff man pages.
+Not fully compatible with [ronn], although it aims to be wherever possible.
+
+## Usage
+
+This outputs doc.roff from a markdown file:
+
+ ronn.js --build --roff doc.md
+
+Command-line options are listed with `-h`.
+
+
+## How it works
+
+[markdown-js] parses markdown text into a document model, which in turn is
+used to output a man page.
+
+
+[ronn]: http://github.com/rtomayko/ronn
+[markdown-js]: http://github.com/evilstreak/markdown-js
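
The "How it works" section above is the whole design: the bundled markdown-js turns markdown source into a JsonML document model, and ronn walks that tree to emit roff or HTML. A small sketch of the first half, using the same toHTMLTree call that lib/ronn.js makes (the require path assumes the repository root, and the sample text is made up):

    // markdown text -> JsonML document model, as consumed by lib/ronn.js
    var md = require('./tools/ronnjs/lib/ext/markdown');

    var src = "ls(1) -- list directory contents\n" +
              "================================\n\n" +
              "## SYNOPSIS\n\n" +
              "`ls` lists files.\n";

    var tree = md.toHTMLTree(src);
    // tree is a nested array, roughly:
    // ["html", ["h1", "ls(1) -- list directory contents"],
    //          ["h2", "SYNOPSIS"],
    //          ["p", ["code", "ls"], " lists files."]]
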
diff --git a/tools/ronnjs/TODO b/tools/ronnjs/TODO
new file mode 100644
index 0000000000..fe65eed014
--- /dev/null
+++ b/tools/ronnjs/TODO
@@ -0,0 +1,7 @@
+# TODO
+
+* show <hr> tags using something like
+ \l'\n(.lu*8u/10u'
+ and take care of the current indentation.
+
+* tests!
diff --git a/tools/ronnjs/bin/ronn.js b/tools/ronnjs/bin/ronn.js
new file mode 100755
index 0000000000..5087b8e2f1
--- /dev/null
+++ b/tools/ronnjs/bin/ronn.js
@@ -0,0 +1,102 @@
+#!/usr/bin/nodejs
+
+var sys = require('sys'); // required up front: the -V callback below calls sys.puts
+var opts = require(__dirname + '/../lib/ext/opts');
+var ronn = require(__dirname + '/../lib/ronn');
+
+var options = [
+ { short : 'V'
+ , description : 'Show version and exit'
+ , callback : function () { sys.puts('0.1'); process.exit(1); }
+ },
+ { short : 'b'
+ , long : 'build'
+ , description : 'Output to files with appropriate extension'
+ },
+ { short : 'm'
+ , long : 'man'
+ , description : 'Convert to roff and open with man'
+ },
+ { short : 'r'
+ , long : 'roff'
+ , description : 'Convert to roff format'
+ },
+ { short : '5'
+ , long : 'html'
+ , description : 'Convert to html format'
+ },
+ { short : 'f'
+ , long : 'fragment'
+ , description : 'Convert to html fragment format'
+ },
+ { long : 'manual'
+ , description : 'Set "manual" attribute'
+ , value : true
+ },
+ { long : 'version'
+ , description : 'Set "version" attribute'
+ , value : true
+ },
+ { long : 'date'
+ , description : 'Set "date" attribute'
+ , value : true
+ }
+];
+var arguments = [
+ { name : 'file'
+ , required : true
+ , description: 'A ronn file'
+ }
+];
+opts.parse(options, arguments, true);
+
+
+var fs = require('fs');
+var path = require('path');
+
+var fPath = opts.arg('file');
+var fBase = path.join(path.dirname(fPath), path.basename(fPath, path.extname(fPath)));
+
+var fTxt = fs.readFileSync(fPath, 'utf8');
+var ronn = new ronn.Ronn(fTxt, opts.get("version"), opts.get("manual"), opts.get("date"));
+
+if (opts.get("man") && !opts.get("build")) {
+ var spawn = require('child_process').spawn;
+ var man = spawn('man', ['--warnings', '-E UTF-8', '-l', '-'], {"LANG":"C"});
+ man.stdout.addListener('data', function (data) {
+ sys.puts(data);
+ });
+ man.stderr.addListener('data', function (data) {
+ sys.puts(data);
+ });
+ man.addListener('exit', function() {
+ process.exit(0);
+ });
+ man.stdin.write(ronn.roff(), 'utf8');
+ man.stdin.end();
+} else {
+ var fRoff = null;
+ var fHtml = null;
+ var fFrag = null;
+ if (!opts.get("html") && !opts.get("fragment")) fRoff = ronn.roff();
+ else {
+ if (opts.get("roff")) fRoff = ronn.roff();
+ if (opts.get("html")) fHtml = ronn.html();
+ if (opts.get("fragment")) {
+ if (opts.get("html")) {
+ sys.debug("Can't use both --fragment and --html");
+ process.exit(-1);
+ }
+ fFrag = ronn.fragment();
+ }
+ }
+ if (opts.get("build")) {
+ if (fRoff) fs.writeFileSync(fBase + ".roff", fRoff, 'utf8');
+ if (fHtml) fs.writeFileSync(fBase + ".html", fHtml, 'utf8');
+ if (fFrag) fs.writeFileSync(fBase + ".fragment", fFrag, 'utf8');
+ } else {
+ if (fRoff) sys.puts(fRoff);
+ if (fHtml) sys.puts(fHtml);
+ if (fFrag) sys.puts(fFrag);
+ }
+}
diff --git a/tools/ronnjs/lib/ext/markdown.js b/tools/ronnjs/lib/ext/markdown.js
new file mode 100644
index 0000000000..f46f59533d
--- /dev/null
+++ b/tools/ronnjs/lib/ext/markdown.js
@@ -0,0 +1,1454 @@
+// Released under MIT license
+// Copyright (c) 2009-2010 Dominic Baggott
+// Copyright (c) 2009-2010 Ash Berlin
+
+(function( expose ) {
+
+/**
+ * class Markdown
+ *
+ * Markdown processing in Javascript done right. We have very particular views
+ * on what constitutes 'right' which include:
+ *
+ * - produces well-formed HTML (this means that em and strong nesting is
+ * important)
+ *
+ * - has an intermediate representation to allow processing of parsed data (We
+ * in fact have two, both as [JsonML]: a markdown tree and an HTML tree).
+ *
+ * - is easily extensible to add new dialects without having to rewrite the
+ * entire parsing mechanics
+ *
+ * - has a good test suite
+ *
+ * This implementation fulfills all of these (except that the test suite could
+ * do with expanding to automatically run all the fixtures from other Markdown
+ * implementations.)
+ *
+ * ##### Intermediate Representation
+ *
+ * *TODO* Talk about this :) It's JsonML, but document the node names we use.
+ *
+ * [JsonML]: http://jsonml.org/ "JSON Markup Language"
+ **/
+var Markdown = expose.Markdown = function Markdown(dialect) {
+ switch (typeof dialect) {
+ case "undefined":
+ this.dialect = Markdown.dialects.Gruber;
+ break;
+ case "object":
+ this.dialect = dialect;
+ break;
+ default:
+ if (dialect in Markdown.dialects) {
+ this.dialect = Markdown.dialects[dialect];
+ }
+ else {
+ throw new Error("Unknown Markdown dialect '" + String(dialect) + "'");
+ }
+ break;
+ }
+ this.em_state = [];
+ this.strong_state = [];
+ this.debug_indent = "";
+}
+
+/**
+ * parse( markdown, [dialect] ) -> JsonML
+ * - markdown (String): markdown string to parse
+ * - dialect (String | Dialect): the dialect to use, defaults to gruber
+ *
+ * Parse `markdown` and return a markdown document as a Markdown.JsonML tree.
+ **/
+expose.parse = function( source, dialect ) {
+ // dialect will default if undefined
+ var md = new Markdown( dialect );
+ return md.toTree( source );
+}
+
+/**
+ * toHTML( markdown ) -> String
+ * toHTML( md_tree ) -> String
+ * - markdown (String): markdown string to parse
+ * - md_tree (Markdown.JsonML): parsed markdown tree
+ *
+ * Take markdown (either as a string or as a JsonML tree) and run it through
+ * [[toHTMLTree]] then turn it into a well-formatted HTML fragment.
+ **/
+expose.toHTML = function toHTML( source ) {
+ var input = expose.toHTMLTree( source );
+
+ return expose.renderJsonML( input );
+}
+
+/**
+ * toHTMLTree( markdown, [dialect] ) -> JsonML
+ * toHTMLTree( md_tree ) -> JsonML
+ * - markdown (String): markdown string to parse
+ * - dialect (String | Dialect): the dialect to use, defaults to gruber
+ * - md_tree (Markdown.JsonML): parsed markdown tree
+ *
+ * Turn markdown into HTML, represented as a JsonML tree. If a string is given
+ * to this function, it is first parsed into a markdown tree by calling
+ * [[parse]].
+ **/
+expose.toHTMLTree = function toHTMLTree( input, dialect ) {
+ // convert string input to an MD tree
+ if ( typeof input ==="string" ) input = this.parse( input, dialect );
+
+ // Now convert the MD tree to an HTML tree
+
+ // remove references from the tree
+ var attrs = extract_attr( input ),
+ refs = {};
+
+ if ( attrs && attrs.references ) {
+ refs = attrs.references;
+ }
+
+ var html = convert_tree_to_html( input, refs );
+ merge_text_nodes( html );
+ return html;
+}
+
+var mk_block = Markdown.mk_block = function(block, trail, line) {
+ // Be helpful for default case in tests.
+ if ( arguments.length == 1 ) trail = "\n\n";
+
+ var s = new String(block);
+ s.trailing = trail;
+ // To make it clear it's not just a string
+ s.toSource = function() {
+ return "Markdown.mk_block( " +
+ uneval(block) +
+ ", " +
+ uneval(trail) +
+ ", " +
+ uneval(line) +
+ " )"
+ }
+
+ if (line != undefined)
+ s.lineNumber = line;
+
+ return s;
+}
+
+function count_lines( str ) {
+ var n = 0, i = -1;
+ while ( ( i = str.indexOf('\n', i+1) ) != -1) n++;
+ return n;
+}
+
+// Internal - split source into rough blocks
+Markdown.prototype.split_blocks = function splitBlocks( input, startLine ) {
+ // [\s\S] matches _anything_ (newline or space)
+ var re = /([\s\S]+?)($|\n(?:\s*\n|$)+)/g,
+ blocks = [],
+ m;
+
+ var line_no = 1;
+
+ if ( ( m = (/^(\s*\n)/)(input) ) != null ) {
+ // skip (but count) leading blank lines
+ line_no += count_lines( m[0] );
+ re.lastIndex = m[0].length;
+ }
+
+ while ( ( m = re(input) ) != null ) {
+ blocks.push( mk_block( m[1], m[2], line_no ) );
+ line_no += count_lines( m[0] );
+ }
+
+ return blocks;
+}
+
+/**
+ * Markdown#processBlock( block, next ) -> undefined | [ JsonML, ... ]
+ * - block (String): the block to process
+ * - next (Array): the following blocks
+ *
+ * Process `block` and return an array of JsonML nodes representing `block`.
+ *
+ * It does this by asking each block level function in the dialect to process
+ * the block until one can. Successful handling is indicated by returning an
+ * array (with zero or more JsonML nodes), failure by a false value.
+ *
+ * Blocks handlers are responsible for calling [[Markdown#processInline]]
+ * themselves as appropriate.
+ *
+ * If the blocks were split incorrectly or adjacent blocks need collapsing you
+ * can adjust `next` in place using shift/splice etc.
+ *
+ * If any of this default behaviour is not right for the dialect, you can
+ * define a `__call__` method on the dialect that will get invoked to handle
+ * the block processing.
+ */
+Markdown.prototype.processBlock = function processBlock( block, next ) {
+ var cbs = this.dialect.block,
+ ord = cbs.__order__;
+
+ if ( "__call__" in cbs ) {
+ return cbs.__call__.call(this, block, next);
+ }
+
+ for ( var i = 0; i < ord.length; i++ ) {
+ //D:this.debug( "Testing", ord[i] );
+ var res = cbs[ ord[i] ].call( this, block, next );
+ if ( res ) {
+ //D:this.debug(" matched");
+ if ( !( res instanceof Array ) || ( res.length > 0 && !( res[0] instanceof Array ) ) )
+ this.debug(ord[i], "didn't return a proper array");
+ //D:this.debug( "" );
+ return res;
+ }
+ }
+
+ // Uhoh! no match! Should we throw an error?
+ return [];
+}
+
+Markdown.prototype.processInline = function processInline( block ) {
+ return this.dialect.inline.__call__.call( this, String( block ) );
+}
+
+/**
+ * Markdown#toTree( source ) -> JsonML
+ * - source (String): markdown source to parse
+ *
+ * Parse `source` into a JsonML tree representing the markdown document.
+ **/
+// custom_tree means set this.tree to `custom_tree` and restore old value on return
+Markdown.prototype.toTree = function toTree( source, custom_root ) {
+ var blocks = source instanceof Array
+ ? source
+ : this.split_blocks( source );
+
+ // Make tree a member variable so it's easier to mess with in extensions
+ var old_tree = this.tree;
+ try {
+ this.tree = custom_root || this.tree || [ "markdown" ];
+
+ blocks:
+ while ( blocks.length ) {
+ var b = this.processBlock( blocks.shift(), blocks );
+
+ // Reference blocks and the like won't return any content
+ if ( !b.length ) continue blocks;
+
+ this.tree.push.apply( this.tree, b );
+ }
+ return this.tree;
+ }
+ finally {
+ if ( custom_root )
+ this.tree = old_tree;
+ }
+
+}
+
+// Noop by default
+Markdown.prototype.debug = function () {
+ var args = Array.prototype.slice.call( arguments);
+ args.unshift(this.debug_indent);
+ print.apply( print, args );
+}
+
+Markdown.prototype.loop_re_over_block = function( re, block, cb ) {
+ // Dont use /g regexps with this
+ var m,
+ b = block.valueOf();
+
+ while ( b.length && (m = re(b) ) != null) {
+ b = b.substr( m[0].length );
+ cb.call(this, m);
+ }
+ return b;
+}
+
+/**
+ * Markdown.dialects
+ *
+ * Namespace of built-in dialects.
+ **/
+Markdown.dialects = {};
+
+/**
+ * Markdown.dialects.Gruber
+ *
+ * The default dialect that follows the rules set out by John Gruber's
+ * markdown.pl as closely as possible. Well actually we follow the behaviour of
+ * that script which in some places is not exactly what the syntax web page
+ * says.
+ **/
+Markdown.dialects.Gruber = {
+ block: {
+ atxHeader: function atxHeader( block, next ) {
+ var m = block.match( /^(#{1,6})\s*(.*?)\s*#*\s*(?:\n|$)/ );
+
+ if ( !m ) return undefined;
+
+ var header = [ "header", { level: m[ 1 ].length }, m[ 2 ] ];
+
+ if ( m[0].length < block.length )
+ next.unshift( mk_block( block.substr( m[0].length ), block.trailing, block.lineNumber + 2 ) );
+
+ return [ header ];
+ },
+
+ setextHeader: function setextHeader( block, next ) {
+ var m = block.match( /^(.*)\n([-=])\2\2+(?:\n|$)/ );
+
+ if ( !m ) return undefined;
+
+ var level = ( m[ 2 ] === "=" ) ? 1 : 2;
+ var header = [ "header", { level : level }, m[ 1 ] ];
+
+ if ( m[0].length < block.length )
+ next.unshift( mk_block( block.substr( m[0].length ), block.trailing, block.lineNumber + 2 ) );
+
+ return [ header ];
+ },
+
+ code: function code( block, next ) {
+ // | Foo
+ // |bar
+ // should be a code block followed by a paragraph. Fun
+ //
+ // There might also be adjacent code block to merge.
+
+ var ret = [],
+ re = /^(?: {0,3}\t| {4})(.*)\n?/,
+ lines;
+
+ // 4 spaces + content
+ var m = block.match( re );
+
+ if ( !m ) return undefined;
+
+ block_search:
+ do {
+ // Now pull out the rest of the lines
+ var b = this.loop_re_over_block(
+ re, block.valueOf(), function( m ) { ret.push( m[1] ) } );
+
+ if (b.length) {
+ // Case alluded to in first comment. push it back on as a new block
+ next.unshift( mk_block(b, block.trailing) );
+ break block_search;
+ }
+ else if (next.length) {
+ // Check the next block - it might be code too
+ var m = next[0].match( re );
+
+ if ( !m ) break block_search;
+
+ // Pull out how many blank lines follow - minus two to account for .join
+ ret.push ( block.trailing.replace(/[^\n]/g, '').substring(2) );
+
+ block = next.shift();
+ }
+ else
+ break block_search;
+ } while (true);
+
+ return [ [ "code_block", ret.join("\n") ] ];
+ },
+
+ horizRule: function horizRule( block, next ) {
+ // this needs to find any hr in the block to handle abutting blocks
+ var m = block.match( /^(?:([\s\S]*?)\n)?[ \t]*([-_*])(?:[ \t]*\2){2,}[ \t]*(?:\n([\s\S]*))?$/ );
+
+ if ( !m ) {
+ return undefined;
+ }
+
+ var jsonml = [ [ "hr" ] ];
+
+ // if there's a leading abutting block, process it
+ if ( m[ 1 ] ) {
+ jsonml.unshift.apply( jsonml, this.processBlock( m[ 1 ], [] ) );
+ }
+
+ // if there's a trailing abutting block, stick it into next
+ if ( m[ 3 ] ) {
+ next.unshift( mk_block( m[ 3 ] ) );
+ }
+
+ return jsonml;
+ },
+
+ // There are two types of lists. Tight and loose. Tight lists have no whitespace
+ // between the items (and result in text just in the <li>) and loose lists,
+ // which have an empty line between list items, resulting in (one or more)
+ // paragraphs inside the <li>.
+ //
+ // There are all sorts of weird edge cases about the original markdown.pl's
+ // handling of lists:
+ //
+ // * Nested lists are supposed to be indented by four chars per level. But
+ // if they aren't, you can get a nested list by indenting by less than
+ // four so long as the indent doesn't match an indent of an existing list
+ // item in the 'nest stack'.
+ //
+ // * The type of the list (bullet or number) is controlled just by the
+ // first item at the indent. Subsequent changes are ignored unless they
+ // are for nested lists
+ //
+ lists: (function( ) {
+ // Use a closure to hide a few variables.
+ var any_list = "[*+-]|\\d\\.",
+ bullet_list = /[*+-]/,
+ number_list = /\d+\./,
+ // Capture leading indent as it matters for determining nested lists.
+ is_list_re = new RegExp( "^( {0,3})(" + any_list + ")[ \t]+" ),
+ indent_re = "(?: {0,3}\\t| {4})";
+
+ // TODO: Cache this regexp for certain depths.
+ // Create a regexp suitable for matching an li for a given stack depth
+ function regex_for_depth( depth ) {
+
+ return new RegExp(
+ // m[1] = indent, m[2] = list_type
+ "(?:^(" + indent_re + "{0," + depth + "} {0,3})(" + any_list + ")\\s+)|" +
+ // m[3] = cont
+ "(^" + indent_re + "{0," + (depth-1) + "}[ ]{0,4})"
+ );
+ }
+ function expand_tab( input ) {
+ return input.replace( / {0,3}\t/g, "    " );
+ }
+
+ // Add inline content `inline` to `li`. inline comes from processInline
+ // so is an array of content
+ function add(li, loose, inline, nl) {
+ if (loose) {
+ li.push( [ "para" ].concat(inline) );
+ return;
+ }
+ // Hmmm, should this be any block level element or just paras?
+ var add_to = li[li.length -1] instanceof Array && li[li.length - 1][0] == "para"
+ ? li[li.length -1]
+ : li;
+
+ // If there is already some content in this list, add the new line in
+ if (nl && li.length > 1) inline.unshift(nl);
+
+ for (var i=0; i < inline.length; i++) {
+ var what = inline[i],
+ is_str = typeof what == "string";
+ if (is_str && add_to.length > 1 && typeof add_to[add_to.length-1] == "string" )
+ {
+ add_to[ add_to.length-1 ] += what;
+ }
+ else {
+ add_to.push( what );
+ }
+ }
+ }
+
+ // contained means have an indent greater than the current one. On
+ // *every* line in the block
+ function get_contained_blocks( depth, blocks ) {
+
+ var re = new RegExp( "^(" + indent_re + "{" + depth + "}.*?\\n?)*$" ),
+ replace = new RegExp("^" + indent_re + "{" + depth + "}", "gm"),
+ ret = [];
+
+ while ( blocks.length > 0 ) {
+ if ( re( blocks[0] ) ) {
+ var b = blocks.shift(),
+ // Now remove that indent
+ x = b.replace( replace, "");
+
+ ret.push( mk_block( x, b.trailing, b.lineNumber ) );
+ }
+ else
+ break;
+ }
+ return ret;
+ }
+
+ // passed to stack.forEach to turn list items up the stack into paras
+ function paragraphify(s, i, stack) {
+ var list = s.list;
+ var last_li = list[list.length-1];
+
+ if (last_li[1] instanceof Array && last_li[1][0] == "para") {
+ return;
+ }
+ if (i+1 == stack.length) {
+ // Last stack frame
+ // Keep the same array, but replace the contents
+ last_li.push( ["para"].concat( last_li.splice(1) ) );
+ }
+ else {
+ var sublist = last_li.pop();
+ last_li.push( ["para"].concat( last_li.splice(1) ), sublist );
+ }
+ }
+
+ // The matcher function
+ return function( block, next ) {
+ var m = block.match( is_list_re );
+ if ( !m ) return undefined;
+
+ function make_list( m ) {
+ var list = bullet_list( m[2] )
+ ? ["bulletlist"]
+ : ["numberlist"];
+
+ stack.push( { list: list, indent: m[1] } );
+ return list;
+ }
+
+
+ var stack = [], // Stack of lists for nesting.
+ list = make_list( m ),
+ last_li,
+ loose = false,
+ ret = [ stack[0].list ];
+
+ // Loop to search over block looking for inner block elements and loose lists
+ loose_search:
+ while( true ) {
+ // Split into lines preserving new lines at end of line
+ var lines = block.split( /(?=\n)/ );
+
+ // We have to grab all lines for a li and call processInline on them
+ // once as there are some inline things that can span lines.
+ var li_accumulate = "";
+
+ // Loop over the lines in this block looking for tight lists.
+ tight_search:
+ for (var line_no=0; line_no < lines.length; line_no++) {
+ var nl = "",
+ l = lines[line_no].replace(/^\n/, function(n) { nl = n; return "" });
+
+ // TODO: really should cache this
+ var line_re = regex_for_depth( stack.length );
+
+ m = l.match( line_re );
+ //print( "line:", uneval(l), "\nline match:", uneval(m) );
+
+ // We have a list item
+ if ( m[1] !== undefined ) {
+ // Process the previous list item, if any
+ if ( li_accumulate.length ) {
+ add( last_li, loose, this.processInline( li_accumulate ), nl );
+ // Loose mode will have been dealt with. Reset it
+ loose = false;
+ li_accumulate = "";
+ }
+
+ m[1] = expand_tab( m[1] );
+ var wanted_depth = Math.floor(m[1].length/4)+1;
+ //print( "want:", wanted_depth, "stack:", stack.length);
+ if ( wanted_depth > stack.length ) {
+ // Deep enough for a nested list outright
+ //print ( "new nested list" );
+ list = make_list( m );
+ last_li.push( list );
+ last_li = list[1] = [ "listitem" ];
+ }
+ else {
+ // We aren't deep enough to be strictly a new level. This is
+ // where Md.pl goes nuts. If the indent matches a level in the
+ // stack, put it there, else put it one deeper than the
+ // wanted_depth deserves.
+ var found = stack.some(function(s, i) {
+ if ( s.indent != m[1] ) return false;
+ list = s.list; // Found the level we want
+ stack.splice(i+1); // Remove the others
+ //print("found");
+ return true; // And stop looping
+ });
+
+ if (!found) {
+ //print("not found. l:", uneval(l));
+ wanted_depth++;
+ if (wanted_depth <= stack.length) {
+ stack.splice(wanted_depth);
+ //print("Desired depth now", wanted_depth, "stack:", stack.length);
+ list = stack[wanted_depth-1].list;
+ //print("list:", uneval(list) );
+ }
+ else {
+ //print ("made new stack for messy indent");
+ list = make_list(m);
+ last_li.push(list);
+ }
+ }
+
+ //print( uneval(list), "last", list === stack[stack.length-1].list );
+ last_li = [ "listitem" ];
+ list.push(last_li);
+ } // end depth of shenanigans
+ nl = "";
+ }
+
+ // Add content
+ if (l.length > m[0].length) {
+ li_accumulate += nl + l.substr( m[0].length );
+ }
+ } // tight_search
+
+ if ( li_accumulate.length ) {
+ add( last_li, loose, this.processInline( li_accumulate ), nl );
+ // Loose mode will have been dealt with. Reset it
+ loose = false;
+ li_accumulate = "";
+ }
+
+ // Look at the next block - we might have a loose list. Or an extra
+ // paragraph for the current li
+ var contained = get_contained_blocks( stack.length, next );
+
+ // Deal with code blocks or properly nested lists
+ if (contained.length > 0) {
+ // Make sure all listitems up the stack are paragraphs
+ stack.forEach( paragraphify, this );
+
+ last_li.push.apply( last_li, this.toTree( contained, [] ) );
+ }
+
+ var next_block = next[0] && next[0].valueOf() || "";
+
+ if ( next_block.match(is_list_re) || next_block.match( /^ / ) ) {
+ block = next.shift();
+
+ // Check for an HR following a list: features/lists/hr_abutting
+ var hr = this.dialect.block.horizRule( block, next );
+
+ if (hr) {
+ ret.push.apply(ret, hr);
+ break;
+ }
+
+ // Make sure all listitems up the stack are paragraphs
+ stack.forEach( paragraphify , this );
+
+ loose = true;
+ continue loose_search;
+ }
+ break;
+ } // loose_search
+
+ return ret;
+ }
+ })(),
+
+ blockquote: function blockquote( block, next ) {
+ if ( !block.match( /^>/m ) )
+ return undefined;
+
+ var jsonml = [];
+
+ // separate out the leading abutting block, if any
+ if ( block[ 0 ] != ">" ) {
+ var lines = block.split( /\n/ ),
+ prev = [];
+
+ // keep shifting lines until you find a crotchet
+ while ( lines.length && lines[ 0 ][ 0 ] != ">" ) {
+ prev.push( lines.shift() );
+ }
+
+ // reassemble!
+ block = lines.join( "\n" );
+ jsonml.push.apply( jsonml, this.processBlock( prev.join( "\n" ), [] ) );
+ }
+
+ // if the next block is also a blockquote merge it in
+ while ( next.length && next[ 0 ][ 0 ] == ">" ) {
+ var b = next.shift();
+ block += block.trailing + b;
+ block.trailing = b.trailing;
+ }
+
+ // Strip off the leading "> " and re-process as a block.
+ var input = block.replace( /^> ?/gm, '' ),
+ old_tree = this.tree;
+ jsonml.push( this.toTree( input, [ "blockquote" ] ) );
+
+ return jsonml;
+ },
+
+ referenceDefn: function referenceDefn( block, next) {
+ var re = /^\s*\[(.*?)\]:\s*(\S+)(?:\s+(?:(['"])(.*?)\3|\((.*?)\)))?\n?/;
+ // interesting matches are [ , ref_id, url, , title, title ]
+
+ if ( !block.match(re) )
+ return undefined;
+
+ // make an attribute node if it doesn't exist
+ if ( !extract_attr( this.tree ) ) {
+ this.tree.splice( 1, 0, {} );
+ }
+
+ var attrs = extract_attr( this.tree );
+
+ // make a references hash if it doesn't exist
+ if ( attrs.references === undefined ) {
+ attrs.references = {};
+ }
+
+ var b = this.loop_re_over_block(re, block, function( m ) {
+
+ if ( m[2] && m[2][0] == '<' && m[2][m[2].length-1] == '>' )
+ m[2] = m[2].substring( 1, m[2].length - 1 );
+
+ var ref = attrs.references[ m[1].toLowerCase() ] = {
+ href: m[2]
+ };
+
+ if (m[4] !== undefined)
+ ref.title = m[4];
+ else if (m[5] !== undefined)
+ ref.title = m[5];
+
+ } );
+
+ if (b.length)
+ next.unshift( mk_block( b, block.trailing ) );
+
+ return [];
+ },
+
+ para: function para( block, next ) {
+ // everything's a para!
+ return [ ["para"].concat( this.processInline( block ) ) ];
+ }
+ }
+}
+
+Markdown.dialects.Gruber.inline = {
+ __call__: function inline( text, patterns ) {
+ // Hmmm - should this function be directly in Md#processInline, or
+ // conversely, should Md#processBlock be moved into block.__call__ too
+ var out = [ ],
+ m,
+ // Look for the next occurrence of a special character/pattern
+ re = new RegExp( "([\\s\\S]*?)(" + (patterns.source || patterns) + ")", "g" ),
+ lastIndex = 0;
+
+ //D:var self = this;
+ //D:self.debug("processInline:", uneval(text) );
+ function add(x) {
+ //D:self.debug(" adding output", uneval(x));
+ if (typeof x == "string" && typeof out[out.length-1] == "string")
+ out[ out.length-1 ] += x;
+ else
+ out.push(x);
+ }
+
+ while ( ( m = re.exec(text) ) != null) {
+ if ( m[1] ) add( m[1] ); // Some un-interesting text matched
+ else m[1] = { length: 0 }; // Or there was none, but make m[1].length == 0
+
+ var res;
+ if ( m[2] in this.dialect.inline ) {
+ res = this.dialect.inline[ m[2] ].call(
+ this,
+ text.substr( m.index + m[1].length ), m, out );
+ }
+ // Default for now to make dev easier. just slurp special and output it.
+ res = res || [ m[2].length, m[2] ];
+
+ var len = res.shift();
+ // Update how much input was consumed
+ re.lastIndex += ( len - m[2].length );
+
+ // Add children
+ res.forEach(add);
+
+ lastIndex = re.lastIndex;
+ }
+
+ // Add last 'boring' chunk
+ if ( text.length > lastIndex )
+ add( text.substr( lastIndex ) );
+
+ return out;
+ },
+
+ "\\": function escaped( text ) {
+ // [ length of input processed, node/children to add... ]
+ // Only escape: \ ` * _ { } [ ] ( ) # * + - . !
+ if ( text.match( /^\\[\\`\*_{}\[\]()#\+.!\-]/ ) )
+ return [ 2, text[1] ];
+ else
+ // Not an escape
+ return [ 1, "\\" ];
+ },
+
+ "![": function image( text ) {
+ // ![Alt text](/path/to/img.jpg "Optional title")
+ // 1 2 3 4 <--- captures
+ var m = text.match( /^!\[(.*?)\][ \t]*\([ \t]*(\S*)(?:[ \t]+(["'])(.*?)\3)?[ \t]*\)/ );
+
+ if ( m ) {
+ if ( m[2] && m[2][0] == '<' && m[2][m[2].length-1] == '>' )
+ m[2] = m[2].substring( 1, m[2].length - 1 );
+
+ m[2] = this.dialect.inline.__call__.call( this, m[2], /\\/ )[0];
+
+ var attrs = { alt: m[1], href: m[2] || "" };
+ if ( m[4] !== undefined)
+ attrs.title = m[4];
+
+ return [ m[0].length, [ "img", attrs ] ];
+ }
+
+ // ![Alt text][id]
+ m = text.match( /^!\[(.*?)\][ \t]*\[(.*?)\]/ );
+
+ if ( m ) {
+ // We can't check if the reference is known here as it likely won't be
+ // found till after. Check it in md tree->html tree conversion
+ return [ m[0].length, [ "img_ref", { alt: m[1], ref: m[2].toLowerCase(), text: m[0] } ] ];
+ }
+
+ // Just consume the '!['
+ return [ 2, "![" ];
+ },
+
+ "[": function link( text ) {
+ // [link text](/path/to/img.jpg "Optional title")
+ // 1 2 3 4 <--- captures
+ var m = text.match( /^\[([\s\S]*?)\][ \t]*\([ \t]*(\S+)(?:[ \t]+(["'])(.*?)\3)?[ \t]*\)/ );
+
+ if ( m ) {
+ if ( m[2] && m[2][0] == '<' && m[2][m[2].length-1] == '>' )
+ m[2] = m[2].substring( 1, m[2].length - 1 );
+
+ // Process escapes only
+ m[2] = this.dialect.inline.__call__.call( this, m[2], /\\/ )[0];
+
+ var attrs = { href: m[2] || "" };
+ if ( m[4] !== undefined)
+ attrs.title = m[4];
+
+ return [ m[0].length, [ "link", attrs, m[1] ] ];
+ }
+
+ // [Alt text][id]
+ // [Alt text] [id]
+ // [id]
+ m = text.match( /^\[([\s\S]*?)\](?: ?\[(.*?)\])?/ );
+
+ if ( m ) {
+ // [id] case, text == id
+ if ( m[2] === undefined || m[2] === "" ) m[2] = m[1];
+
+ // We can't check if the reference is known here as it likely won't be
+ // found till after. Check it in md tree->html tree conversion.
+ // Store the original so that conversion can revert if the ref isn't found.
+ return [
+ m[ 0 ].length,
+ [
+ "link_ref",
+ {
+ ref: m[ 2 ].toLowerCase(),
+ original: m[ 0 ]
+ },
+ m[ 1 ]
+ ]
+ ];
+ }
+
+ // Just consume the '['
+ return [ 1, "[" ];
+ },
+
+
+ "<": function autoLink( text ) {
+ var m;
+
+ if ( ( m = text.match( /^<(?:((https?|ftp|mailto):[^>]+)|(.*?@.*?\.[a-zA-Z]+))>/ ) ) != null ) {
+ if ( m[3] ) {
+ return [ m[0].length, [ "link", { href: "mailto:" + m[3] }, m[3] ] ];
+
+ }
+ else if ( m[2] == "mailto" ) {
+ return [ m[0].length, [ "link", { href: m[1] }, m[1].substr("mailto:".length ) ] ];
+ }
+ else
+ return [ m[0].length, [ "link", { href: m[1] }, m[1] ] ];
+ }
+
+ return [ 1, "<" ];
+ },
+
+ "`": function inlineCode( text ) {
+ // Inline code block. as many backticks as you like to start it
+ // Always skip over the opening ticks.
+ var m = text.match( /(`+)(([\s\S]*?)\1)/ );
+
+ if ( m && m[2] )
+ return [ m[1].length + m[2].length, [ "inlinecode", m[3] ] ];
+ else {
+ // TODO: No matching end code found - warn!
+ return [ 1, "`" ];
+ }
+ },
+
+ " \n": function lineBreak( text ) {
+ return [ 3, [ "linebreak" ] ];
+ }
+
+}
+
+// Meta Helper/generator method for em and strong handling
+function strong_em( tag, md ) {
+
+ var state_slot = tag + "_state",
+ other_slot = tag == "strong" ? "em_state" : "strong_state";
+
+ function CloseTag(len) {
+ this.len_after = len;
+ this.name = "close_" + md;
+ }
+
+ return function ( text, orig_match ) {
+
+ if (this[state_slot][0] == md) {
+ // Most recent em is of this type
+ //D:this.debug("closing", md);
+ this[state_slot].shift();
+
+ // "Consume" everything to go back to the recrusion in the else-block below
+ return[ text.length, new CloseTag(text.length-md.length) ];
+ }
+ else {
+ // Store a clone of the em/strong states
+ var other = this[other_slot].slice(),
+ state = this[state_slot].slice();
+
+ this[state_slot].unshift(md);
+
+ //D:this.debug_indent += " ";
+
+ // Recurse
+ var res = this.processInline( text.substr( md.length ) );
+ //D:this.debug_indent = this.debug_indent.substr(2);
+
+ var last = res[res.length - 1];
+
+ //D:this.debug("processInline from", tag + ": ", uneval( res ) );
+
+ var check = this[state_slot].shift();
+ if (last instanceof CloseTag) {
+ res.pop();
+ // We matched! Huzzah.
+ var consumed = text.length - last.len_after;
+ return [ consumed, [ tag ].concat(res) ];
+ }
+ else {
+ // Restore the state of the other kind. We might have mistakenly closed it.
+ this[other_slot] = other;
+ this[state_slot] = state;
+
+ // We can't reuse the processed result as it could have wrong parsing contexts in it.
+ return [ md.length, md ];
+ }
+ }
+ } // End returned function
+}
+
+Markdown.dialects.Gruber.inline["**"] = strong_em("strong", "**");
+Markdown.dialects.Gruber.inline["__"] = strong_em("strong", "__");
+Markdown.dialects.Gruber.inline["*"] = strong_em("em", "*");
+Markdown.dialects.Gruber.inline["_"] = strong_em("em", "_");
+
+
+// Build default order from insertion order.
+Markdown.buildBlockOrder = function(d) {
+ var ord = [];
+ for ( var i in d ) {
+ if ( i == "__order__" || i == "__call__" ) continue;
+ ord.push( i );
+ }
+ d.__order__ = ord;
+}
+
+// Build patterns for inline matcher
+Markdown.buildInlinePatterns = function(d) {
+ var patterns = [];
+
+ for ( var i in d ) {
+ if (i == "__call__") continue;
+ var l = i.replace( /([\\.*+?|()\[\]{}])/g, "\\$1" )
+ .replace( /\n/, "\\n" );
+ patterns.push( i.length == 1 ? l : "(?:" + l + ")" );
+ }
+
+ patterns = patterns.join("|");
+ //print("patterns:", uneval( patterns ) );
+
+ var fn = d.__call__;
+ d.__call__ = function(text, pattern) {
+ if (pattern != undefined)
+ return fn.call(this, text, pattern);
+ else
+ return fn.call(this, text, patterns);
+ }
+}
+
+// Helper function to make sub-classing a dialect easier
+Markdown.subclassDialect = function( d ) {
+ function Block() {};
+ Block.prototype = d.block;
+ function Inline() {};
+ Inline.prototype = d.inline;
+
+ return { block: new Block(), inline: new Inline() };
+}
+
+Markdown.buildBlockOrder ( Markdown.dialects.Gruber.block );
+Markdown.buildInlinePatterns( Markdown.dialects.Gruber.inline );
+
+Markdown.dialects.Maruku = Markdown.subclassDialect( Markdown.dialects.Gruber );
+
+Markdown.dialects.Maruku.block.document_meta = function document_meta( block, next ) {
+ // we're only interested in the first block
+ if ( block.lineNumber > 1 ) return undefined;
+
+ // document_meta blocks consist of one or more lines of `Key: Value\n`
+ if ( ! block.match( /^(?:\w+:.*\n)*\w+:.*$/ ) ) return undefined;
+
+ // make an attribute node if it doesn't exist
+ if ( !extract_attr( this.tree ) ) {
+ this.tree.splice( 1, 0, {} );
+ }
+
+ var pairs = block.split( /\n/ );
+ for ( p in pairs ) {
+ var m = pairs[ p ].match( /(\w+):\s*(.*)$/ ),
+ key = m[ 1 ].toLowerCase(),
+ value = m[ 2 ];
+
+ this.tree[ 1 ][ key ] = value;
+ }
+
+ // document_meta produces no content!
+ return [];
+}
+
+Markdown.dialects.Maruku.block.block_meta = function block_meta( block, next ) {
+ // check if the last line of the block is a meta hash
+ var m = block.match( /(^|\n) {0,3}\{:\s*((?:\\\}|[^\}])*)\s*\}$/ );
+ if ( !m ) return undefined;
+
+ // process the meta hash
+ var attr = process_meta_hash( m[ 2 ] );
+
+ // if we matched ^ then we need to apply meta to the previous block
+ if ( m[ 1 ] === "" ) {
+ var node = this.tree[ this.tree.length - 1 ],
+ hash = extract_attr( node );
+
+ // if the node is a string (rather than JsonML), bail
+ if ( typeof node === "string" ) return undefined;
+
+ // create the attribute hash if it doesn't exist
+ if ( !hash ) {
+ hash = {};
+ node.splice( 1, 0, hash );
+ }
+
+ // add the attributes in
+ for ( a in attr ) {
+ hash[ a ] = attr[ a ];
+ }
+
+ // return nothing so the meta hash is removed
+ return [];
+ }
+
+ // pull the meta hash off the block and process what's left
+ var b = block.replace( /\n.*$/, "" ),
+ result = this.processBlock( b, [] );
+
+ // get or make the attributes hash
+ var hash = extract_attr( result[ 0 ] );
+ if ( !hash ) {
+ hash = {};
+ result[ 0 ].splice( 1, 0, hash );
+ }
+
+ // attach the attributes to the block
+ for ( a in attr ) {
+ hash[ a ] = attr[ a ];
+ }
+
+ return result;
+}
+
+Markdown.dialects.Maruku.block.definition_list = function definition_list( block, next ) {
+ // one or more terms followed by one or more definitions, in a single block
+ var tight = /^((?:[^\s:].*\n)+):\s+([^]+)$/,
+ list = [ "dl" ];
+
+ // see if we're dealing with a tight or loose block
+ if ( ( m = block.match( tight ) ) ) {
+ // pull subsequent tight DL blocks out of `next`
+ var blocks = [ block ];
+ while ( next.length && tight.exec( next[ 0 ] ) ) {
+ blocks.push( next.shift() );
+ }
+
+ for ( var b = 0; b < blocks.length; ++b ) {
+ var m = blocks[ b ].match( tight ),
+ terms = m[ 1 ].replace( /\n$/, "" ).split( /\n/ ),
+ defns = m[ 2 ].split( /\n:\s+/ );
+
+ // print( uneval( m ) );
+
+ for ( var i = 0; i < terms.length; ++i ) {
+ list.push( [ "dt", terms[ i ] ] );
+ }
+
+ for ( var i = 0; i < defns.length; ++i ) {
+ // run inline processing over the definition
+ list.push( [ "dd" ].concat( this.processInline( defns[ i ].replace( /(\n)\s+/, "$1" ) ) ) );
+ }
+ }
+ }
+ else {
+ return undefined;
+ }
+
+ return [ list ];
+}
+
+Markdown.dialects.Maruku.inline[ "{:" ] = function inline_meta( text, matches, out ) {
+ if ( !out.length ) {
+ return [ 2, "{:" ];
+ }
+
+ // get the preceding element
+ var before = out[ out.length - 1 ];
+
+ if ( typeof before === "string" ) {
+ return [ 2, "{:" ];
+ }
+
+ // match a meta hash
+ var m = text.match( /^\{:\s*((?:\\\}|[^\}])*)\s*\}/ );
+
+ // no match, false alarm
+ if ( !m ) {
+ return [ 2, "{:" ];
+ }
+
+ // attach the attributes to the preceding element
+ var meta = process_meta_hash( m[ 1 ] ),
+ attr = extract_attr( before );
+
+ if ( !attr ) {
+ attr = {};
+ before.splice( 1, 0, attr );
+ }
+
+ for ( var k in meta ) {
+ attr[ k ] = meta[ k ];
+ }
+
+ // cut out the string and replace it with nothing
+ return [ m[ 0 ].length, "" ];
+}
+
+Markdown.buildBlockOrder ( Markdown.dialects.Maruku.block );
+Markdown.buildInlinePatterns( Markdown.dialects.Maruku.inline );
+
+function extract_attr( jsonml ) {
+ return jsonml instanceof Array
+ && jsonml.length > 1
+ && typeof jsonml[ 1 ] === "object"
+ && !( jsonml[ 1 ] instanceof Array )
+ ? jsonml[ 1 ]
+ : undefined;
+}
+
+function process_meta_hash( meta_string ) {
+ var meta = split_meta_hash( meta_string ),
+ attr = {};
+
+ for ( var i = 0; i < meta.length; ++i ) {
+ // id: #foo
+ if ( /^#/.test( meta[ i ] ) ) {
+ attr.id = meta[ i ].substring( 1 );
+ }
+ // class: .foo
+ else if ( /^\./.test( meta[ i ] ) ) {
+ // if class already exists, append the new one
+ if ( attr['class'] ) {
+ attr['class'] = attr['class'] + meta[ i ].replace( /./, " " );
+ }
+ else {
+ attr['class'] = meta[ i ].substring( 1 );
+ }
+ }
+ // attribute: foo=bar
+ else if ( /=/.test( meta[ i ] ) ) {
+ var s = meta[ i ].split( /=/ );
+ attr[ s[ 0 ] ] = s[ 1 ];
+ }
+ }
+
+ return attr;
+}
+
+function split_meta_hash( meta_string ) {
+ var meta = meta_string.split( "" ),
+ parts = [ "" ],
+ in_quotes = false;
+
+ while ( meta.length ) {
+ var letter = meta.shift();
+ switch ( letter ) {
+ case " " :
+ // if we're in a quoted section, keep it
+ if ( in_quotes ) {
+ parts[ parts.length - 1 ] += letter;
+ }
+ // otherwise make a new part
+ else {
+ parts.push( "" );
+ }
+ break;
+ case "'" :
+ case '"' :
+ // reverse the quotes and move straight on
+ in_quotes = !in_quotes;
+ break;
+ case "\\" :
+ // shift off the next letter to be used straight away.
+ // it was escaped so we'll keep it whatever it is
+ letter = meta.shift();
+ default :
+ parts[ parts.length - 1 ] += letter;
+ break;
+ }
+ }
+
+ return parts;
+}
+
+/**
+ * renderJsonML( jsonml[, options] ) -> String
+ * - jsonml (Array): JsonML array to render to XML
+ * - options (Object): options
+ *
+ * Converts the given JsonML into well-formed XML.
+ *
+ * The options currently understood are:
+ *
+ * - root (Boolean): whether or not the root node should be included in the
+ * output, or just its children. The default `false` is to not include the
+ * root itself.
+ */
+expose.renderJsonML = function( jsonml, options ) {
+ options = options || {};
+ // include the root element in the rendered output?
+ options.root = options.root || false;
+
+ var content = [];
+
+ if ( options.root ) {
+ content.push( render_tree( jsonml ) );
+ }
+ else {
+ jsonml.shift(); // get rid of the tag
+ if ( jsonml.length && typeof jsonml[ 0 ] === "object" && !( jsonml[ 0 ] instanceof Array ) ) {
+ jsonml.shift(); // get rid of the attributes
+ }
+
+ while ( jsonml.length ) {
+ content.push( render_tree( jsonml.shift() ) );
+ }
+ }
+
+ return content.join( "\n" ).replace( /\n+$/, "" );
+}
+
+function render_tree( jsonml ) {
+ // basic case
+ if ( typeof jsonml === "string" ) {
+ return jsonml.replace( /&/g, "&amp;" )
+ .replace( /</g, "&lt;" )
+ .replace( />/g, "&gt;" );
+ }
+
+ var tag = jsonml.shift(),
+ attributes = {},
+ content = [];
+
+ if ( jsonml.length && typeof jsonml[ 0 ] === "object" && !( jsonml[ 0 ] instanceof Array ) ) {
+ attributes = jsonml.shift();
+ }
+
+ while ( jsonml.length ) {
+ content.push( arguments.callee( jsonml.shift() ) );
+ }
+
+ var tag_attrs = "";
+ for ( var a in attributes ) {
+ tag_attrs += " " + a + '="' + attributes[ a ] + '"';
+ }
+
+ var newlinetab = "\n ",
+ newline = "\n";
+
+ if ( ~["em", "strong", "img", "br", "a"].indexOf( tag ) ) {
+ newlinetab = "";
+ newline = "";
+ }
+
+ // be careful about adding whitespace here for inline elements
+ return "<"+ tag + tag_attrs + ">" + newlinetab + content.join( "" ).replace( /\n$/, "" ).replace( /\n/g, "\n " ) + newline + "" + tag + ">" + newline;
+}
+
+function convert_tree_to_html( tree, references ) {
+ // shallow clone
+ var jsonml = tree.slice( 0 );
+
+ // Clone attributes if they exist
+ var attrs = extract_attr( jsonml );
+ if ( attrs ) {
+ jsonml[ 1 ] = {};
+ for ( var i in attrs ) {
+ jsonml[ 1 ][ i ] = attrs[ i ];
+ }
+ attrs = jsonml[ 1 ];
+ }
+
+ // basic case
+ if ( typeof jsonml === "string" ) {
+ return jsonml;
+ }
+
+ // convert this node
+ switch ( jsonml[ 0 ] ) {
+ case "header":
+ jsonml[ 0 ] = "h" + jsonml[ 1 ].level;
+ delete jsonml[ 1 ].level;
+ break;
+ case "bulletlist":
+ jsonml[ 0 ] = "ul";
+ break;
+ case "numberlist":
+ jsonml[ 0 ] = "ol";
+ break;
+ case "listitem":
+ jsonml[ 0 ] = "li";
+ break;
+ case "para":
+ jsonml[ 0 ] = "p";
+ break;
+ case "markdown":
+ jsonml[ 0 ] = "html";
+ if ( attrs ) delete attrs.references;
+ break;
+ case "code_block":
+ jsonml[ 0 ] = "pre";
+ var i = attrs ? 2 : 1;
+ var code = [ "code" ];
+ code.push.apply( code, jsonml.splice( i ) );
+ jsonml[ i ] = code;
+ break;
+ case "inlinecode":
+ jsonml[ 0 ] = "code";
+ break;
+ case "img":
+ jsonml[ 1 ].src = jsonml[ 1 ].href;
+ delete jsonml[ 1 ].href;
+ break;
+ case "linebreak":
+ jsonml[0] = "br";
+ break;
+ case "link":
+ jsonml[ 0 ] = "a";
+ break;
+ case "link_ref":
+ jsonml[ 0 ] = "a";
+
+ // grab this ref and clean up the attribute node
+ var ref = references[ attrs.ref ];
+
+ // if the reference exists, make the link
+ if ( ref ) {
+ delete attrs.ref;
+
+ // add in the href and title, if present
+ attrs.href = ref.href;
+ if ( ref.title ) {
+ attrs.title = ref.title;
+ }
+
+ // get rid of the unneeded original text
+ delete attrs.original;
+ }
+ // the reference doesn't exist, so revert to plain text
+ else {
+ return attrs.original;
+ }
+ break;
+ }
+
+ // convert all the children
+ var i = 1;
+
+ // deal with the attribute node, if it exists
+ if ( attrs ) {
+ // if there are keys, skip over it
+ for ( var key in jsonml[ 1 ] ) {
+ i = 2;
+ }
+ // if there aren't, remove it
+ if ( i === 1 ) {
+ jsonml.splice( i, 1 );
+ }
+ }
+
+ for ( ; i < jsonml.length; ++i ) {
+ jsonml[ i ] = arguments.callee( jsonml[ i ], references );
+ }
+
+ return jsonml;
+}
+
+
+// merges adjacent text nodes into a single node
+function merge_text_nodes( jsonml ) {
+ // skip the tag name and attribute hash
+ var i = extract_attr( jsonml ) ? 2 : 1;
+
+ while ( i < jsonml.length ) {
+ // if it's a string check the next item too
+ if ( typeof jsonml[ i ] === "string" ) {
+ if ( i + 1 < jsonml.length && typeof jsonml[ i + 1 ] === "string" ) {
+ // merge the second string into the first and remove it
+ jsonml[ i ] += jsonml.splice( i + 1, 1 )[ 0 ];
+ }
+ else {
+ ++i;
+ }
+ }
+ // if it's not a string recurse
+ else {
+ arguments.callee( jsonml[ i ] );
+ ++i;
+ }
+ }
+}
+
+} )( (function() {
+ if ( typeof exports === "undefined" ) {
+ window.markdown = {};
+ return window.markdown;
+ }
+ else {
+ return exports;
+ }
+} )() );
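
The surface this file exposes, and the only part lib/ronn.js relies on, is small: parse to a markdown JsonML tree, toHTML/toHTMLTree for HTML, and renderJsonML to serialize a tree. A sketch of those calls with made-up input; note that the parser calls regexp objects as functions, which the V8 bundled with node at this time still allowed:

    var md = require('./tools/ronnjs/lib/ext/markdown');

    // markdown -> markdown-flavoured JsonML
    var mdTree = md.parse("A *fine* paragraph");
    // -> ["markdown", ["para", "A ", ["em", "fine"], " paragraph"]]

    // markdown -> HTML string (parse, convert the tree, render it)
    var html = md.toHTML("A *fine* paragraph");

    // renderJsonML serializes any JsonML array; ronn.js calls it with {root: true}
    var frag = md.renderJsonML(["p", {"class": "intro"}, "hello"], {root: true});
    // -> a <p class="intro">...</p> string (the renderer adds its own newlines)
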
diff --git a/tools/ronnjs/lib/ext/opts.js b/tools/ronnjs/lib/ext/opts.js
new file mode 100755
index 0000000000..92263b0bc9
--- /dev/null
+++ b/tools/ronnjs/lib/ext/opts.js
@@ -0,0 +1,268 @@
+/***************************************************************************
+Author : Joey Mazzarelli
+Email : mazzarelli@gmail.com
+Homepage : http://joey.mazzarelli.com/js-opts
+Source : http://bitbucket.org/mazzarell/js-opts/
+License : Simplified BSD License
+Version : 1.0
+
+Copyright 2010 Joey Mazzarelli. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY JOEY MAZZARELLI 'AS IS' AND ANY EXPRESS OR IMPLIED
+WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
+EVENT SHALL JOEY MAZZARELLI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
+OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
+ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+The views and conclusions contained in the software and documentation are those
+of the authors and should not be interpreted as representing official policies,
+either expressed or implied, of Joey Mazzarelli.
+***************************************************************************/
+
+var puts = require('sys').puts
+ , values = {}
+ , args = {}
+ , argv = []
+ , errors = []
+ , descriptors = {opts:[], args:[]};
+
+/**
+ * Add a set of option descriptors, not yet ready to be parsed.
+ * See exports.parse for description of options object
+ *
+ * Additionally, it takes a namespace as an argument, useful for
+ * building options for a library in addition to the main app.
+ */
+exports.add = function (options, namespace) {
+ for (var i=0; i or by including the option;
+ * {
+ * long : 'help',
+ * description : 'Show this help message',
+ * callback : require('./opts').help,
+ * }
+ *
+ * ===== Arguments Docs =====
+ * Arguments are different than options, and simpler. They typically come
+ * after the options, but the library really doesn't care. Each argument
+ * can have the form of:
+ * {
+ * name : 'script',
+ * required : true, // default false
+ * callback : function (value) { ... },
+ * }
+ */
+exports.parse = function (options, params, help) {
+
+ if (params === true) {
+ help = true;
+ } else if (!params) {
+ params = [];
+ } else {
+ for (var i=0; i';
+ if (opt.required) line += ' (required)';
+ str += ' ' + line + '\n';
+ }
+ return str;
+};
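
bin/ronn.js is the only consumer of this option parser in the tree, and the calls it makes are the whole surface exercised here. A trimmed-down sketch of that usage (the descriptor fields are the ones bin/ronn.js already uses; nothing beyond what this hunk shows is assumed):

    var opts = require('./tools/ronnjs/lib/ext/opts');

    var options = [
      { short: 'r', long: 'roff', description: 'Convert to roff format' },
      { long: 'manual', description: 'Set "manual" attribute', value: true }
    ];
    var args = [
      { name: 'file', required: true, description: 'A ronn file' }
    ];

    opts.parse(options, args, true);   // third argument is the parser's help switch
    var wantRoff = opts.get('roff');   // truthy when the flag was given
    var manual = opts.get('manual');   // value-taking option, cf. --manual in bin/ronn.js
    var file = opts.arg('file');       // required positional argument
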
diff --git a/tools/ronnjs/lib/ronn.js b/tools/ronnjs/lib/ronn.js
new file mode 100644
index 0000000000..884d91cd00
--- /dev/null
+++ b/tools/ronnjs/lib/ronn.js
@@ -0,0 +1,321 @@
+/* ronn.js version 0.1
+ * Copyright : 2010 Jérémy Lal
+ * License : MIT
+ */
+
+var md = require(__dirname + '/ext/markdown');
+var sys = require('sys');
+
+/* exports Ronn class
+ * usage :
+ * var ronn = new Ronn(markdownText, "1.0", "my manual name", "2010-12-25");
+ * ronn.roff();
+ * ronn.html();
+ * ronn.fragment();
+ */
+
+exports.Ronn = function(text, version, manual, date) {
+ if (!manual) manual = "";
+ if (!version) version = "";
+ if (!date) date = new Date();
+ else date = new Date(date + " GMT");
+
+ var gHtml = md.toHTMLTree(text);
+
+ this.roff = function() {
+ return blockFilter("", gHtml, {parent:null, previous:null, position:null});
+ };
+
+ this.html = function() {
+ return toHTML(gHtml);
+ };
+
+ this.fragment = function() {
+ return toHTMLfragment(gHtml);
+ };
+
+ function blockFilter(out, node, context) {
+ if (typeof node == "string") {
+ if (!node.match(/^\s*$/m)) sys.debug("unexpected text: " + node);
+ return out;
+ }
+ var tag = node.shift();
+ var attributes = null;
+ if (node.length && typeof node[0] === "object" && !(node[0] instanceof Array)) {
+ attributes = node.shift();
+ }
+ var fParent = context.parent;
+ var fPrevious = context.previous;
+ context.previous = null;
+ context.parent = tag;
+ switch (tag) {
+ case "html":
+ out = comment(out, "Generated with Ronnjs/v0.1");
+ out = comment(out, "http://github.com/kapouer/ronnjs/");
+ while (node.length) out = blockFilter(out, node.shift(), context);
+ break;
+ case "h1":
+ var fTagline = node.shift();
+ var fMatch = /([\w_.\[\]~+=@:-]+)\s*\((\d\w*)\)\s*-+\s*(.*)/.exec(fTagline);
+ var fName, fSection;
+ if (fMatch != null) {
+ fName = fMatch[1];
+ fSection = fMatch[2];
+ fTagline = fMatch[3];
+ } else {
+ fMatch = /([\w_.\[\]~+=@:-]+)\s+-+\s+(.*)/.exec(fTagline);
+ if (fMatch != null) {
+ fName = fMatch[1];
+ fTagline = fMatch[2];
+ }
+ }
+ if (fMatch == null) {
+ fName = "";
+ fSection = "";
+ fName = "";
+ }
+ out = macro(out, "TH", [
+ quote(esc(fName.toUpperCase()))
+ , quote(fSection)
+ , quote(manDate(date))
+ , quote(version)
+ , quote(manual)
+ ]);
+ out = macro(out, "SH", quote("NAME"));
+ out += "\\fB" + fName + "\\fR";
+ if (fTagline.length > 0) out += " \\-\\- " + esc(fTagline);
+ break;
+ case "h2":
+ out = macro(out, "SH", quote(esc(toHTML(node.shift()))));
+ break;
+ case "h3":
+ out = macro(out, "SS", quote(esc(toHTML(node.shift()))));
+ break;
+ case "hr":
+ out = macro(out, "HR");
+ break;
+ case "p":
+ if (fPrevious && fParent && (fParent == "dd" || fParent == "li"))
+ out = macro(out, "IP");
+ else if (fPrevious && !(fPrevious == "h1" || fPrevious == "h2" || fPrevious == "h3"))
+ out = macro(out, "P");
+ out = callInlineChildren(out, node, context);
+ break;
+ case "pre":
+ var indent = (fPrevious == null || !(fPrevious == "h1" || fPrevious == "h2" || fPrevious == "h3"));
+ if (indent) out = macro(out, "IP", [quote(""), 4]);
+ out = macro(out, "nf");
+ out = callInlineChildren(out, node, context);
+ out = macro(out, "fi");
+ if (indent) out = macro(out, "IP", [quote(""), 0]);
+ break;
+ case "dl":
+ out = macro(out, "TP");
+ while (node.length) out = blockFilter(out, node.shift(), context);
+ break;
+ case "dt":
+ if (fPrevious != null) out = macro(out, "TP");
+ out = callInlineChildren(out, node, context);
+ out += "\n";
+ break;
+ case "dd":
+ if (containsTag(node, {'p':true})) {
+ while (node.length) out = blockFilter(out, node.shift(), context);
+ } else {
+ out = callInlineChildren(out, node, context);
+ }
+ out += "\n";
+ break;
+ case "ol":
+ case "ul":
+ context.position = 0;
+ while (node.length) {
+ out = blockFilter(out, node.shift(), context);
+ }
+ context.position = null;
+ out = macro(out, "IP", [quote(""), 0]);
+ break;
+ case "li":
+ if (fParent == "ol") {
+ context.position += 1;
+ out = macro(out, "IP", [quote(context.position), 4]);
+ } else if (fParent == "ul") {
+ out = macro(out, "IP", [quote("\\(bu"), 4]);
+ }
+ if (containsTag(node, {"p":true, "ol":true, "ul":true, "dl":true, "div":true})) {
+ while (node.length) out = blockFilter(out, node.shift(), context);
+ } else {
+ out = callInlineChildren(out, node, context);
+ }
+ out += "\n";
+ break;
+ default:
+ sys.debug("unrecognized block tag: " + tag);
+ break;
+ }
+ context.parent = fParent;
+ context.previous = tag;
+ return out;
+ }
+
+ function callInlineChildren(out, node, context) {
+ while (node.length) {
+ var lChild = node.shift();
+ if (node.length > 0) context.hasNext = true;
+ else context.hasNext = false;
+ out = inlineFilter(out, lChild, context);
+ }
+ return out;
+ }
+
+ function inlineFilter(out, node, context) {
+ if (typeof node == "string") {
+ if (context.previous && context.previous == "br") node = node.replace(/^\n+/gm, '');
+ if (context.parent == "pre") {
+ // do nothing
+ } else if (context.previous == null && !context.hasNext) {
+ node = node.replace(/\n+$/gm, '');
+ } else {
+ node = node.replace(/\n+$/gm, ' ');
+ }
+ out += esc(node);
+ return out;
+ }
+ var tag = node.shift();
+ var attributes = null;
+ if (node.length && typeof node[0] === "object" && !(node[0] instanceof Array)) {
+ attributes = node.shift();
+ }
+ var fParent = context.parent;
+ var fPrevious = context.previous;
+ context.parent = tag;
+ context.previous = null;
+ switch(tag) {
+ case "code":
+ if (fParent == "pre") {
+ out = callInlineChildren(out, node, context);
+ } else {
+ out += '\\fB';
+ out = callInlineChildren(out, node, context);
+ out += '\\fR';
+ }
+ break;
+ case "b":
+ case "strong":
+ case "kbd":
+ case "samp":
+ out += '\\fB';
+ out = callInlineChildren(out, node, context);
+ out += '\\fR';
+ break;
+ case "var":
+ case "em":
+ case "i":
+ case "u":
+ out += '\\fI';
+ out = callInlineChildren(out, node, context);
+ out += '\\fR';
+ break;
+ case "br":
+ out = macro(out, "br");
+ break;
+ case "a":
+ var fStr = node[0];
+ var fHref = attributes['href'];
+ if (fHref == fStr || decodeURI(fHref) == "mailto:" + decodeURI(fStr)) {
+ out += '\\fI';
+ out = callInlineChildren(out, node, context);
+ out += '\\fR';
+ } else {
+ out = callInlineChildren(out, node, context);
+ out += " ";
+ out += '\\fI';
+ out += esc(fHref);
+ out += '\\fR';
+ }
+ break;
+ default:
+ sys.debug("unrecognized inline tag: " + tag);
+ break;
+ }
+ context.parent = fParent;
+ context.previous = tag;
+ return out;
+ }
+
+ function containsTag(node, tags) {
+ // browse ml tree searching for tags (hash {tag : true, ...})
+ if (typeof node == "string") return false;
+ var jml = node.slice(0);
+ if (jml.length == 0) return false;
+ else while (jml.length && jml[0] instanceof Array) {
+ if (containsTag(jml.shift(), tags)) return true;
+ }
+ var tag = jml.shift();
+ if (tags[tag] === true) return true;
+ if (jml.length && typeof jml[0] === "object" && !(jml[0] instanceof Array)) {
+ // skip attributes
+ jml.shift();
+ }
+ // children
+ if (jml.length) {
+ if (containsTag(jml.shift(), tags)) return true;
+ }
+ // siblings
+ if (jml.length) return containsTag(jml, tags);
+ }
+
+ function toHTML(node) {
+ // problem here: the & characters get replaced with &amp;
+ return md.renderJsonML(node, {root:true});
+ }
+
+ function toHTMLfragment(node) {
+ return md.renderJsonML(node);
+ }
+
+ function comment(out, str) {
+ return writeln(out, '.\\" ' + str);
+ }
+
+ function quote(str) {
+ return '"' + str + '"';
+ }
+
+ function esc(str) {
+ return str
+ .replace(/\\/gm, "\\\\")
+ .replace(/-/gm, "\\-")
+ .replace(/^\./gm, "\\|.")
+ .replace(/\./gm, "\\.")
+ .replace(/'/gm, "\\'")
+ ;
+ }
+
+ function writeln(out, str) {
+ if (out.length && out[out.length - 1] != "\n") out += "\n";
+ out += str + "\n";
+ return out;
+ }
+
+ function macro(out, name, list) {
+ var fText = ".\n." + name;
+ if (list != null) {
+ if (typeof list == "string") {
+ fText += ' ' + list;
+ } else {
+ for (var i=0, len=list.length; i < len; i++) {
+ var item = list[i];
+ if (item == null) continue;
+ fText += ' ' + item;
+ }
+ }
+ }
+ return writeln(out, fText);
+ }
+
+ function manDate(pDate) {
+ var fMonth = ["January", "February", "March", "April", "May", "June", "July", "August", "September", "October", "November", "December"][pDate.getMonth()];
+ return fMonth + " " + pDate.getFullYear();
+ }
+};
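
Putting it together, the Ronn class works as the usage comment at the top of this file describes: construct it with the markdown source plus optional version, manual name, and date attributes, then ask for one output format. A closing sketch (the version, manual, and date values are only placeholders):

    var sys = require('sys');
    var ronn = require('./tools/ronnjs/lib/ronn');

    var src = "node(1) -- evented I/O for V8 javascript\n" +
              "=========================================\n\n" +
              "## SYNOPSIS\n\nAn example of a web server written with Node.\n";

    // The extra arguments feed the roff .TH line: name, section, date, version, manual.
    var doc = new ronn.Ronn(src, "v0.3.0", "Node Manual", "2010-11-18");
    sys.puts(doc.roff());
    // The output begins with generator comments, then something like:
    //   .TH "NODE" "1" "November 2010" "v0.3.0" "Node Manual"
    // html() and fragment() render the same source as a full page or an HTML fragment;
    // each call consumes the parsed tree, so use one Ronn instance per output.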