isaacs
13 years ago
15 changed files with 1892 additions and 0 deletions
@ -0,0 +1,96 @@

Here's how the node docs work.

There is a 1:1 relationship from `lib/<module>.js` to
`doc/api/<module>.markdown`.

Each type of heading has a description block.


## module

    Stability: 3 - Stable

description and examples.

### module.property

* Type

description of the property.

### module.someFunction(x, y, [z=100])

* `x` {String} the description of the string
* `y` {Boolean} Should I stay or should I go?
* `z` {Number} How many zebras to bring.

A description of the function.

### Event: 'blerg'

* Argument: SomeClass object.

Modules don't usually raise events on themselves.  `cluster` is the
only exception.

## Class: SomeClass

description of the class.

### Class Method: SomeClass.classMethod(anArg)

* `anArg` {Object} Just an argument
  * `field` {String} anArg can have this field.
  * `field2` {Boolean} Another field.  Default: `false`.
* Return: {Boolean} `true` if it worked.

Description of the method for humans.

### someClass.nextSibling()

* Return: {SomeClass object | null} The next someClass in line.

### someClass.someProperty

* String

The indication of what someProperty is.

### Event: 'grelb'

* `isBlerg` {Boolean}

This event is emitted on instances of SomeClass, not on the module itself.


* Modules have (description, Properties, Functions, Classes, Examples)
* Properties have (type, description)
* Functions have (list of arguments, description)
* Classes have (description, Properties, Methods, Events)
* Events have (list of arguments, description)
* Methods have (list of arguments, description)
* Properties have (type, description)

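Run through the JSON generator below, a doc that follows these conventions
comes out roughly like this (a sketch only; fields abbreviated, values
illustrative):

    { "source": "doc/api/somemodule.markdown",
      "modules":
       [ { "name": "somemodule", "type": "module", "stability": 3,
           "properties": [ { "name": "someProperty", "type": "String" } ],
           "methods": [ { "name": "someFunction", "signatures": [ ... ] } ],
           "classes": [ { "name": "SomeClass" } ] } ] }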

## Stability ratings: 0-5

These can show up below any section heading, and apply to that section.

0 - Deprecated.  This feature is known to be problematic, and changes are
planned.  Do not rely on it.  Use of the feature may cause warnings.  Backwards
compatibility should not be expected.

1 - Experimental.  This feature was introduced recently, and may change
or be removed in future versions.  Please try it out and provide feedback.
If it addresses a use-case that is important to you, tell the node core team.

2 - Unstable.  The API is in the process of settling, but has not yet had
sufficient real-world testing to be considered stable.  Backwards-compatibility
will be maintained if reasonable.

3 - Stable.  The API has proven satisfactory, but cleanup in the underlying
code may cause minor changes.  Backwards-compatibility is guaranteed.

4 - API Frozen.  This API has been tested extensively in production and is
unlikely to ever have to change.

5 - Locked.  Unless serious bugs are found, this code will not ever
change.  Please do not suggest changes in this area; they will be refused.
@ -0,0 +1,99 @@
#!/usr/bin/env node

var marked = require('marked');
var fs = require('fs');
var path = require('path');

// parse the args.
// Don't use nopt or whatever for this.  It's simple enough.

var args = process.argv.slice(2);
var format = 'json';
var template = null;
var inputFile = null;

args.forEach(function(arg) {
  if (!arg.match(/^\-\-/)) {
    inputFile = arg;
  } else if (arg.match(/^\-\-format=/)) {
    format = arg.replace(/^\-\-format=/, '');
  } else if (arg.match(/^\-\-template=/)) {
    template = arg.replace(/^\-\-template=/, '');
  }
});
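
// Example invocations (paths illustrative; the template flag is only
// meaningful for html output):
//   node generate.js --format=json doc/api/fs.markdown
//   node generate.js --format=html --template=template.html doc/api/fs.markdown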

if (!inputFile) {
  throw new Error('No input file specified');
}


console.error('Input file = %s', inputFile);
fs.readFile(inputFile, 'utf8', function(er, input) {
  if (er) throw er;
  // process the input for @include lines
  processIncludes(input, next);
});


var includeExpr = /^@include\s+([A-Za-z0-9-_]+)(?:\.)?([a-zA-Z]*)$/gmi;
var includeData = {};
function processIncludes(input, cb) {
  var includes = input.match(includeExpr);
  if (includes === null) return cb(null, input);
  var errState = null;
  console.error(includes);
  var incCount = includes.length;
  if (incCount === 0) return cb(null, input);
  includes.forEach(function(include) {
    var fname = include.replace(/^@include\s+/, '');
    if (!fname.match(/\.markdown$/)) fname += '.markdown';

    if (includeData.hasOwnProperty(fname)) {
      input = input.split(include).join(includeData[fname]);
      incCount--;
      if (incCount === 0) {
        return cb(null, input);
      }
      // already substituted from the cache; don't read the file again.
      return;
    }

    var fullFname = path.resolve(path.dirname(inputFile), fname);
    fs.readFile(fullFname, 'utf8', function(er, inc) {
      if (errState) return;
      if (er) return cb(errState = er);
      processIncludes(inc, function(er, inc) {
        if (errState) return;
        if (er) return cb(errState = er);
        incCount--;
        includeData[fname] = inc;
        input = input.split(include).join(includeData[fname]);
        if (incCount === 0) {
          return cb(null, input);
        }
      });
    });
  });
}

function next(er, input) {
  if (er) throw er;
  switch (format) {
    case 'json':
      require('./json.js')(input, inputFile, function(er, obj) {
        if (er) throw er;
        console.log(JSON.stringify(obj, null, 2));
      });
      break;

    case 'html':
      require('./html.js')(input, inputFile, template, function(er, html) {
        if (er) throw er;
        console.log(html);
      });
      break;

    default:
      throw new Error('Invalid format: ' + format);
  }
}
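
The include expansion above is plain string replacement. A minimal sketch of
that behavior on a hypothetical input (not part of the tool itself):

``` js
// Illustrative only: how one matched `@include` line gets expanded.
var includeExpr = /^@include\s+([A-Za-z0-9-_]+)(?:\.)?([a-zA-Z]*)$/gmi;
var input = '# Intro\n\n@include child_process\n';
var include = input.match(includeExpr)[0];        // '@include child_process'
var fname = include.replace(/^@include\s+/, '');  // 'child_process'
if (!fname.match(/\.markdown$/)) fname += '.markdown';
var contents = '## child_process\n';              // pretend file contents
// split/join replaces every occurrence of the marker:
console.log(input.split(include).join(contents));
// => '# Intro\n\n## child_process\n\n'
```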
@ -0,0 +1,154 @@
var fs = require('fs');
var marked = require('marked');
var path = require('path');

module.exports = toHTML;

function toHTML(input, filename, template, cb) {
  var lexed = marked.lexer(input);
  fs.readFile(template, 'utf8', function(er, template) {
    if (er) return cb(er);
    render(lexed, filename, template, cb);
  });
}

function render(lexed, filename, template, cb) {
  // get the section
  var section = getSection(lexed);

  filename = path.basename(filename, '.markdown');

  lexed = parseLists(lexed);

  // generate the table of contents.
  // this mutates the lexed contents in-place.
  buildToc(lexed, filename, function(er, toc) {
    if (er) return cb(er);

    template = template.replace(/__FILENAME__/g, filename);
    template = template.replace(/__SECTION__/g, section);
    template = template.replace(/__VERSION__/g, process.version);
    template = template.replace(/__TOC__/g, toc);

    // content has to be the last thing we do with
    // the lexed tokens, because it's destructive.
    var content = marked.parser(lexed);
    template = template.replace(/__CONTENT__/g, content);

    cb(null, template);
  });
}


// just update the list item text in-place.
// lists that come right after a heading are what we're after.
function parseLists(input) {
  var state = null;
  var depth = 0;
  var output = [];
  output.links = input.links;
  input.forEach(function(tok) {
    if (state === null) {
      if (tok.type === 'heading') {
        state = 'AFTERHEADING';
      }
      output.push(tok);
      return;
    }
    if (state === 'AFTERHEADING') {
      if (tok.type === 'code') return;
      if (tok.type === 'list_start') {
        state = 'LIST';
        if (depth === 0) {
          output.push({ type: 'html', text: '<div class="signature">' });
        }
        depth++;
        output.push(tok);
        return;
      }
      state = null;
      output.push(tok);
      return;
    }
    if (state === 'LIST') {
      if (tok.type === 'list_start') {
        depth++;
        output.push(tok);
        return;
      }
      if (tok.type === 'list_end') {
        depth--;
        if (depth === 0) {
          state = null;
          output.push({ type: 'html', text: '</div>' });
        }
        output.push(tok);
        return;
      }
      if (tok.text) {
        tok.text = parseListItem(tok.text);
      }
    }
    output.push(tok);
  });

  return output;
}
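
// Example (illustrative): after a heading, a signature list lexed as
//   [list_start, list_item_start, text, list_item_end, list_end]
// is emitted as
//   [html '<div class="signature">', list_start, list_item_start, text,
//    list_item_end, html '</div>', list_end]
// with {Type} markers in item text rewritten by parseListItem below.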

function parseListItem(text) {
  text = text.replace(/\{([^\}]+)\}/, '<span class="type">$1</span>');
  //XXX maybe put more stuff here?
  return text;
}


// section is just the first heading
function getSection(lexed) {
  for (var i = 0, l = lexed.length; i < l; i++) {
    var tok = lexed[i];
    if (tok.type === 'heading') return tok.text;
  }
  return '';
}


function buildToc(lexed, filename, cb) {
  var toc = [];
  var depth = 0;
  lexed.forEach(function(tok) {
    if (tok.type !== 'heading') return;
    if (tok.depth - depth > 1) {
      return cb(new Error('Inappropriate heading level\n' +
                          JSON.stringify(tok)));
    }

    depth = tok.depth;
    var id = getId(filename + '_' + tok.text.trim());
    toc.push(new Array((depth - 1) * 2 + 1).join(' ') +
             '* <a href="#' + id + '">' +
             tok.text + '</a>');
    tok.text += '<span><a class="mark" href="#' + id + '" ' +
                'id="' + id + '">#</a></span>';
  });

  toc = marked.parse(toc.join('\n'));
  cb(null, toc);
}

var idCounters = {};
function getId(text) {
  text = text.toLowerCase();
  text = text.replace(/[^a-z0-9]+/g, '_');
  text = text.replace(/^_+|_+$/, '');
  text = text.replace(/^([^a-z])/, '_$1');
  if (idCounters.hasOwnProperty(text)) {
    text += '_' + (++idCounters[text]);
  } else {
    idCounters[text] = 0;
  }
  return text;
}
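A quick sketch of `getId`'s slug and de-duplication behavior (hypothetical
inputs):

``` js
getId('fs_fs.readFile(filename)');  // => 'fs_fs_readfile_filename'
getId('fs_fs.readFile(filename)');  // => 'fs_fs_readfile_filename_1' (collision)
getId('3rd party');                 // => '_3rd_party' (ids cannot start with a digit)
```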
@ -0,0 +1,530 @@
module.exports = doJSON;

// Take the lexed input, and return a JSON-encoded object.
// A module looks like this: https://gist.github.com/1777387

var marked = require('marked');

function doJSON(input, filename, cb) {
  var root = { source: filename };
  var stack = [root];
  var depth = 0;
  var current = root;
  var state = null;
  var lexed = marked.lexer(input);
  lexed.forEach(function(tok) {
    var type = tok.type;
    var text = tok.text;

    // <!-- type = module -->
    // This is for cases where the markdown semantic structure is lacking.
    if (type === 'paragraph' || type === 'html') {
      var metaExpr = /<!--([^=]+)=([^\-]+)-->\n*/g;
      text = text.replace(metaExpr, function(_0, k, v) {
        current[k.trim()] = v.trim();
        return '';
      });
      text = text.trim();
      if (!text) return;
    }

    if (type === 'heading' &&
        !text.trim().match(/^example/i)) {
      if (tok.depth - depth > 1) {
        return cb(new Error('Inappropriate heading level\n' +
                            JSON.stringify(tok)));
      }

      // Sometimes we have two headings with a single
      // blob of description.  Treat as a clone.
      if (current &&
          state === 'AFTERHEADING' &&
          depth === tok.depth) {
        var clone = current;
        current = newSection(tok);
        current.clone = clone;
        // don't keep it around on the stack.
        stack.pop();
      } else {
        // If the level is greater than the current depth,
        // then it's a child, so we should just leave the stack
        // as it is.
        // However, if it's a sibling or higher, then it implies
        // the closure of the other sections that came before.
        // root is always considered the level=0 section,
        // and the lowest heading is 1, so this should always
        // result in having a valid parent node.
        var d = tok.depth;
        while (d <= depth) {
          finishSection(stack.pop(), stack[stack.length - 1]);
          d++;
        }
        current = newSection(tok);
      }

      depth = tok.depth;
      stack.push(current);
      state = 'AFTERHEADING';
      return;
    } // heading

    // Immediately after a heading, we can expect the following:
    //
    // a stability code block: { type: 'code', text: 'Stability: ...' },
    //
    // or a list: starting with list_start, ending with list_end,
    // maybe containing other nested lists in each item.
    //
    // If neither of these is found, then anything that comes between
    // here and the next heading should be parsed as the desc.
    var stability;
    if (state === 'AFTERHEADING') {
      if (type === 'code' &&
          (stability = text.match(/^Stability: ([0-5])(?:\s*-\s*)?(.*)$/))) {
        current.stability = parseInt(stability[1], 10);
        current.stabilityText = stability[2].trim();
        return;
      } else if (type === 'list_start' && !tok.ordered) {
        state = 'AFTERHEADING_LIST';
        current.list = current.list || [];
        current.list.push(tok);
        current.list.level = 1;
      } else {
        current.desc = current.desc || [];
        if (!Array.isArray(current.desc)) {
          current.shortDesc = current.desc;
          current.desc = [];
        }
        current.desc.push(tok);
        state = 'DESC';
      }
      return;
    }

    if (state === 'AFTERHEADING_LIST') {
      current.list.push(tok);
      if (type === 'list_start') {
        current.list.level++;
      } else if (type === 'list_end') {
        current.list.level--;
      }
      if (current.list.level === 0) {
        state = 'AFTERHEADING';
        processList(current);
      }
      return;
    }

    current.desc = current.desc || [];
    current.desc.push(tok);
  });

  // finish any sections left open
  while (root !== (current = stack.pop())) {
    finishSection(current, stack[stack.length - 1]);
  }

  return cb(null, root);
}


// go from something like this:
//
// [ { type: 'list_item_start' },
//   { type: 'text',
//     text: '`settings` Object, Optional' },
//   { type: 'list_start', ordered: false },
//   { type: 'list_item_start' },
//   { type: 'text',
//     text: 'exec: String, file path to worker file.  Default: `__filename`' },
//   { type: 'list_item_end' },
//   { type: 'list_item_start' },
//   { type: 'text',
//     text: 'args: Array, string arguments passed to worker.' },
//   { type: 'text',
//     text: 'Default: `process.argv.slice(2)`' },
//   { type: 'list_item_end' },
//   { type: 'list_item_start' },
//   { type: 'text',
//     text: 'silent: Boolean, whether or not to send output to parent\'s stdio.' },
//   { type: 'text', text: 'Default: `false`' },
//   { type: 'space' },
//   { type: 'list_item_end' },
//   { type: 'list_end' },
//   { type: 'list_item_end' },
//   { type: 'list_end' } ]
//
// to something like this:
//
// [ { name: 'settings',
//     type: 'object',
//     optional: true,
//     settings:
//      [ { name: 'exec',
//          type: 'string',
//          desc: 'file path to worker file',
//          default: '__filename' },
//        { name: 'args',
//          type: 'array',
//          default: 'process.argv.slice(2)',
//          desc: 'string arguments passed to worker.' },
//        { name: 'silent',
//          type: 'boolean',
//          desc: 'whether or not to send output to parent\'s stdio.',
//          default: 'false' } ] } ]

function processList(section) {
  var list = section.list;
  var values = [];
  var current;
  var stack = [];

  // for now, *just* build the hierarchical list
  list.forEach(function(tok) {
    var type = tok.type;
    if (type === 'space') return;
    if (type === 'list_item_start') {
      if (!current) {
        var n = {};
        values.push(n);
        current = n;
      } else {
        current.options = current.options || [];
        stack.push(current);
        var n = {};
        current.options.push(n);
        current = n;
      }
      return;
    } else if (type === 'list_item_end') {
      if (!current) {
        throw new Error('invalid list - end without current item\n' +
                        JSON.stringify(tok) + '\n' +
                        JSON.stringify(list));
      }
      current = stack.pop();
    } else if (type === 'text') {
      if (!current) {
        throw new Error('invalid list - text without current item\n' +
                        JSON.stringify(tok) + '\n' +
                        JSON.stringify(list));
      }
      current.textRaw = current.textRaw || '';
      current.textRaw += tok.text + ' ';
    }
  });

  // shove the name in there for properties, since they are always
  // just going to be the value etc.
  if (section.type === 'property' && values[0]) {
    values[0].textRaw = '`' + section.name + '` ' + values[0].textRaw;
  }

  // now pull the actual values out of the text bits.
  values.forEach(parseListItem);

  // Now figure out what this list actually means.
  // Depending on the section type, the list could be different things.

  switch (section.type) {
    case 'ctor':
    case 'classMethod':
    case 'method':
      // each item is an argument, unless the name is 'return',
      // in which case it's the return value.
      section.signatures = section.signatures || [];
      var sig = {};
      section.signatures.push(sig);
      sig.params = values.filter(function(v) {
        if (v.name === 'return') {
          sig.return = v;
          return false;
        }
        return true;
      });
      parseSignature(section.textRaw, sig);
      break;

    case 'property':
      // there should be only one item, which is the value.
      // copy the data up to the section.
      var value = values[0] || {};
      delete value.name;
      section.typeof = value.type;
      delete value.type;
      Object.keys(value).forEach(function(k) {
        section[k] = value[k];
      });
      break;

    case 'event':
      // event: each item is an argument.
      section.params = values;
      break;
  }

  // section.listParsed = values;
  delete section.list;
}


// textRaw = "someobject.someMethod(a, [b=100], [c])"
function parseSignature(text, sig) {
  var params = text.match(paramExpr);
  if (!params) return;
  params = params[1];
  // the ] is irrelevant.  [ indicates optionality.
  params = params.replace(/\]/g, '');
  params = params.split(/,/);
  params.forEach(function(p, i) {
    p = p.trim();
    if (!p) return;
    var param = sig.params[i];
    var optional = false;
    var def;
    // [foo] -> optional
    if (p.charAt(0) === '[') {
      optional = true;
      p = p.substr(1);
    }
    var eq = p.indexOf('=');
    if (eq !== -1) {
      def = p.substr(eq + 1);
      p = p.substr(0, eq);
    }
    if (!param) {
      param = sig.params[i] = { name: p };
    }
    // at this point, the name should match.
    if (p !== param.name) {
      console.error('Warning: invalid param "%s"', p);
      console.error(' > ' + JSON.stringify(param));
      console.error(' > ' + text);
    }
    if (optional) param.optional = true;
    if (def !== undefined) param.default = def;
  });
}
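
// Example (illustrative): with textRaw 'someobject.someMethod(a, [b=100], [c])'
// and params already collected from the bullet list, sig.params becomes
//   [ { name: 'a' },
//     { name: 'b', optional: true, default: '100' },
//     { name: 'c', optional: true } ]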

function parseListItem(item) {
  if (item.options) item.options.forEach(parseListItem);
  if (!item.textRaw) return;

  // the goal here is to find the name, type, default, and optional.
  // anything left over is 'desc'.
  var text = item.textRaw.trim();
  // text = text.replace(/^(Argument|Param)s?\s*:?\s*/i, '');

  text = text.replace(/^, /, '').trim();
  var retExpr = /^returns?\s*:?\s*/i;
  var ret = text.match(retExpr);
  if (ret) {
    item.name = 'return';
    text = text.replace(retExpr, '');
  } else {
    var nameExpr = /^['`"]?([^'`": \{]+)['`"]?\s*:?\s*/;
    var name = text.match(nameExpr);
    if (name) {
      item.name = name[1];
      text = text.replace(nameExpr, '');
    }
  }

  text = text.trim();
  var defaultExpr = /\(default\s*[:=]?\s*['"`]?([^, '"`]*)['"`]?\)/i;
  var def = text.match(defaultExpr);
  if (def) {
    item.default = def[1];
    text = text.replace(defaultExpr, '');
  }

  text = text.trim();
  var typeExpr = /^\{([^\}]+)\}/;
  var type = text.match(typeExpr);
  if (type) {
    item.type = type[1];
    text = text.replace(typeExpr, '');
  }

  text = text.trim();
  var optExpr = /^Optional\.|(?:, )?Optional$/;
  var optional = text.match(optExpr);
  if (optional) {
    item.optional = true;
    text = text.replace(optExpr, '');
  }

  text = text.replace(/^\s*-\s*/, '');
  text = text.trim();
  if (text) item.desc = text;
}
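
// Example (illustrative): an item with
//   textRaw: '`x` {String} the description of the string'
// comes out as
//   { textRaw: '`x` {String} the description of the string',
//     name: 'x', type: 'String', desc: 'the description of the string' }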

function finishSection(section, parent) {
  if (!section || !parent) {
    throw new Error('Invalid finishSection call\n' +
                    JSON.stringify(section) + '\n' +
                    JSON.stringify(parent));
  }

  if (!section.type) {
    section.type = 'module';
    section.displayName = section.name;
    section.name = section.name.toLowerCase()
                   .trim().replace(/\s+/g, '_');
  }

  if (section.desc && Array.isArray(section.desc)) {
    section.desc.links = section.desc.links || [];
    section.desc = marked.parser(section.desc);
  }

  if (!section.list) section.list = [];
  processList(section);

  // classes sometimes have various 'ctor' children
  // which are actually just descriptions of a constructor
  // class signature.
  // Merge them into the parent.
  if (section.type === 'class' && section.ctors) {
    section.signatures = section.signatures || [];
    var sigs = section.signatures;
    section.ctors.forEach(function(ctor) {
      ctor.signatures = ctor.signatures || [{}];
      ctor.signatures.forEach(function(sig) {
        sig.desc = ctor.desc;
      });
      sigs.push.apply(sigs, ctor.signatures);
    });
    delete section.ctors;
  }

  // properties are a bit special.
  // their "type" is the type of object, not "property"
  if (section.properties) {
    section.properties.forEach(function(p) {
      if (p.typeof) p.type = p.typeof;
      else delete p.type;
      delete p.typeof;
    });
  }

  // handle clones
  if (section.clone) {
    var clone = section.clone;
    delete section.clone;
    delete clone.clone;
    deepCopy(section, clone);
    finishSection(clone, parent);
  }

  var plur;
  if (section.type.slice(-1) === 's') {
    plur = section.type + 'es';
  } else if (section.type.slice(-1) === 'y') {
    plur = section.type.replace(/y$/, 'ies');
  } else {
    plur = section.type + 's';
  }

  // if the section's type is 'misc', then it's just a random
  // collection of stuff, like the "globals" section.
  // Make the children top-level items.
  if (section.type === 'misc') {
    Object.keys(section).forEach(function(k) {
      switch (k) {
        case 'textRaw':
        case 'name':
        case 'type':
        case 'desc':
        case 'miscs':
          return;
        default:
          if (Array.isArray(section[k]) && parent[k]) {
            parent[k] = parent[k].concat(section[k]);
          } else if (!parent[k]) {
            parent[k] = section[k];
          } else {
            // parent already has it, and it's not an array.
            return;
          }
      }
    });
  }

  parent[plur] = parent[plur] || [];
  parent[plur].push(section);
}


// Not a general purpose deep copy.
// But sufficient for these basic things.
function deepCopy(src, dest) {
  Object.keys(src).filter(function(k) {
    return !dest.hasOwnProperty(k);
  }).forEach(function(k) {
    dest[k] = deepCopy_(src[k]);
  });
}

function deepCopy_(src) {
  if (!src) return src;
  if (Array.isArray(src)) {
    var c = new Array(src.length);
    src.forEach(function(v, i) {
      c[i] = deepCopy_(v);
    });
    return c;
  }
  if (typeof src === 'object') {
    var c = {};
    Object.keys(src).forEach(function(k) {
      c[k] = deepCopy_(src[k]);
    });
    return c;
  }
  return src;
}


// these parse out the contents of an H# tag
var eventExpr = /^Event:?\s*['"]?([^"']+).*$/i;
var classExpr = /^Class:\s*([^ ]+).*?$/i;
var propExpr = /^(?:property:?\s*)?[^\.]+\.([^ \.\(\)]+)\s*?$/i;
var braceExpr = /^(?:property:?\s*)?[^\.\[]+(\[[^\]]+\])\s*?$/i;
var classMethExpr =
    /^class\s*method\s*:?[^\.]+\.([^ \.\(\)]+)\([^\)]*\)\s*?$/i;
var methExpr =
    /^(?:method:?\s*)?(?:[^\.]+\.)?([^ \.\(\)]+)\([^\)]*\)\s*?$/i;
var newExpr = /^new ([A-Z][a-z]+)\([^\)]*\)\s*?$/;
var paramExpr = /\((.*)\);?$/;

function newSection(tok) {
  var section = {};
  // infer the type from the text.
  var text = section.textRaw = tok.text;
  if (text.match(eventExpr)) {
    section.type = 'event';
    section.name = text.replace(eventExpr, '$1');
  } else if (text.match(classExpr)) {
    section.type = 'class';
    section.name = text.replace(classExpr, '$1');
  } else if (text.match(braceExpr)) {
    section.type = 'property';
    section.name = text.replace(braceExpr, '$1');
  } else if (text.match(propExpr)) {
    section.type = 'property';
    section.name = text.replace(propExpr, '$1');
  } else if (text.match(classMethExpr)) {
    section.type = 'classMethod';
    section.name = text.replace(classMethExpr, '$1');
  } else if (text.match(methExpr)) {
    section.type = 'method';
    section.name = text.replace(methExpr, '$1');
  } else if (text.match(newExpr)) {
    section.type = 'ctor';
    section.name = text.replace(newExpr, '$1');
  } else {
    section.name = text;
  }
  return section;
}
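
A minimal usage sketch of the generator above (the input is hypothetical; the
commented output shape is abbreviated and illustrative):

``` js
var doJSON = require('./json.js');

var input = '# util\n\n' +
            '    Stability: 3 - Stable\n\n' +
            '## util.format(format, [...])\n\n' +
            '* `format` {String} a printf-like format string\n\n' +
            'Returns a formatted string.\n';

doJSON(input, 'util.markdown', function(er, root) {
  if (er) throw er;
  // root comes out roughly as:
  // { source: 'util.markdown',
  //   modules: [ { name: 'util', type: 'module', stability: 3,
  //                methods: [ { name: 'format', signatures: [ ... ] } ] } ] }
  console.log(JSON.stringify(root, null, 2));
});
```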
@ -0,0 +1 @@
../marked/bin/marked
@ -0,0 +1,2 @@
.git*
test/
@ -0,0 +1,19 @@
Copyright (c) 2011-2012, Christopher Jeffrey (https://github.com/chjj/)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@ -0,0 +1,9 @@
all:
	@cp lib/marked.js marked.js
	@uglifyjs -o marked.min.js marked.js

clean:
	@rm marked.js
	@rm marked.min.js

.PHONY: clean all
@ -0,0 +1,135 @@
# marked

A full-featured markdown parser and compiler.
Built for speed.

## Benchmarks

node v0.4.x

``` bash
$ node test --bench
marked completed in 12071ms.
showdown (reuse converter) completed in 27387ms.
showdown (new converter) completed in 75617ms.
markdown-js completed in 70069ms.
```

node v0.6.x

``` bash
$ node test --bench
marked completed in 6485ms.
marked (with gfm) completed in 7466ms.
discount completed in 7169ms.
showdown (reuse converter) completed in 15937ms.
showdown (new converter) completed in 18279ms.
markdown-js completed in 23572ms.
```

__Marked is now faster than Discount, which is written in C.__

For those feeling skeptical: These benchmarks run the entire markdown test
suite 1000 times. The test suite tests every feature. It doesn't cater to
specific aspects.

Benchmarks for other engines to come (?).

## Install

``` bash
$ npm install marked
```

## Another javascript markdown parser

The point of marked was to create a markdown compiler where it was possible to
frequently parse huge chunks of markdown without having to worry about
caching the compiled output somehow...or blocking for an unnecessarily long time.

marked is very concise and still implements all markdown features. It is also
now fully compatible with the client side.

marked more or less passes the official markdown test suite in its
entirety. This is important because a surprising number of markdown compilers
cannot pass more than a few tests. It was very difficult to get marked as
compliant as it is. It could have cut corners in several areas for the sake
of performance, but did not in order to be exactly what you expect in terms
of a markdown rendering. In fact, this is why marked could be considered at a
disadvantage in the benchmarks above.

Along with implementing every markdown feature, marked also implements
[GFM features](http://github.github.com/github-flavored-markdown/).

## Usage

``` js
var marked = require('marked');
console.log(marked('i am using __markdown__.'));
```

You also have direct access to the lexer and parser if you so desire.

``` js
var tokens = marked.lexer(str);
console.log(marked.parser(tokens));
```

``` bash
$ node
> require('marked').lexer('> i am using marked.')
[ { type: 'blockquote_start' },
  { type: 'text', text: ' i am using marked.' },
  { type: 'blockquote_end' },
  links: {} ]
```

## CLI

``` bash
$ marked -o hello.html
hello world
^D
$ cat hello.html
<p>hello world</p>
```

## Syntax Highlighting

Marked has an interface that allows for a syntax highlighter to highlight code
blocks before they're output.

Example implementation:

``` js
var highlight = require('my-syntax-highlighter')
  , marked_ = require('marked');

var marked = function(text) {
  var tokens = marked_.lexer(text)
    , l = tokens.length
    , i = 0
    , token;

  for (; i < l; i++) {
    token = tokens[i];
    if (token.type === 'code') {
      token.text = highlight(token.text, token.lang);
      // marked should not escape this
      token.escaped = true;
    }
  }

  text = marked_.parser(tokens);

  return text;
};

module.exports = marked;
```

## License

Copyright (c) 2011-2012, Christopher Jeffrey. (MIT License)

See LICENSE for more info.
@ -0,0 +1,115 @@
#!/usr/bin/env node

/**
 * Marked CLI
 * Copyright (c) 2011-2012, Christopher Jeffrey (MIT License)
 */

var fs = require('fs')
  , util = require('util')
  , marked = require('../');

/**
 * Man Page
 */

var help = function() {
  var spawn = require('child_process').spawn;

  var options = {
    cwd: process.cwd(),
    env: process.env,
    setsid: false,
    customFds: [0, 1, 2]
  };

  spawn('man',
        [__dirname + '/../man/marked.1'],
        options);
};

/**
 * Main
 */

var main = function(argv) {
  var files = []
    , data = ''
    , input
    , output
    , arg
    , tokens;

  var getarg = function() {
    var arg = argv.shift();
    arg = arg.split('=');
    if (arg.length > 1) {
      argv.unshift(arg.slice(1).join('='));
    }
    return arg[0];
  };
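
  // Example (illustrative): with argv ['--output=out.html', 'in.md'],
  // getarg() returns '--output' and unshifts 'out.html' back onto argv,
  // so the following argv.shift() picks it up as the option's value.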

  while (argv.length) {
    arg = getarg();
    switch (arg) {
      case '-o':
      case '--output':
        output = argv.shift();
        break;
      case '-i':
      case '--input':
        input = argv.shift();
        break;
      case '-t':
      case '--tokens':
        tokens = true;
        break;
      case '-h':
      case '--help':
        return help();
      default:
        files.push(arg);
        break;
    }
  }

  if (!input) {
    // Note: `files` still holds the leading 'node' and script-path
    // arguments, so fewer than three entries means no input file was given.
    if (files.length <= 2) {
      var stdin = process.stdin;

      stdin.setEncoding('utf8');
      stdin.resume();

      stdin.on('data', function(text) {
        data += text;
      });

      stdin.on('end', write);

      return;
    }
    input = files.pop();
  }

  data = fs.readFileSync(input, 'utf8');
  write();

  function write() {
    data = tokens
      ? JSON.stringify(marked.lexer(data), null, 2)
      : marked(data);

    if (!output) {
      process.stdout.write(data + '\n');
    } else {
      fs.writeFileSync(output, data);
    }
  }
};

if (!module.parent) {
  process.title = 'marked';
  main(process.argv.slice());
} else {
  module.exports = main;
}
@ -0,0 +1 @@
module.exports = require('./lib/marked');
@ -0,0 +1,662 @@
/**
 * marked - A markdown parser (https://github.com/chjj/marked)
 * Copyright (c) 2011-2012, Christopher Jeffrey. (MIT Licensed)
 */

;(function() {

/**
 * Block-Level Grammar
 */

var block = {
  newline: /^\n+/,
  code: /^ {4,}[^\n]*(?:\n {4,}[^\n]*|\n)*(?:\n+|$)/,
  gfm_code: /^ *``` *(\w+)? *\n([^\0]+?)\s*``` *(?:\n+|$)/,
  hr: /^( *[\-*_]){3,} *(?:\n+|$)/,
  heading: /^ *(#{1,6}) *([^\n]+?) *#* *(?:\n+|$)/,
  lheading: /^([^\n]+)\n *(=|-){3,} *\n*/,
  blockquote: /^( *>[^\n]+(\n[^\n]+)*\n*)+/,
  list: /^( *)([*+-]|\d+\.) [^\0]+?(?:\n{2,}(?! )|\s*$)(?!\1bullet)\n*/,
  html: /^ *(?:comment|closed|closing) *(?:\n{2,}|\s*$)/,
  def: /^ *\[([^\]]+)\]: *([^\s]+)(?: +["(]([^\n]+)[")])? *(?:\n+|$)/,
  paragraph: /^([^\n]+\n?(?!body))+\n*/,
  text: /^[^\n]+/
};

block.list = (function() {
  var list = block.list.source;

  list = list
    .replace('bullet', /(?:[*+-](?!(?: *[-*]){2,})|\d+\.)/.source);

  return new RegExp(list);
})();

block.html = (function() {
  var html = block.html.source;

  html = html
    .replace('comment', /<!--[^\0]*?-->/.source)
    .replace('closed', /<(tag)[^\0]+?<\/\1>/.source)
    .replace('closing', /<tag(?!:\/|@)\b(?:"[^"]*"|'[^']*'|[^'">])*?>/.source)
    .replace(/tag/g, tag());

  return new RegExp(html);
})();

block.paragraph = (function() {
  var paragraph = block.paragraph.source
    , body = [];

  (function push(rule) {
    rule = block[rule] ? block[rule].source : rule;
    body.push(rule.replace(/(^|[^\[])\^/g, '$1'));
    return push;
  })
  ('gfm_code')
  ('hr')
  ('heading')
  ('lheading')
  ('blockquote')
  ('<' + tag())
  ('def');

  return new RegExp(paragraph.replace('body', body.join('|')));
})();
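
// Example (illustrative): the 'body' placeholder in block.paragraph is
// replaced with an alternation of the sources above (anchors stripped),
// so a paragraph stops before a line that would start a gfm_code fence,
// hr, heading, lheading, blockquote, opening tag, or link def.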

/**
 * Block Lexer
 */

block.lexer = function(src) {
  var tokens = [];

  tokens.links = {};

  src = src
    .replace(/\r\n|\r/g, '\n')
    .replace(/\t/g, '    ');

  return block.token(src, tokens, true);
};

block.token = function(src, tokens, top) {
  var src = src.replace(/^ +$/gm, '')
    , next
    , loose
    , cap
    , item
    , space
    , i
    , l;

  while (src) {
    // newline
    if (cap = block.newline.exec(src)) {
      src = src.substring(cap[0].length);
      if (cap[0].length > 1) {
        tokens.push({
          type: 'space'
        });
      }
    }

    // code
    if (cap = block.code.exec(src)) {
      src = src.substring(cap[0].length);
      cap = cap[0].replace(/^ {4}/gm, '');
      tokens.push({
        type: 'code',
        text: cap.replace(/\n+$/, '')
      });
      continue;
    }

    // gfm_code
    if (cap = block.gfm_code.exec(src)) {
      src = src.substring(cap[0].length);
      tokens.push({
        type: 'code',
        lang: cap[1],
        text: cap[2]
      });
      continue;
    }

    // heading
    if (cap = block.heading.exec(src)) {
      src = src.substring(cap[0].length);
      tokens.push({
        type: 'heading',
        depth: cap[1].length,
        text: cap[2]
      });
      continue;
    }

    // lheading
    if (cap = block.lheading.exec(src)) {
      src = src.substring(cap[0].length);
      tokens.push({
        type: 'heading',
        depth: cap[2] === '=' ? 1 : 2,
        text: cap[1]
      });
      continue;
    }

    // hr
    if (cap = block.hr.exec(src)) {
      src = src.substring(cap[0].length);
      tokens.push({
        type: 'hr'
      });
      continue;
    }

    // blockquote
    if (cap = block.blockquote.exec(src)) {
      src = src.substring(cap[0].length);
      tokens.push({
        type: 'blockquote_start'
      });

      cap = cap[0].replace(/^ *> ?/gm, '');

      // Pass `top` to keep the current
      // "toplevel" state. This is exactly
      // how markdown.pl works.
      block.token(cap, tokens, top);

      tokens.push({
        type: 'blockquote_end'
      });
      continue;
    }

    // list
    if (cap = block.list.exec(src)) {
      src = src.substring(cap[0].length);

      tokens.push({
        type: 'list_start',
        ordered: isFinite(cap[2])
      });

      // Get each top-level item.
      cap = cap[0].match(
        /^( *)([*+-]|\d+\.)[^\n]*(?:\n(?!\1(?:[*+-]|\d+\.))[^\n]*)*/gm
      );

      next = false;
      l = cap.length;
      i = 0;

      for (; i < l; i++) {
        item = cap[i];

        // Remove the list item's bullet
        // so it is seen as the next token.
        space = item.length;
        item = item.replace(/^ *([*+-]|\d+\.) */, '');

        // Outdent whatever the
        // list item contains. Hacky.
        if (~item.indexOf('\n ')) {
          space -= item.length;
          item = item.replace(new RegExp('^ {1,' + space + '}', 'gm'), '');
        }

        // Determine whether item is loose or not.
        // Use: /(^|\n)(?! )[^\n]+\n\n(?!\s*$)/
        // for discount behavior.
        loose = next || /\n\n(?!\s*$)/.test(item);
        if (i !== l - 1) {
          next = item[item.length - 1] === '\n';
          if (!loose) loose = next;
        }

        tokens.push({
          type: loose
            ? 'loose_item_start'
            : 'list_item_start'
        });

        // Recurse.
        block.token(item, tokens);

        tokens.push({
          type: 'list_item_end'
        });
      }

      tokens.push({
        type: 'list_end'
      });

      continue;
    }

    // html
    if (cap = block.html.exec(src)) {
      src = src.substring(cap[0].length);
      tokens.push({
        type: 'html',
        text: cap[0]
      });
      continue;
    }

    // def
    if (top && (cap = block.def.exec(src))) {
      src = src.substring(cap[0].length);
      tokens.links[cap[1].toLowerCase()] = {
        href: cap[2],
        title: cap[3]
      };
      continue;
    }

    // top-level paragraph
    if (top && (cap = block.paragraph.exec(src))) {
      src = src.substring(cap[0].length);
      tokens.push({
        type: 'paragraph',
        text: cap[0]
      });
      continue;
    }

    // text
    if (cap = block.text.exec(src)) {
      // Top-level should never reach here.
      src = src.substring(cap[0].length);
      tokens.push({
        type: 'text',
        text: cap[0]
      });
      continue;
    }
  }

  return tokens;
};

/**
 * Inline Processing
 */

var inline = {
  escape: /^\\([\\`*{}\[\]()#+\-.!_>])/,
  autolink: /^<([^ >]+(@|:\/)[^ >]+)>/,
  gfm_autolink: /^(\w+:\/\/[^\s]+[^.,:;"')\]\s])/,
  tag: /^<!--[^\0]*?-->|^<\/?\w+(?:"[^"]*"|'[^']*'|[^'">])*?>/,
  link: /^!?\[((?:\[[^\]]*\]|[^\[\]]|\[|\](?=[^[\]]*\]))*)\]\(([^\)]*)\)/,
  reflink: /^!?\[((?:\[[^\]]*\]|[^\[\]]|\[|\](?=[^[\]]*\]))*)\]\s*\[([^\]]*)\]/,
  nolink: /^!?\[((?:\[[^\]]*\]|[^\[\]])*)\]/,
  strong: /^__([^\0]+?)__(?!_)|^\*\*([^\0]+?)\*\*(?!\*)/,
  em: /^\b_([^\0]+?)_\b|^\*((?:\*\*|[^\0])+?)\*(?!\*)/,
  code: /^(`+)([^\0]*?[^`])\1(?!`)/,
  br: /^ {2,}\n(?!\s*$)/,
  text: /^[^\0]+?(?=[\\<!\[_*`]|\w+:\/\/| {2,}\n|$)/
};

/**
 * Inline Lexer
 */

inline.lexer = function(src) {
  var out = ''
    , links = tokens.links
    , link
    , text
    , href
    , cap;

  while (src) {
    // escape
    if (cap = inline.escape.exec(src)) {
      src = src.substring(cap[0].length);
      out += cap[1];
      continue;
    }

    // autolink
    if (cap = inline.autolink.exec(src)) {
      src = src.substring(cap[0].length);
      if (cap[2] === '@') {
        text = cap[1][6] === ':'
          ? mangle(cap[1].substring(7))
          : mangle(cap[1]);
        href = mangle('mailto:') + text;
      } else {
        text = escape(cap[1]);
        href = text;
      }
      out += '<a href="'
        + href
        + '">'
        + text
        + '</a>';
      continue;
    }

    // gfm_autolink
    if (cap = inline.gfm_autolink.exec(src)) {
      src = src.substring(cap[0].length);
      text = escape(cap[1]);
      href = text;
      out += '<a href="'
        + href
        + '">'
        + text
        + '</a>';
      continue;
    }

    // tag
    if (cap = inline.tag.exec(src)) {
      src = src.substring(cap[0].length);
      out += cap[0];
      continue;
    }

    // link
    if (cap = inline.link.exec(src)) {
      src = src.substring(cap[0].length);
      text = /^\s*<?([^\s]*?)>?(?:\s+"([^\n]+)")?\s*$/.exec(cap[2]);
      if (!text) {
        out += cap[0][0];
        src = cap[0].substring(1) + src;
        continue;
      }
      out += outputLink(cap, {
        href: text[1],
        title: text[2]
      });
      continue;
    }

    // reflink, nolink
    if ((cap = inline.reflink.exec(src))
        || (cap = inline.nolink.exec(src))) {
      src = src.substring(cap[0].length);
      link = (cap[2] || cap[1]).replace(/\s+/g, ' ');
      link = links[link.toLowerCase()];
      if (!link || !link.href) {
        out += cap[0][0];
        src = cap[0].substring(1) + src;
        continue;
      }
      out += outputLink(cap, link);
      continue;
    }

    // strong
    if (cap = inline.strong.exec(src)) {
      src = src.substring(cap[0].length);
      out += '<strong>'
        + inline.lexer(cap[2] || cap[1])
        + '</strong>';
      continue;
    }

    // em
    if (cap = inline.em.exec(src)) {
      src = src.substring(cap[0].length);
      out += '<em>'
        + inline.lexer(cap[2] || cap[1])
        + '</em>';
      continue;
    }

    // code
    if (cap = inline.code.exec(src)) {
      src = src.substring(cap[0].length);
      out += '<code>'
        + escape(cap[2], true)
        + '</code>';
      continue;
    }

    // br
    if (cap = inline.br.exec(src)) {
      src = src.substring(cap[0].length);
      out += '<br>';
      continue;
    }

    // text
    if (cap = inline.text.exec(src)) {
      src = src.substring(cap[0].length);
      out += escape(cap[0]);
      continue;
    }
  }

  return out;
};

var outputLink = function(cap, link) {
  if (cap[0][0] !== '!') {
    return '<a href="'
      + escape(link.href)
      + '"'
      + (link.title
        ? ' title="'
          + escape(link.title)
          + '"'
        : '')
      + '>'
      + inline.lexer(cap[1])
      + '</a>';
  } else {
    return '<img src="'
      + escape(link.href)
      + '" alt="'
      + escape(cap[1])
      + '"'
      + (link.title
        ? ' title="'
          + escape(link.title)
          + '"'
        : '')
      + '>';
  }
};

/**
 * Parsing
 */

var tokens
  , token;

var next = function() {
  return token = tokens.pop();
};

var tok = function() {
  switch (token.type) {
    case 'space': {
      return '';
    }
    case 'hr': {
      return '<hr>\n';
    }
    case 'heading': {
      return '<h'
        + token.depth
        + '>'
        + inline.lexer(token.text)
        + '</h'
        + token.depth
        + '>\n';
    }
    case 'code': {
      return '<pre><code'
        + (token.lang
          ? ' class="'
            + token.lang
            + '"'
          : '')
        + '>'
        + (token.escaped
          ? token.text
          : escape(token.text, true))
        + '</code></pre>\n';
    }
    case 'blockquote_start': {
      var body = '';

      while (next().type !== 'blockquote_end') {
        body += tok();
      }

      return '<blockquote>\n'
        + body
        + '</blockquote>\n';
    }
    case 'list_start': {
      var type = token.ordered ? 'ol' : 'ul'
        , body = '';

      while (next().type !== 'list_end') {
        body += tok();
      }

      return '<'
        + type
        + '>\n'
        + body
        + '</'
        + type
        + '>\n';
    }
    case 'list_item_start': {
      var body = '';

      while (next().type !== 'list_item_end') {
        body += token.type === 'text'
          ? parseText()
          : tok();
      }

      return '<li>'
        + body
        + '</li>\n';
    }
    case 'loose_item_start': {
      var body = '';

      while (next().type !== 'list_item_end') {
        body += tok();
      }

      return '<li>'
        + body
        + '</li>\n';
    }
    case 'html': {
      return inline.lexer(token.text);
    }
    case 'paragraph': {
      return '<p>'
        + inline.lexer(token.text)
        + '</p>\n';
    }
    case 'text': {
      return '<p>'
        + parseText()
        + '</p>\n';
    }
  }
};

var parseText = function() {
  var body = token.text
    , top;

  while ((top = tokens[tokens.length - 1])
         && top.type === 'text') {
    body += '\n' + next().text;
  }

  return inline.lexer(body);
};

var parse = function(src) {
  tokens = src.reverse();

  var out = '';
  while (next()) {
    out += tok();
  }

  tokens = null;
  token = null;

  return out;
};

/**
 * Helpers
 */

var escape = function(html, encode) {
  return html
    .replace(!encode ? /&(?!#?\w+;)/g : /&/g, '&amp;')
    .replace(/</g, '&lt;')
    .replace(/>/g, '&gt;')
    .replace(/"/g, '&quot;')
    .replace(/'/g, '&#39;');
};

var mangle = function(text) {
  var out = ''
    , l = text.length
    , i = 0
    , ch;

  for (; i < l; i++) {
    ch = text.charCodeAt(i);
    if (Math.random() > 0.5) {
      ch = 'x' + ch.toString(16);
    }
    out += '&#' + ch + ';';
  }

  return out;
};
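
// Example (illustrative): mangle('a@b') might return '&#97;&#x40;&#98;' --
// each character becomes a decimal or (randomly) hex entity, so the
// address still renders but is harder for scrapers to read.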

function tag() {
  var tag = '(?!(?:'
    + 'a|em|strong|small|s|cite|q|dfn|abbr|data|time|code'
    + '|var|samp|kbd|sub|sup|i|b|u|mark|ruby|rt|rp|bdi|bdo'
    + '|span|br|wbr|ins|del|img)\\b)\\w+';

  return tag;
}

/**
 * Expose
 */

var marked = function(src) {
  return parse(block.lexer(src));
};

marked.parser = parse;
marked.lexer = block.lexer;

marked.parse = marked;

if (typeof module !== 'undefined') {
  module.exports = marked;
} else {
  this.marked = marked;
}

}).call(this);
@ -0,0 +1,39 @@
.ds q \N'34'
.TH marked 1
.SH NAME
marked \- a javascript markdown parser
.SH SYNOPSIS
.nf
.B marked [\-o output] [\-i input] [\-th]
.fi
.SH DESCRIPTION
.B marked
is a full-featured javascript markdown parser, built for speed. It also includes
multiple GFM features.
.SH OPTIONS
.TP
.BI \-o,\ \-\-output\ [output]
Specify file output. If none is specified, write to stdout.
.TP
.BI \-i,\ \-\-input\ [input]
Specify file input, otherwise use last argument as input file. If no input file
is specified, read from stdin.
.TP
.BI \-t,\ \-\-tokens
Output a token stream instead of html.
.TP
.BI \-h,\ \-\-help
Display help information.
.SH EXAMPLES
.TP
cat in.md | marked > out.html
.TP
echo "hello *world*" | marked
.TP
marked -o out.html in.md
.TP
marked --output="hello world.html" -i in.md
.SH BUGS
Please report any bugs to https://github.com/chjj/marked.
.SH LICENSE
Copyright (c) 2011-2012, Christopher Jeffrey (MIT License)
@ -0,0 +1,15 @@
{
  "name": "marked",
  "description": "A markdown parser built for speed",
  "author": "Christopher Jeffrey",
  "version": "0.1.9",
  "main": "./lib/marked.js",
  "bin": "./bin/marked",
  "man": "./man/marked.1",
  "preferGlobal": false,
  "repository": "git://github.com/chjj/marked.git",
  "homepage": "https://github.com/chjj/marked",
  "bugs": "http://github.com/chjj/marked/issues",
  "keywords": [ "markdown", "markup", "html" ],
  "tags": [ "markdown", "markup", "html" ]
}
@ -0,0 +1,15 @@
{
  "author": "Isaac Z. Schlueter <i@izs.me> (http://blog.izs.me/)",
  "name": "node-doc-generator",
  "description": "Internal tool for generating Node.js API docs",
  "version": "0.0.0",
  "engines": {
    "node": ">=0.6.10"
  },
  "dependencies": {
    "marked": "~0.1.9"
  },
  "devDependencies": {},
  "optionalDependencies": {},
  "bin": "./generate.js"
}