code
github

Dockit

Dockit is a documentation generator, inspired by docco, written in Literate JavaScript. It produces a set of HTML documents that display your comments intermingled with your code, for your entire project.

All prose is passed through Markdown, and code is passed through Highlight.js syntax highlighting. This page is the result of running Dockit against its own source files.

Dockit source is available on github

Installation

npm install -g dockit

Usage

Run inside a directory with a .dockit.json5 file or pass the config location as a command line option, see dockit --help

github

Configuration

Dockit relies on a json5 configuration file to tell it how to parse a project

project: Project name

github: url and text to display for github repo link

assets: an array of filenames in the project to copy to the __assets directory to be available in the generated documentation, such as the favicon image. Assets copied in this way are available at __assets/asset.ext

favicon: filename to use for favicon. Original image should be an entry in above assets config

files: a series of glob matches to indicate which files to parse. Glob matches are specified in separate properties. These namings are arbitrary and exist to allow finer control over order by grouping

ctypes: an array of comment types to ignore (display as comments in the code sections, instead of pulling out and parsing with markdown). Supported comment types:

 HASH: #
 SLASH: //
 DOC_BLOCK: /**
            *
            */
 SLASH_STAR: /* */ (multi and singleline)
 BANG: <!-- --> (multi and singleline)
 BRACKET_BANG: {! !} (multi and singleline)

ignores: any comments that should be treated as exceptions to the above matching based on a simple substring match of the start of a line (singleline only supported)

all: boolean whether to generate the all page, generated by default. Otherwise the top level project README will be used

index: remap the index page to another generated html page. By default, with all set to true, this will point to the all page.

allHash: add an optional hash to the url for the all page (to link to an anchor)

{
  "project": "dockit",
  "github" : {
    "url" : "https://github.com/diffsky/dockit",
    "text" : "github"
  },
  "assets" : [ "dockit-assets/dockit.png" ],
  "favicon": "dockit.png",
  "files": {
    "first" : [
      "README.md",
      ".dockit.json5"
    ],
    "second": [
      "**/*.{js,md}",
      "!README.md",
      "!node_modules/**",
      "!docs/**",
      "!templates/**",
      "!assets/**"
    ]
  },
  "ctypes": ["BRACKET_BANG"],
  "ignores": [
    "/* jshint"
  ],
  "allHash" : "#readme_md-s1"
}
github

Dockit Lib

Require the necessary libraries. Actual file parsing for code and comments is handled by the noddocco lib.

A marked instance is configured to parse strings as markdown using github flavored markdown, passing code highlight callbacks to highlight.js

A Dust instance is configured and templates loaded and compiled to be used to generate HTML files.

All templates from templates directory are compiled into the dust.cache via duster - preserving any whitespace in the templates by using a format optimizer.


/* jshint loopfunc: true, unused: false */
var noddocco = require("noddocco"),
path = require('path'),
fs = require('fs'),
duster = require('duster'),
dust = duster.dust,
ncp = require('ncp').ncp,
marked = require('marked'),
hl = require('highlight.js'),
expand = require('glob-expand'),
mkdirp = require('mkdirp');

// Configure marked for GitHub flavored markdown. Fenced code blocks are
// syntax-highlighted via highlight.js; blocks with no declared language
// are passed through untouched.
marked.setOptions({
  gfm: true,
  pedantic: false,
  sanitize: false,
  highlight: function(code, lang) {
    if(lang !== undefined) {
      return hl.highlight(lang, code).value;
    }
    return code;
  }
});

// Replace dust's whitespace-stripping format optimizer with a no-op so
// that whitespace in the templates is preserved in the generated HTML.
dust.optimizers.format = function(ctx, node) {return node;};
// Compile every template in ./templates into the shared dust cache.
duster.prime(path.join(__dirname, 'templates'));

Process files and generate documentation

A config object passed to the dockit function contains a glob match for all the files to be processed. These matches can be split into sections allowing for finer control over the order in which files are processed and then displayed.

For each file in the match, its name is used to generate a key that will serve as the generated filename (with directory slashes replaced with dashes).

Any markdown files are treated as a special case - with h1, h2 and h3 headings extracted to be added to the pages navigation. The headings are anchorized so that scrollspy can capture them clientside and update the pages and files navigation.

Other files are passed to noddocco to be processed, which returns an object with comment and code properties for the parsed file. The comment section is checked for any h1, h2 and h3 headings (as above) to be added to the pages navigation.

// Module-level shared state, populated while files are processed:
//   blocks - rendered content keyed by page key
//   pages  - navigation heading entries keyed by page key
//   files  - file list for the files navigation
// The remaining names are scratch variables shared between the exported
// function below and anchorize() (which reads `key`/`foundsection`).
var blocks = {}, pages = {}, files = [],
dir, fileRepo, key, comment, page, details, ext, input, foundsection;

// String.replace callback: rewrites a matched <h1>-<h3> heading so it
// closes the previous section div and opens a new anchored one
// (id "<key>-s<n>") that clientside scrollspy can track. Reads the
// module-level `key` and increments `foundsection` as a side effect.
function anchorize(match, p1, p2, offset, string){
  var anchor = key + '-s' + foundsection;
  foundsection += 1;
  var heading = '<h' + p1 + '>' + p2 + '</h' + p1 + '>';
  return '</div><div id="' + anchor + '" class="section md">' + heading;
}

// Entry point: generate HTML documentation for every file matched by
// config.files and write the resulting pages into config.outputAbsolute.
module.exports = function(config) {
  var opts = {},
      owd = process.cwd(); // remember original cwd; restored after globbing

  // Ensure the output directory exists before any copies/writes start.
  if(!fs.existsSync(config.outputAbsolute)) {
    mkdirp.sync(config.outputAbsolute);
  }
  // Copy dockit's bundled assets into the output's __assets directory.
  // Async and best-effort: errors are logged but do not abort generation.
  ncp(path.join(__dirname, 'assets'), path.join(config.outputAbsolute, '__assets'), function (err) {
    if (err) {
      console.log(err);
    }
  });
  var matches = [];
  // Glob patterns are relative to the config file, so expand from there.
  process.chdir(config.configDir);
  // Section names ("first", "second", ...) are arbitrary; they only
  // control the order in which groups of files are processed.
  for(var section in config.files){
    expand({filter: 'isFile'}, config.files[section]).forEach(function(f){
      matches.push(path.join(config.configDir, f));
    });
  }
  process.chdir(owd);

  // Copy any user-configured assets (e.g. the favicon image) to __assets.
  config.assets = config.assets || [];
  config.assets.forEach(function(asset){
    ncp(path.join(config.configDir, asset), path.join(config.outputAbsolute, '__assets', path.basename(asset)), function (err) {
      if (err) {
        console.log(err);
      }
    });
  });

  matches.forEach(function(file){
    ext = path.extname(file).slice(1); // extension without the leading dot
    input = fs.readFileSync(file, 'utf8');
    fileRepo = path.relative(process.cwd(), file);
    // Page key doubles as the output html basename, e.g.
    // "lib/index.js" -> "lib_index_js".
    key = fileRepo.replace(/\//g, "_").replace(/\./g, "_").toLowerCase();
    dir = path.dirname(path.relative(process.cwd(), file)).toLowerCase();
    if (dir === '.') {
      dir = '';
    }
    dir = dir + '/';
    files.push({
      file: path.basename(path.relative(process.cwd(), file)),
      dir: dir,
      key: key
    });
    if(ext === 'md'){
      // Markdown files: render the whole file with marked, collect h1-h3
      // headings for the page navigation, then wrap each heading in an
      // anchored section div (via anchorize) for clientside scrollspy.
      foundsection = 1;
      var i = 1;
      input = marked(input);
      page = input.match(/<h([1-3])>(.*)<\/h[1-3]>/gi);
      for(var j in page){
        details = {
          // heading text and level, re-extracted from the global match
          page: page[j].match(/<h[1-3]>(.*)<\/h[1-3]>/)[1],
          section: i++,
          h: page[j].match(/<h([1-3])>/)[1],
          key: key
        };
        pages[key] = pages[key] || [];
        pages[key].push(details);
      }

      var content = '<div>' + input.replace(/<h([1-3])>(.*)<\/h[1-3]>/gi, anchorize) + '</div>';
      blocks[key] = {
        md: true,
        file: file,
        fileRepo: fileRepo,
        key: key,
        block: content
      };
    } else {
      // Source files: hand off to noddocco, which splits the input into
      // paired comment/code blocks (comments already rendered to html).
      // NOTE(review): this relies on noddocco.process invoking its
      // callback synchronously, since the shared `key`/`opts` are reused
      // on the next iteration — confirm against noddocco's docs.
      opts.ext = ext;
      opts.ctypes = config.ctypes;
      opts.encode = false;
      opts.ignores = config.ignores || {};
      noddocco.process(input, opts, function (err, noddoccoData) {
        blocks[key] = {
          file: file,
          fileRepo: fileRepo,
          key: key,
          blocks: noddoccoData
        };
        // Record the first h1-h3 heading of each comment block for the
        // page navigation, keyed to its section index within the file.
        for (var i in noddoccoData){
          comment = noddoccoData[i].comments;
          page = comment.match(/<h([1-3])>(.*)<\/h[1-3]>/i);
          if(page) {
            details = {
              page: page[2],
              section: (+i + 1),
              h: page[1],
              key: key
            };
            pages[key] = pages[key] || [];
            pages[key].push(details);
          }
        }
      });
    }
  });

Write the documentation to disk

With the noddocco data generated for each file. Loop through the files and write their contents to disk.

By default, alongised individual file pages, Dockit will create a page which is all the files processed into one, long html page. This page will be the index.html This behaviour can be altered by the dockit configuration

As the files are written to disk the progress is sent to the console.

  // Flatten the per-file heading entries into one navigation list, and
  // order the blocks by first appearance for the combined "all" page.
  var displaypages = [];
  var orderedblocks = [];

  for(var i in pages){
    orderedblocks.push(blocks[pages[i][0].key]);
    for (var j in pages[i]){
      displaypages.push(pages[i][j]);
    }
  }

  console.log('writing...');
  config.project = config.project || 'dockit generated docs';
  var all, dest,
      generated = new Date();

  // When the index is remapped to the all page, the all page is written
  // as index.html itself rather than as a separate all.html.
  all = 'all.html';
  if(config.index === 'all.html'){
    config.index = 'index.html';
    all = 'index.html';
  }

  // Render one html page per processed file.
  // NOTE(review): the callbacks below capture the shared `dest`, so this
  // relies on dust.render being synchronous for cached templates — confirm.
  for (i in blocks){
    dest = blocks[i].key + '.html';

    dust.render('file', {
      md: blocks[i].md,
      title: config.project,
      index: config.index,
      github: config.github,
      showall: config.all,
      all: all,
      allHash: config.allHash,
      current: blocks[i].key,
      files: files,
      favicon: config.favicon,
      generated: generated,
      pages: displaypages,
      data: blocks[i]},
      function(err, output){
        // A page remapped to be the index is written only as index.html;
        // a top-level README is additionally copied to index.html.
        if(dest === config.index) {
          dest = 'index.html';
        } else if (dest.slice(0, 6) === 'readme'){
          fs.writeFileSync(path.join(config.outputAbsolute,'index.html'), output);
        }
        fs.writeFileSync(path.join(config.outputAbsolute, dest), output);
        console.log(path.join(config.output, dest));
      });
  }

  // Unless disabled, render the single "all" page containing every block.
  if(config.all) {
    dust.render('file', {
      all: all,
      allHash: config.allHash,
      onAll: true,
      title: config.project,
      index: config.index,
      github: config.github,
      showall: config.all,
      files: files,
      favicon: config.favicon,
      generated: generated,
      pages: displaypages,
      data: orderedblocks},
      function(err, output){
        fs.writeFileSync(path.join(config.outputAbsolute, all), output);
        console.log(path.join(config.output, all));
      });
  }
  console.log('...done!');
};