changeset 759:9c47f377ca0b

Revised module cmd.Generate. Added class TagMapLoader. Fixed StringExpression.getString() and related code in the Parser. Added options 'xml_map' and 'html_map' to config.d
author Aziz Köksal <aziz.koeksal@gmail.com>
date Fri, 15 Feb 2008 02:07:53 +0100
parents f4b9680c0e16
children ea9e8b141742
files trunk/src/cmd/Generate.d trunk/src/config.d trunk/src/dil/Information.d trunk/src/dil/Settings.d trunk/src/dil/SettingsLoader.d trunk/src/dil/ast/Expressions.d trunk/src/dil/parser/Parser.d trunk/src/html_map.d trunk/src/main.d trunk/src/xml_map.d
diffstat 10 files changed, 428 insertions(+), 341 deletions(-)
line diff
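
The gist of the change to cmd.Generate: the hard-coded html_tags/xml_tags arrays are gone, and the generate command now loads a user-editable tag map (html_map.d or xml_map.d, selected through the new 'html_map'/'xml_map' entries in config.d) via the new TagMapLoader. Below is a minimal caller-side sketch, not part of the changeset, using only the APIs that appear in the hunks that follow; the file name "MyModule.d" is illustrative.

import cmd.Generate;
import dil.Information;

void generateExample()
{
  auto infoMan = new InfoManager();
  // With DocOption.HTML, execute() resolves GlobalSettings.htmlMapFile (set via
  // the new 'html_map' option in config.d), loads it with TagMapLoader and wraps
  // the resulting string[string] in a TagMap before highlighting the file.
  cmd.Generate.execute("MyModule.d", DocOption.Syntax | DocOption.HTML, infoMan);
  if (infoMan.hasInfo)
    return; // The map file or the source failed to load/parse;
            // main.d reports these messages via printErrors(infoMan).
}
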
--- a/trunk/src/cmd/Generate.d	Thu Feb 14 03:31:19 2008 +0100
+++ b/trunk/src/cmd/Generate.d	Fri Feb 15 02:07:53 2008 +0100
@@ -13,8 +13,12 @@
 import dil.lexer.Lexer;
 import dil.parser.Parser;
 import dil.SourceText;
+import dil.Information;
+import dil.SettingsLoader;
+import dil.Settings;
 import common;
 
+import tango.io.GrowBuffer;
 import tango.io.Print;
 
 /// Options for the generate command.
@@ -28,13 +32,21 @@
 }
 
 /// Executes the command.
-void execute(string filePath, DocOption options)
+void execute(string filePath, DocOption options, InfoManager infoMan)
 {
   assert(options != DocOption.Empty);
+  auto mapFilePath = options & DocOption.HTML ? GlobalSettings.htmlMapFile
+                                              : GlobalSettings.xmlMapFile;
+  auto map = TagMapLoader(infoMan).load(mapFilePath);
+  auto tags = new TagMap(map);
+
+  if (infoMan.hasInfo)
+    return;
+
   if (options & DocOption.Syntax)
-    syntaxToDoc(filePath, Stdout, options);
+    syntaxToDoc(filePath, tags, Stdout, options);
   else
-    tokensToDoc(filePath, Stdout, options);
+    tokensToDoc(filePath, tags, Stdout, options);
 }
 
 /// Escapes the characters '<', '>' and '&' with named character entities.
@@ -56,30 +68,66 @@
   return text;
 }
 
+class TagMap
+{
+  string[string] table;
 
-/// Find object in subject and return position.
-/// Returns -1 if no match was found.
-/+int find(char[] subject, char[] object)
-{
-  if (object.length > subject.length)
-    return -1;
-  foreach (i, c; subject)
+  this(string[string] table)
   {
-    if (c == object[0])
-    {
-      if (object.length > (subject.length - i))
-        return -1;
-      if (object == subject[i..i+object.length])
-        return i;
-    }
+    this.table = table;
+    Identifier   = this["Identifier", "{0}"];
+    String       = this["String", "{0}"];
+    Char         = this["Char", "{0}"];
+    Number       = this["Number", "{0}"];
+    Keyword      = this["Keyword", "{0}"];
+    LineC        = this["LineC", "{0}"];
+    BlockC       = this["BlockC", "{0}"];
+    NestedC      = this["NestedC", "{0}"];
+    Shebang      = this["Shebang", "{0}"];
+    HLine        = this["HLine", "{0}"];
+    Filespec     = this["Filespec", "{0}"];
+    Illegal      = this["Illegal", "{0}"];
+    Newline      = this["Newline", "{0}"];
+    SpecialToken = this["SpecialToken", "{0}"];
+    Declaration  = this["Declaration", "d"];
+    Statement    = this["Statement", "s"];
+    Expression   = this["Expression", "e"];
+    Type         = this["Type", "t"];
+    Other        = this["Other", "o"];
+    EOF          = this["EOF", ""];
   }
-  return -1;
-}+/
+
+  string opIndex(string str, string fallback = "")
+  {
+    auto p = str in table;
+    if (p)
+      return *p;
+    return fallback;
+  }
+
+  string Identifier, String, Char, Number, Keyword, LineC, BlockC,
+         NestedC, Shebang, HLine, Filespec, Illegal, Newline, SpecialToken,
+         Declaration, Statement, Expression, Type, Other, EOF;
 
-/++
-  Find the last occurrence of object in subject.
-  Returns the index if found, or -1 if not.
-+/
+  /// Returns the tag for the category 'nc'.
+  string getTag(NodeCategory nc)
+  {
+    string tag;
+    switch (nc)
+    { alias NodeCategory NC;
+    case NC.Declaration: tag = Declaration; break;
+    case NC.Statement:   tag = Statement; break;
+    case NC.Expression:  tag = Expression; break;
+    case NC.Type:        tag = Type; break;
+    case NC.Other:       tag = Other; break;
+    default: assert(0);
+    }
+    return tag;
+  }
+}
+
+/// Find the last occurrence of object in subject.
+/// Returns: the index if found, or -1 if not.
 int rfind(char[] subject, char object)
 {
   foreach_reverse(i, c; subject)
@@ -130,174 +178,6 @@
   return name;
 }
 
-/// Indices into the XML and HTML tag arrays.
-enum DocPart
-{
-  Head,
-  CompBegin,
-  CompEnd,
-  Error,
-  SyntaxBegin,
-  SyntaxEnd,
-  SrcBegin,
-  SrcEnd,
-  Tail,
-  // Tokens:
-  Identifier,
-  Comment,
-  StringLiteral,
-  CharLiteral,
-  Operator,
-  LorG,
-  LessEqual,
-  GreaterEqual,
-  AndLogical,
-  OrLogical,
-  NotEqual,
-  Not,
-  Number,
-  Bracket,
-  SpecialToken,
-  Shebang,
-  Keyword,
-  HLineBegin,
-  HLineEnd,
-  Filespec,
-}
-
-auto html_tags = [
-  // Head
-  `<html>`\n
-  `<head>`\n
-  `<meta http-equiv="Content-Type" content="text/html; charset=utf-8">`\n
-  `<link href="html.css" rel="stylesheet" type="text/css">`\n
-  `</head>`\n
-  `<body>`[],
-  // CompBegin
-  `<div class="compilerinfo">`,
-  // CompEnd
-  `</div>`,
-  // Error
-  `<p class="error {0}">{1}({2}){3}: {4}</p>`,
-  // SyntaxBegin
-  `<span class="{0} {1}">`,
-  // SyntaxEnd
-  `</span>`,
-  // SrcBegin
-  `<pre class="sourcecode">`,
-  // SrcEnd
-  `</pre>`,
-  // Tail
-  `</html>`,
-  // Identifier
-  `<span class="i">{0}</span>`,
-  // Comment
-  `<span class="c{0}">{1}</span>`,
-  // StringLiteral
-  `<span class="sl">{0}</span>`,
-  // CharLiteral
-  `<span class="chl">{0}</span>`,
-  // Operator
-  `<span class="op">{0}</span>`,
-  // LorG
-  `<span class="oplg">&lt;&gt;</span>`,
-  // LessEqual
-  `<span class="ople">&lt;=</span>`,
-  // GreaterEqual
-  `<span class="opge">&gt;=</span>`,
-  // AndLogical
-  `<span class="opaa">&amp;&amp;</span>`,
-  // OrLogical
-  `<span class="opoo">||</span>`,
-  // NotEqual
-  `<span class="opne">!=</span>`,
-  // Not
-  `<span class="opn">!</span>`,
-  // Number
-  `<span class="n">{0}</span>`,
-  // Bracket
-  `<span class="br">{0}</span>`,
-  // SpecialToken
-  `<span class="st">{0}</span>`,
-  // Shebang
-  `<span class="shebang">{0}</span>`,
-  // Keyword
-  `<span class="k">{0}</span>`,
-  // HLineBegin
-  `<span class="hl">`,
-  // HLineEnd
-  "</span>",
-  // Filespec
-  `<span class="fs">{0}</span>`,
-];
-
-auto xml_tags = [
-  // Head
-  `<?xml version="1.0"?>`\n
-  `<?xml-stylesheet href="xml.css" type="text/css"?>`\n
-  `<root>`[],
-  // CompBegin
-  `<compilerinfo>`,
-  // CompEnd
-  `</compilerinfo>`,
-  // Error
-  `<error t="{0}">{1}({2}){3}: {4}</error>`,
-  // SyntaxBegin
-  `<{0} t="{1}">`,
-  // SyntaxEnd
-  `</{0}>`,
-  // SrcBegin
-  `<sourcecode>`,
-  // SrcEnd
-  `</sourcecode>`,
-  // Tail
-  `</root>`,
-  // Identifier
-  "<i>{0}</i>",
-  // Comment
-  `<c t="{0}">{1}</c>`,
-  // StringLiteral
-  "<sl>{0}</sl>",
-  // CharLiteral
-  "<cl>{0}</cl>",
-  // Operator
-  "<op>{0}</op>",
-  // LorG
-  `<op t="lg">&lt;&gt;</op>`,
-  // LessEqual
-  `<op t="le">&lt;=</op>`,
-  // GreaterEqual
-  `<op t="ge">&gt;=</op>`,
-  // AndLogical
-  `<op t="aa">&amp;&amp;</op>`,
-  // OrLogical
-  `<op t="oo">||</op>`,
-  // NotEqual
-  `<op t="ne">!=</op>`,
-  // Not
-  `<op t="n">!</op>`,
-  // Number
-  "<n>{0}</n>",
-  // Bracket
-  "<br>{0}</br>",
-  // SpecialToken
-  "<st>{0}</st>",
-  // Shebang
-  "<shebang>{0}</shebang>",
-  // Keyword
-  "<k>{0}</k>",
-  // HLineBegin
-  "<hl>",
-  // HLineEnd
-  "</hl>",
-  // Filespec
-  "<fs>{0}</fs>",
-];
-
-// The size of the arrays must equal the number of members in enum DocPart.
-static assert(html_tags.length == DocPart.max+1);
-static assert(xml_tags.length == DocPart.max+1);
-
 /// Extended token structure.
 struct TokenEx
 {
@@ -364,42 +244,20 @@
   }
 }
 
-char getTag(NodeCategory nc)
+void printErrors(Lexer lx, TagMap tags, Print!(char) print)
 {
-  char tag;
-  switch (nc)
-  {
-  alias NodeCategory NC;
-  case NC.Declaration: tag = 'd'; break;
-  case NC.Statement:   tag = 's'; break;
-  case NC.Expression:  tag = 'e'; break;
-  case NC.Type:        tag = 't'; break;
-  case NC.Other:       tag = 'o'; break;
-  default:
-    assert(0);
-  }
-  return tag;
+  foreach (e; lx.errors)
+    print.format(tags["LexerError"], e.filePath, e.loc, e.col, xml_escape(e.getMsg));
 }
 
-void printErrors(Lexer lx, string[] tags, Print!(char) print)
+void printErrors(Parser parser, TagMap tags, Print!(char) print)
 {
-  foreach (error; lx.errors)
-  {
-    print.formatln(tags[DocPart.Error], "L", error.filePath, Format("{0},{1}", error.loc, error.col), "L", xml_escape(error.getMsg));
-  }
+  foreach (e; parser.errors)
+    print.format(tags["ParserError"], e.filePath, e.loc, e.col, xml_escape(e.getMsg));
 }
 
-void printErrors(Parser parser, string[] tags, Print!(char) print)
+void syntaxToDoc(string filePath, TagMap tags, Print!(char) print, DocOption options)
 {
-  foreach (error; parser.errors)
-  {
-    print.formatln(tags[DocPart.Error], "P", error.filePath, Format("{0},{1}", error.loc, error.col), "P", xml_escape(error.getMsg));
-  }
-}
-
-void syntaxToDoc(string filePath, Print!(char) print, DocOption options)
-{
-  auto tags = options & DocOption.HTML ? html_tags : xml_tags;
   auto parser = new Parser(new SourceText(filePath, true));
   auto root = parser.start();
   auto lx = parser.lexer;
@@ -407,196 +265,156 @@
   auto builder = new TokenExBuilder();
   auto tokenExList = builder.build(root, lx.firstToken());
 
-  print(tags[DocPart.Head]~\n);
+  print(tags["DocHead"]);
   if (lx.errors.length || parser.errors.length)
   { // Output error messages.
-    print(tags[DocPart.CompBegin]~\n);
+    print(tags["CompBegin"]);
     printErrors(lx, tags, print);
     printErrors(parser, tags, print);
-    print(tags[DocPart.CompEnd]~\n);
+    print(tags["CompEnd"]);
   }
-  print(tags[DocPart.SrcBegin]);
+  print(tags["SourceBegin"]);
+
+  auto tagNodeBegin = tags["NodeBegin"];
+  auto tagNodeEnd = tags["NodeEnd"];
 
   // Iterate over list of tokens.
   foreach (ref tokenEx; tokenExList)
   {
     auto token = tokenEx.token;
-    // Print whitespace.
-    if (token.ws)
-      print(token.wsChars);
 
+    token.ws && print(token.wsChars); // Print preceding whitespace.
+    // <node>
     foreach (node; tokenEx.beginNodes)
-      print.format(tags[DocPart.SyntaxBegin], getTag(node.category), getShortClassName(node));
-
+      print.format(tagNodeBegin, tags.getTag(node.category), getShortClassName(node));
+    // Token text.
     printToken(token, tags, print);
-
+    // </node>
     if (options & DocOption.HTML)
       foreach_reverse (node; tokenEx.endNodes)
-        print(tags[DocPart.SyntaxEnd]);
+        print(tagNodeEnd);
     else
       foreach_reverse (node; tokenEx.endNodes)
-        print.format(tags[DocPart.SyntaxEnd], getTag(node.category));
+        print.format(tagNodeEnd, tags.getTag(node.category));
   }
-  print(\n~tags[DocPart.SrcEnd])(\n~tags[DocPart.Tail]);
+  print(tags["SourceEnd"]);
+  print(tags["DocEnd"]);
 }
 
 /// Prints all tokens of a source file using the buffer print.
-void tokensToDoc(string filePath, Print!(char) print, DocOption options)
+void tokensToDoc(string filePath, TagMap tags, Print!(char) print, DocOption options)
 {
-  auto tags = options & DocOption.HTML ? html_tags : xml_tags;
   auto lx = new Lexer(new SourceText(filePath, true));
   lx.scanAll();
 
-  print(tags[DocPart.Head]~\n);
+  print(tags["DocHead"]);
   if (lx.errors.length)
   {
-    print(tags[DocPart.CompBegin]~\n);
+    print(tags["CompBegin"]);
     printErrors(lx, tags, print);
-    print(tags[DocPart.CompEnd]~\n);
+    print(tags["CompEnd"]);
   }
-  print(tags[DocPart.SrcBegin]);
+  print(tags["SourceBegin"]);
 
   // Traverse linked list and print tokens.
   auto token = lx.firstToken();
-  while (token.kind != TOK.EOF)
+  while (token)
   {
-    // Print whitespace.
-    if (token.ws)
-      print(token.wsChars);
+    token.ws && print(token.wsChars); // Print preceding whitespace.
     printToken(token, tags, print);
     token = token.next;
   }
-  print(\n~tags[DocPart.SrcEnd])(\n~tags[DocPart.Tail]);
+  print(tags["SourceEnd"]);
+  print(tags["DocEnd"]);
 }
 
+void printToken(Token* token, string[] tags, Print!(char) print)
+{}
+
 /// Prints a token with tags using the buffer print.
-void printToken(Token* token, string[] tags, Print!(char) print)
+void printToken(Token* token, TagMap tags, Print!(char) print)
 {
-  alias DocPart DP;
-  string srcText = xml_escape(token.srcText);
-
   switch(token.kind)
   {
   case TOK.Identifier:
-    print.format(tags[DP.Identifier], srcText);
+    print.format(tags.Identifier, token.srcText);
     break;
   case TOK.Comment:
-    string t;
+    string formatStr;
     switch (token.start[1])
     {
-    case '/': t = "l"; break;
-    case '*': t = "b"; break;
-    case '+': t = "n"; break;
-    default:
-      assert(0);
+    case '/': formatStr = tags.LineC; break;
+    case '*': formatStr = tags.BlockC; break;
+    case '+': formatStr = tags.NestedC; break;
+    default: assert(0);
     }
-    print.format(tags[DP.Comment], t, srcText);
+    print.format(formatStr, xml_escape(token.srcText));
     break;
   case TOK.String:
-    print.format(tags[DP.StringLiteral], srcText);
+    print.format(tags.String, xml_escape(token.srcText));
     break;
   case TOK.CharLiteral:
-    print.format(tags[DP.CharLiteral], srcText);
-    break;
-  case TOK.Assign,        TOK.Equal,
-       TOK.Less,          TOK.Greater,
-       TOK.LShiftAssign,  TOK.LShift,
-       TOK.RShiftAssign,  TOK.RShift,
-       TOK.URShiftAssign, TOK.URShift,
-       TOK.OrAssign,      TOK.OrBinary,
-       TOK.AndAssign,     TOK.AndBinary,
-       TOK.PlusAssign,    TOK.PlusPlus,   TOK.Plus,
-       TOK.MinusAssign,   TOK.MinusMinus, TOK.Minus,
-       TOK.DivAssign,     TOK.Div,
-       TOK.MulAssign,     TOK.Mul,
-       TOK.ModAssign,     TOK.Mod,
-       TOK.XorAssign,     TOK.Xor,
-       TOK.CatAssign,
-       TOK.Tilde,
-       TOK.Unordered,
-       TOK.UorE,
-       TOK.UorG,
-       TOK.UorGorE,
-       TOK.UorL,
-       TOK.UorLorE,
-       TOK.LorEorG:
-    print.format(tags[DP.Operator], srcText);
-    break;
-  case TOK.LorG:
-    print(tags[DP.LorG]);
-    break;
-  case TOK.LessEqual:
-    print(tags[DP.LessEqual]);
-    break;
-  case TOK.GreaterEqual:
-    print(tags[DP.GreaterEqual]);
-    break;
-  case TOK.AndLogical:
-    print(tags[DP.AndLogical]);
-    break;
-  case TOK.OrLogical:
-    print(tags[DP.OrLogical]);
-    break;
-  case TOK.NotEqual:
-    print(tags[DP.NotEqual]);
-    break;
-  case TOK.Not:
-    // Check if this is part of a template instantiation.
-    if (token.prevNWS.kind == TOK.Identifier && token.nextNWS.kind == TOK.LParen)
-      goto default;
-    print(tags[DP.Not]);
+    print.format(tags.Char, xml_escape(token.srcText));
     break;
   case TOK.Int32, TOK.Int64, TOK.Uint32, TOK.Uint64,
        TOK.Float32, TOK.Float64, TOK.Float80,
        TOK.Imaginary32, TOK.Imaginary64, TOK.Imaginary80:
-    print.format(tags[DP.Number], srcText);
-    break;
-  case TOK.LParen, TOK.RParen, TOK.LBracket,
-       TOK.RBracket, TOK.LBrace, TOK.RBrace:
-    print.format(tags[DP.Bracket], srcText);
+    print.format(tags.Number, token.srcText);
     break;
   case TOK.Shebang:
-    print.format(tags[DP.Shebang], srcText);
+    print.format(tags.Shebang, xml_escape(token.srcText));
     break;
   case TOK.HashLine:
+    auto formatStr = tags.HLine;
+    // The text to be inserted into formatStr.
+    auto buffer = new GrowBuffer;
+    auto print2 = new Print!(char)(Format, buffer);
+
     void printWS(char* start, char* end)
     {
-      if (start != end)
-        print(start[0 .. end - start]);
+      start != end && print2(start[0 .. end - start]);
     }
-    print(tags[DP.HLineBegin]);
+
     auto num = token.tokLineNum;
     if (num is null)
-    {
-      print(token.srcText);
-      print(tags[DP.HLineEnd]);
+    { // Malformed #line
+      print.format(formatStr, token.srcText);
       break;
     }
-    // Print whitespace between #line and number
+
+    // Print whitespace between #line and number.
     auto ptr = token.start;
-    printWS(ptr, num.start); // prints "#line" as well
-    printToken(num, tags, print);
-    if (token.tokLineFilespec)
-    {
-      auto filespec = token.tokLineFilespec;
-      // Print whitespace between number and filespec
+    printWS(ptr, num.start); // Prints "#line" as well.
+    printToken(num, tags, print2);
+
+    if (auto filespec = token.tokLineFilespec)
+    { // Print whitespace between number and filespec.
       printWS(num.end, filespec.start);
-      print.format(tags[DP.Filespec], xml_escape(filespec.srcText));
-
+      print2.format(tags.Filespec, xml_escape(filespec.srcText));
       ptr = filespec.end;
     }
     else
       ptr = num.end;
     // Print remaining whitespace
     printWS(ptr, token.end);
-    print(tags[DP.HLineEnd]);
+    // Finally print the whole token.
+    print.format(formatStr, cast(char[])buffer.slice());
+    break;
+  case TOK.Illegal:
+    print.format(tags.Illegal, token.srcText());
+    break;
+  case TOK.Newline:
+    print.format(tags.Newline, token.srcText());
+    break;
+  case TOK.EOF:
+    print(tags.EOF);
     break;
   default:
     if (token.isKeyword())
-      print.format(tags[DP.Keyword], srcText);
+      print.format(tags.Keyword, token.srcText);
     else if (token.isSpecialToken)
-      print.format(tags[DP.SpecialToken], srcText);
+      print.format(tags.SpecialToken, token.srcText);
     else
-      print(srcText);
+      print(tags[token.srcText]);
   }
 }
--- a/trunk/src/config.d	Thu Feb 14 03:31:19 2008 +0100
+++ b/trunk/src/config.d	Fri Feb 15 02:07:53 2008 +0100
@@ -12,6 +12,9 @@
 /// DDoc macro file paths.
 var ddoc_files = []; /// E.g.: ["src/mymacros.ddoc", "othermacros.ddoc"]
 
+var xml_map = "xml_map.d";
+var html_map = "html_map.d";
+
 /// Customizable formats for error messages.
 ///
 /// <ul>
--- a/trunk/src/dil/Information.d	Thu Feb 14 03:31:19 2008 +0100
+++ b/trunk/src/dil/Information.d	Fri Feb 15 02:07:53 2008 +0100
@@ -18,6 +18,11 @@
 {
   Information[] info;
 
+  bool hasInfo()
+  {
+    return info.length != 0;
+  }
+
   void opCatAssign(Information info)
   {
     this.info ~= info;
--- a/trunk/src/dil/Settings.d	Thu Feb 14 03:31:19 2008 +0100
+++ b/trunk/src/dil/Settings.d	Fri Feb 15 02:07:53 2008 +0100
@@ -18,6 +18,8 @@
   string[] importPaths;
   /// Array of DDoc macro file paths.
   string[] ddocFilePaths;
+  string xmlMapFile = "xml_map.d";
+  string htmlMapFile = "html_map.d";
   string lexerErrorFormat = "{0}({1},{2})L: {3}";
   string parserErrorFormat = "{0}({1},{2})P: {3}";
   string semanticErrorFormat = "{0}({1},{2})S: {3}";
--- a/trunk/src/dil/SettingsLoader.d	Thu Feb 14 03:31:19 2008 +0100
+++ b/trunk/src/dil/SettingsLoader.d	Fri Feb 15 02:07:53 2008 +0100
@@ -16,16 +16,19 @@
 
 import tango.io.FilePath;
 
-struct SettingsLoader
+class SettingsLoader
 {
   InfoManager infoMan;
   Module mod; /// Current module.
 
+  this(InfoManager infoMan)
+  {
+    this.infoMan = infoMan;
+  }
+
   static SettingsLoader opCall(InfoManager infoMan)
   {
-    SettingsLoader sl;
-    sl.infoMan = infoMan;
-    return sl;
+    return new SettingsLoader(infoMan);
   }
 
   void error(Token* token, char[] formatMsg, ...)
@@ -79,7 +82,6 @@
 
     if (auto val = getValue!(StringExpression)("langfile"))
       GlobalSettings.langFile = val.getString();
-
     if (auto array = getValue!(ArrayInitExpression)("import_paths"))
       foreach (value; array.values)
         if (auto str = castTo!(StringExpression)(value))
@@ -88,6 +90,10 @@
       foreach (value; array.values)
         if (auto str = castTo!(StringExpression)(value))
           GlobalSettings.ddocFilePaths ~= resolvePath(execPath, str.getString());
+    if (auto val = getValue!(StringExpression)("xml_map"))
+      GlobalSettings.xmlMapFile = val.getString();
+    if (auto val = getValue!(StringExpression)("html_map"))
+      GlobalSettings.htmlMapFile = val.getString();
     if (auto val = getValue!(StringExpression)("lexer_error"))
       GlobalSettings.lexerErrorFormat = val.getString();
     if (auto val = getValue!(StringExpression)("parser_error"))
@@ -124,6 +130,43 @@
   }
 }
 
+class TagMapLoader : SettingsLoader
+{
+  this(InfoManager infoMan)
+  {
+    super(infoMan);
+  }
+
+  static TagMapLoader opCall(InfoManager infoMan)
+  {
+    return new TagMapLoader(infoMan);
+  }
+
+  string[string] load(string filePath)
+  {
+    mod = new Module(filePath, infoMan);
+    mod.parse();
+    if (mod.hasErrors)
+      return null;
+
+    auto pass1 = new SemanticPass1(mod);
+    pass1.start();
+
+    string[string] map;
+    if (auto array = getValue!(ArrayInitExpression)("map"))
+      foreach (i, value; array.values)
+      {
+        auto key = array.keys[i];
+        if (auto valExp = castTo!(StringExpression)(value))
+          if (!key)
+            error(value.begin, "expected key : value");
+          else if (auto keyExp = castTo!(StringExpression)(key))
+            map[keyExp.getString()] = valExp.getString();
+      }
+    return map;
+  }
+}
+
 string resolvePath(FilePath execPath, string filePath)
 {
   if ((new FilePath(filePath)).isAbsolute())
--- a/trunk/src/dil/ast/Expressions.d	Thu Feb 14 03:31:19 2008 +0100
+++ b/trunk/src/dil/ast/Expressions.d	Fri Feb 15 02:07:53 2008 +0100
@@ -777,10 +777,11 @@
     this(cast(ubyte[])str, Types.Dchar);
   }
 
+  /// Returns the string excluding the terminating 0.
   char[] getString()
   {
     // TODO: convert to char[] if charType !is Types.Char.
-    return cast(char[])str;
+    return cast(char[])str[0..$-1];
   }
 }
 
--- a/trunk/src/dil/parser/Parser.d	Thu Feb 14 03:31:19 2008 +0100
+++ b/trunk/src/dil/parser/Parser.d	Fri Feb 15 02:07:53 2008 +0100
@@ -3214,6 +3214,7 @@
             postfix = token.pf;
         else if (token.pf && token.pf != postfix)
           error(token, MSG.StringPostfixMismatch);
+        str.length = str.length - 1;
         str ~= token.str;
         nT();
       }
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trunk/src/html_map.d	Fri Feb 15 02:07:53 2008 +0100
@@ -0,0 +1,108 @@
+/// A map of document elements and D tokens to format strings.
+string[string] map = [
+  "DocHead" : `<html>`\n
+              `<head>`\n
+              `  <meta http-equiv="Content-Type" content="text/html; charset=utf-8">`\n
+              `  <link href="html.css" rel="stylesheet" type="text/css">`\n
+              `</head>`\n
+              `<body>`\n,
+  "DocEnd"  : "\n</body>"
+              "\n</html>",
+  "SourceBegin" : `<pre class="sourcecode">`,
+  "SourceEnd"   : "\n</pre>",
+  "CompBegin"   : `<div class="compilerinfo">`\n,
+  "CompEnd"     : "</div>\n",
+  "LexerError"  : `<p class="error L">{0}({1},{2})L: {3}</p>`\n,
+  "ParserError" : `<p class="error P">{0}({1},{2})P: {3}</p>`\n,
+
+  // Node categories:
+  "Declaration" : "d",
+  "Statement"   : "s",
+  "Expression"  : "e",
+  "Type"        : "t",
+  "Other"       : "o",
+
+  // {0} = node category.
+  // {1} = node class name: "Call", "If", "Class" etc.
+  // E.g.: <span class="d Struct">...</d>
+  "NodeBegin" : `<span class="{0} {1}">`,
+  "NodeEnd"   : `</span>`,
+
+  "Identifier" : `<span class="i">{0}</span>`,
+  "String"     : `<span class="sl">{0}</span>`,
+  "Char"       : `<span class="cl">{0}</span>`,
+  "Number"     : `<span class="n">{0}</span>`,
+  "Keyword"    : `<span class="k">{0}</span>`,
+
+  "LineC"   : `<span class="lc">{0}</span>`,
+  "BlockC"  : `<span class="bc">{0}</span>`,
+  "NestedC" : `<span class="nc">{0}</span>`,
+
+  "Shebang"  : `<span class="shebang">{0}</span>`,
+  "HLine"    : `<span class="hl">{0}</span>`, // #line
+  "Filespec" : `<span class="fs">{0}</span>`, // #line N "filespec"
+  "Newline"  : "{0}", // \n | \r | \r\n | LS | PS
+  "Illegal"  : `<span class="ill">{0}</span>`, // A character not recognized by the lexer.
+
+  "SpecialToken" : `<span class="st">{0}</span>`, // __FILE__, __LINE__ etc.
+
+  "("    : "<br>(</br>",
+  ")"    : "<br>)</br>",
+  "["    : "<br>[</br>",
+  "]"    : "<br>]</br>",
+  "{"    : "<br>{</br>",
+  "}"    : "<br>}</br>",
+  "."    : ".",
+  ".."   : "..",
+  "..."  : "...",
+  "!<>=" : "!&lt;&gt;=", // Unordered
+  "!<>"  : "!&lt;&gt;",  // UorE
+  "!<="  : "!&lt;=",     // UorG
+  "!<"   : "!&lt;",      // UorGorE
+  "!>="  : "!&gt;=",     // UorL
+  "!>"   : "!&gt;",      // UorLorE
+  "<>="  : "&lt;&gt;=",  // LorEorG
+  "<>"   : "&lt;&gt;",   // LorG
+  "="    : "=",
+  "=="   : "==",
+  "!"    : "!",
+  "!="   : "!=",
+  "<="   : "&lt;=",
+  "<"    : "&lt;",
+  ">="   : "&gt;=",
+  ">"    : "&gt;",
+  "<<="  : "&lt;&lt;=",
+  "<<"   : "&lt;&lt;",
+  ">>="  : "&gt;&gt;=",
+  ">>"   : "&gt;&gt;",
+  ">>>=" : "&gt;&gt;&gt;=",
+  ">>>"  : "&gt;&gt;&gt;",
+  "|"    : "|",
+  "||"   : "||",
+  "|="   : "|=",
+  "&"    : "&amp;",
+  "&&"   : "&amp;&amp;",
+  "&="   : "&amp;=",
+  "+"    : "+",
+  "++"   : "++",
+  "+="   : "+=",
+  "-"    : "-",
+  "--"   : "--",
+  "-="   : "-=",
+  "/"    : "/",
+  "/="   : "/=",
+  "*"    : "*",
+  "*="   : "*=",
+  "%"    : "%",
+  "%="   : "%=",
+  "^"    : "^",
+  "^="   : "^=",
+  "~"    : "~",
+  "~="   : "~=",
+  ":"    : ":",
+  ";"    : ";",
+  "?"    : "?",
+  ","    : ",",
+  "$"    : "$",
+  "EOF"  : ""
+];
--- a/trunk/src/main.d	Thu Feb 14 03:31:19 2008 +0100
+++ b/trunk/src/main.d	Fri Feb 15 02:07:53 2008 +0100
@@ -43,7 +43,7 @@
 {
   auto infoMan = new InfoManager();
   SettingsLoader(infoMan).load();
-  if (infoMan.info.length)
+  if (infoMan.hasInfo)
     return printErrors(infoMan);
 
   if (args.length <= 1)
@@ -114,8 +114,8 @@
     infoMan = new InfoManager();
     // Execute command.
     cmd.DDoc.execute(filePaths, destination, macroPaths, incUndoc, verbose, infoMan);
-    if (infoMan.info.length)
-      return printErrors(infoMan);
+    if (infoMan.hasInfo)
+      printErrors(infoMan);
     break;
   case "gen", "generate":
     char[] fileName;
@@ -136,7 +136,9 @@
     }
     if (!(options & (DocOption.XML | DocOption.HTML)))
       options |= DocOption.XML; // Default to XML.
-    cmd.Generate.execute(fileName, options);
+    cmd.Generate.execute(fileName, options, infoMan);
+    if (infoMan.hasInfo)
+      printErrors(infoMan);
     break;
   case "importgraph", "igraph":
     string filePath;
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/trunk/src/xml_map.d	Fri Feb 15 02:07:53 2008 +0100
@@ -0,0 +1,104 @@
+/// A map of document elements and D tokens to format strings.
+string[string] map = [
+  "DocHead" : `<?xml version="1.0"?>`\n
+              `<?xml-stylesheet href="xml.css" type="text/css"?>`\n
+              "<root>\n",
+  "DocEnd"  : "\n</root>",
+  "SourceBegin" : "<sourcecode>",
+  "SourceEnd"   : "\n</sourcecode>",
+  "CompBegin"   : "<compiler>\n",
+  "CompEnd"     : "</compiler>\n",
+  "LexerError"  : `<error t="L">{0}({1},{2})L: {3}</error>`\n,
+  "ParserError" : `<error t="P">{0}({1},{2})P: {3}</error>`\n,
+
+  // Node categories:
+  "Declaration" : "d",
+  "Statement"   : "s",
+  "Expression"  : "e",
+  "Type"        : "t",
+  "Other"       : "o",
+
+  // {0} = node category.
+  // {1} = node class name: "Call", "If", "Class" etc.
+  // E.g.: <d t="Struct">...</d>
+  "NodeBegin" : `<{0} t="{1}">`,
+  "NodeEnd"   : `</{0}>`,
+
+  "Identifier" : "<i>{0}</i>",
+  "String"     : "<sl>{0}</sl>",
+  "Char"       : "<cl>{0}</cl>",
+  "Number"     : "<n>{0}</n>",
+  "Keyword"    : "<k>{0}</k>",
+
+  "LineC"   : "<lc>{0}</lc>",
+  "BlockC"  : "<bc>{0}</bc>",
+  "NestedC" : "<nc>{0}</nc>",
+
+  "Shebang"  : "<shebang>{0}</shebang>",
+  "HLine"    : "<hl>{0}</hl>", // #line
+  "Filespec" : "<fs>{0}</fs>", // #line N "filespec"
+  "Newline"  : "{0}", // \n | \r | \r\n | LS | PS
+  "Illegal"  : "<ill>{0}</ill>", // A character not recognized by the lexer.
+
+  "SpecialToken" : "<st>{0}</st>", // __FILE__, __LINE__ etc.
+
+  "("    : "<br>(</br>",
+  ")"    : "<br>)</br>",
+  "["    : "<br>[</br>",
+  "]"    : "<br>]</br>",
+  "{"    : "<br>{</br>",
+  "}"    : "<br>}</br>",
+  "."    : ".",
+  ".."   : "..",
+  "..."  : "...",
+  "!<>=" : "!&lt;&gt;=", // Unordered
+  "!<>"  : "!&lt;&gt;",  // UorE
+  "!<="  : "!&lt;=",     // UorG
+  "!<"   : "!&lt;",      // UorGorE
+  "!>="  : "!&gt;=",     // UorL
+  "!>"   : "!&gt;",      // UorLorE
+  "<>="  : "&lt;&gt;=",  // LorEorG
+  "<>"   : "&lt;&gt;",   // LorG
+  "="    : "=",
+  "=="   : "==",
+  "!"    : "!",
+  "!="   : "!=",
+  "<="   : "&lt;=",
+  "<"    : "&lt;",
+  ">="   : "&gt;=",
+  ">"    : "&gt;",
+  "<<="  : "&lt;&lt;=",
+  "<<"   : "&lt;&lt;",
+  ">>="  : "&gt;&gt;=",
+  ">>"   : "&gt;&gt;",
+  ">>>=" : "&gt;&gt;&gt;=",
+  ">>>"  : "&gt;&gt;&gt;",
+  "|"    : "|",
+  "||"   : "||",
+  "|="   : "|=",
+  "&"    : "&amp;",
+  "&&"   : "&amp;&amp;",
+  "&="   : "&amp;=",
+  "+"    : "+",
+  "++"   : "++",
+  "+="   : "+=",
+  "-"    : "-",
+  "--"   : "--",
+  "-="   : "-=",
+  "/"    : "/",
+  "/="   : "/=",
+  "*"    : "*",
+  "*="   : "*=",
+  "%"    : "%",
+  "%="   : "%=",
+  "^"    : "^",
+  "^="   : "^=",
+  "~"    : "~",
+  "~="   : "~=",
+  ":"    : ":",
+  ";"    : ";",
+  "?"    : "?",
+  ","    : ",",
+  "$"    : "$",
+  "EOF"  : ""
+];