diff trunk/src/cmd/Generate.d @ 485:ea8c7459f1c4

Changed a lot of things in the Lexer. Newlines are tokenized now, instead of being treated as whitespace. Newline tokens store location info as well, which makes quite a few functions unnecessary. Added a static method getLocation() which returns a Location instance for any given token. This will also be very useful for finding the location of AST nodes (through Node.begin), which is needed for reporting parser and semantic errors and emitting documentation. Removed rescanNewlines(), LocState, getState(), restoreState(), evaluateHashLine() and updateErrorLoc(). Added isUnicodeNewlineChar(), isUnicodeNewline(), isNewline(), isNewlineEnd(), isEndOfLine(), scanNewline(), getLocation() and error_(). Replaced some clunky expressions with isascii(), isNewlineEnd(), isEndOfLine(), isUnicodeNewline() and isUnicodeNewlineChar(). Fix in scanNormalStringLiteral(): scanPostfix() must come before the label Lreturn. Fixed the Lexer unittest. Fix in parseDeclarationDefinitionsBlock(): 'token' should be 'begin'. Added method isMultiline() to Token and added documentation comments. (A sketch of the new newline helpers follows this header.)
author Aziz Köksal <aziz.koeksal@gmail.com>
date Fri, 30 Nov 2007 20:17:29 +0100
parents ad7977fe315a
children 996041463028
line wrap: on
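
The commit message above names several new Lexer helpers without showing them. Below is a minimal, self-contained D sketch of what isUnicodeNewlineChar() and getLocation() might look like; the TOK/Location/Token definitions, their fields (prev, filePath, lineNum) and the lookup strategy are assumptions for illustration only, not dil's actual implementation.

module lexer_sketch; // hypothetical module, not part of dil

// Unicode line and paragraph separator characters, which D source
// code accepts as newlines in addition to \n, \r and \r\n.
const dchar LSchar = 0x2028;
const dchar PSchar = 0x2029;

/// True if c is one of the Unicode newline characters LS or PS.
bool isUnicodeNewlineChar(dchar c)
{
  return c == LSchar || c == PSchar;
}

/// Simplified stand-ins for dil's real types (assumed shapes).
enum TOK { Newline, Other }

class Location
{
  char[] filePath;
  uint lineNum;
  this(char[] filePath, uint lineNum)
  {
    this.filePath = filePath;
    this.lineNum = lineNum;
  }
}

struct Token
{
  TOK kind;
  Token* prev;     // previous token in the token list
  char[] filePath; // filled in on newline tokens only (assumption)
  uint lineNum;    // ditto
}

/// Returns a Location for any token by walking back to the nearest
/// newline token, which carries the stored line number and file path.
Location getLocation(Token* token)
{
  auto t = token;
  while (t.kind != TOK.Newline && t.prev !is null)
    t = t.prev;
  return new Location(t.filePath, t.lineNum);
}

Presumably it is this on-demand lookup that makes the removed LocState/getState()/restoreState() machinery unnecessary: locations no longer have to be tracked as mutable lexer state, they can be recomputed from the token stream when an error or a documentation comment needs one.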
line diff
--- a/trunk/src/cmd/Generate.d	Mon Nov 26 20:18:54 2007 +0100
+++ b/trunk/src/cmd/Generate.d	Fri Nov 30 20:17:29 2007 +0100
@@ -78,7 +78,11 @@
   switch (n.category)
   {
   alias NodeCategory NC;
-  case NC.Declaration: remove = "Declaration"; break;
+  case NC.Declaration:
+    if (n.kind == NodeKind.Declarations)
+      return name;
+    remove = "Declaration";
+    break;
   case NC.Statement:
     if (n.kind == NodeKind.Statements)
       return name;
@@ -504,7 +508,7 @@
         print(start[0 .. end - start]);
     }
     print(tags[DP.HLineBegin]);
-    auto num = token.line_num;
+    auto num = token.tokLineNum;
     if (num is null)
     {
       print(token.srcText);
@@ -515,9 +519,9 @@
     auto ptr = token.start;
     printWS(ptr, num.start); // prints "#line" as well
     printToken(num, tags, print);
-    if (token.line_filespec)
+    if (token.tokLineFilespec)
     {
-      auto filespec = token.line_filespec;
+      auto filespec = token.tokLineFilespec;
       // Print whitespace between number and filespec
       printWS(num.end, filespec.start);
       print.format(tags[DP.Filespec], xml_escape(filespec.srcText));