changeset 756:804111ec8213

Improved command 'tokenize'.
author Aziz Köksal <aziz.koeksal@gmail.com>
date Wed, 13 Feb 2008 21:46:24 +0100
parents 90668b83ae5e
children e4b60543c5e8
files trunk/src/main.d
diffstat 1 files changed, 25 insertions(+), 8 deletions(-) [+]
line wrap: on
line diff
--- a/trunk/src/main.d	Wed Feb 13 20:21:25 2008 +0100
+++ b/trunk/src/main.d	Wed Feb 13 21:46:24 2008 +0100
@@ -33,6 +33,7 @@
 import common;
 
 import Integer = tango.text.convert.Integer;
+import tango.stdc.stdio;
 import tango.io.File;
 import tango.text.Util;
 import tango.time.StopWatch;
@@ -195,18 +196,18 @@
     cmd.Statistics.execute(filePaths, printTokensTable, printNodesTable);
     break;
   case "tok", "tokenize":
+    SourceText sourceText;
     char[] filePath;
-    SourceText sourceText;
     char[] separator;
     bool ignoreWSToks;
     bool printWS;
 
     foreach (arg; args[2..$])
     {
-      if (strbeg(arg, "-t"))
-        sourceText = new SourceText("-t", arg[2..$]);
-      else if (strbeg(arg, "-s"))
+      if (strbeg(arg, "-s"))
         separator = arg[2..$];
+      else if (arg == "-")
+        sourceText = new SourceText("stdin", readStdin());
       else if (arg == "-i")
         ignoreWSToks = true;
       else if (arg == "-ws")
@@ -216,10 +217,11 @@
     }
 
     separator || (separator = "\n");
-    if (sourceText)
+    if (!sourceText)
       sourceText = new SourceText(filePath, true);
 
-    auto lx = new Lexer(sourceText, null);
+    auto infoMan = new InfoManager();
+    auto lx = new Lexer(sourceText, infoMan);
     lx.scanAll();
     auto token = lx.firstToken();
 
@@ -231,6 +233,8 @@
         Stdout(token.wsChars);
       Stdout(token.srcText)(separator);
     }
+
+    printErrors(infoMan);
     break;
   case "trans", "translate":
     if (args.length < 3)
@@ -282,6 +286,19 @@
   }
 }
 
+char[] readStdin()
+{
+  char[] text;
+  while (1)
+  {
+    auto c = getc(stdin);
+    if (c == EOF)
+      break;
+    text ~= c;
+  }
+  return text;
+}
+
 const char[] COMMANDS =
   "  compile (c)\n"
   "  ddoc (d)\n"
@@ -405,13 +422,13 @@
   dil tok file.d [Options]
 
 Options:
-  -tTEXT          : tokenize TEXT instead of a file.
+  -               : reads text from the standard input.
   -sSEPARATOR     : print SEPARATOR instead of newline between tokens.
   -i              : ignore whitespace tokens (e.g. comments, shebang etc.)
   -ws             : print a token's preceding whitespace characters.
 
 Example:
-  dil tok -t"module foo; void func(){}"
+  echo "module foo; void func(){}" | dil tok -
   dil tok main.d | grep ^[0-9]`;
     break;
   case "stats", "statistics":