Mercurial > projects > dil
comparison trunk/src/main.d @ 487:bccca748d745
Added 'tokenize' command.
author | Aziz Köksal <aziz.koeksal@gmail.com> |
---|---|
date | Sat, 01 Dec 2007 20:20:44 +0100 |
parents | ea8c7459f1c4 |
children | 6160ab7b1816 |
comparison
equal
deleted
inserted
replaced
486:bd176bc73e43 | 487:bccca748d745 |
---|---|
91 cmd.ImportGraph.execute(filePath, includePaths, regexps, levels, options); | 91 cmd.ImportGraph.execute(filePath, includePaths, regexps, levels, options); |
92 break; | 92 break; |
93 case "stats", "statistics": | 93 case "stats", "statistics": |
94 cmd.Statistics.execute(args[2..$]); | 94 cmd.Statistics.execute(args[2..$]); |
95 break; | 95 break; |
96 case "tok", "tokenize": | |
97 char[] filePath; | |
98 char[] sourceText; | |
99 char[] separator; | |
100 bool ignoreWSToks; | |
101 bool printWS; | |
102 | |
103 foreach (arg; args[2..$]) | |
104 { | |
105 if (strbeg(arg, "-t")) | |
106 sourceText = arg[2..$]; | |
107 else if (strbeg(arg, "-s")) | |
108 separator = arg[2..$]; | |
109 else if (arg == "-i") | |
110 ignoreWSToks = true; | |
111 else if (arg == "-ws") | |
112 printWS = true; | |
113 else | |
114 filePath = arg; | |
115 } | |
116 | |
117 separator || (separator = "\n"); | |
118 sourceText || (sourceText = loadFile(filePath)); | |
119 | |
120 auto lx = new Lexer(sourceText, null); | |
121 lx.scanAll(); | |
122 auto token = lx.firstToken(); | |
123 | |
124 for (; token.type != TOK.EOF; token = token.next) | |
125 { | |
126 if (token.type == TOK.Newline || ignoreWSToks && token.isWhitespace) | |
127 continue; | |
128 if (printWS && token.ws) | |
129 Stdout(token.wsChars); | |
130 Stdout(token.srcText)(separator); | |
131 } | |
132 break; | |
96 case "parse": | 133 case "parse": |
97 if (args.length == 3) | 134 if (args.length == 3) |
98 parse(args[2]); | 135 parse(args[2]); |
99 break; | 136 break; |
100 case "?", "help": | 137 case "?", "help": |
109 | 146 |
110 const char[] COMMANDS = | 147 const char[] COMMANDS = |
111 " generate (gen)\n" | 148 " generate (gen)\n" |
112 " help (?)\n" | 149 " help (?)\n" |
113 " importgraph (igraph)\n" | 150 " importgraph (igraph)\n" |
114 " statistics (stats)\n"; | 151 " statistics (stats)\n" |
152 " tokenize (tok)\n"; | |
115 | 153 |
116 bool strbeg(char[] str, char[] begin) | 154 bool strbeg(char[] str, char[] begin) |
117 { | 155 { |
118 if (str.length >= begin.length) | 156 if (str.length >= begin.length) |
119 { | 157 { |
136 case "gen", "generate": | 174 case "gen", "generate": |
137 msg = GetMsg(MID.HelpGenerate); | 175 msg = GetMsg(MID.HelpGenerate); |
138 break; | 176 break; |
139 case "importgraph", "igraph": | 177 case "importgraph", "igraph": |
140 msg = GetMsg(MID.HelpImportGraph); | 178 msg = GetMsg(MID.HelpImportGraph); |
179 break; | |
180 case "tok", "tokenize": | |
181 msg = `Print the tokens of a D source file. | |
182 Usage: | |
183 dil tok file.d [Options] | |
184 | |
185 Options: | |
186 -tTEXT : tokenize TEXT instead of a file. | |
187 -sSEPARATOR : print SEPARATOR instead of newline between tokens. | |
188 -i : ignore whitespace tokens (e.g. comments, shebang etc.) | |
189 -ws : print a token's preceding whitespace characters. | |
190 | |
191 Example: | |
192 dil tok -t"module foo; void func(){}" | |
193 dil tok main.d | grep ^[0-9]`; | |
141 break; | 194 break; |
142 default: | 195 default: |
143 msg = helpMain(); | 196 msg = helpMain(); |
144 } | 197 } |
145 Stdout(msg).newline; | 198 Stdout(msg).newline; |