comparison src/cmd/Generate.d @ 806:bcb74c9b895c

Moved out files in the trunk folder to the root.
author Aziz Köksal <aziz.koeksal@gmail.com>
date Sun, 09 Mar 2008 00:12:19 +0100
parents trunk/src/cmd/Generate.d@dcd30b0ba711
children
comparison
equal deleted inserted replaced
805:a3fab8b74a7d 806:bcb74c9b895c
1 /++
2 Author: Aziz Köksal
3 License: GPL3
4 +/
5 module cmd.Generate;
6
7 import dil.ast.DefaultVisitor;
8 import dil.ast.Node,
9 dil.ast.Declaration,
10 dil.ast.Statement,
11 dil.ast.Expression,
12 dil.ast.Types;
13 import dil.lexer.Lexer;
14 import dil.parser.Parser;
15 import dil.semantic.Module;
16 import dil.SourceText;
17 import dil.Information;
18 import SettingsLoader;
19 import Settings;
20 import common;
21
22 import tango.io.GrowBuffer;
23 import tango.io.Print;
24
/// Options for the generate command.
/// The values are bit flags and may be combined (except Empty.)
enum GenOption
{
  Empty,             /// No option selected.
  Tokens = 1,        /// Token-level highlighting; presumably the default mode — not consulted in this module, verify against caller.
  Syntax = 1<<1,     /// Highlight the syntax tree; otherwise only tokens are highlighted.
  HTML = 1<<2,       /// Use the HTML tag map; otherwise the XML map is used.
  XML = 1<<3,        /// XML output.
  PrintLines = 1<<4  /// Print line numbers alongside the source.
}
35
/// Executes the generate command.
/// Params:
///   filePath = the source file to highlight.
///   options  = selects the output format and the highlighting mode.
///   infoMan  = collects error messages from loading the tag map.
void execute(string filePath, GenOption options, InfoManager infoMan)
{
  assert(options != GenOption.Empty);
  // Pick the tag map file that matches the requested output format.
  string mapFilePath = GlobalSettings.xmlMapFile;
  if (options & GenOption.HTML)
    mapFilePath = GlobalSettings.htmlMapFile;
  auto tags = new TagMap(TagMapLoader(infoMan).load(mapFilePath));

  if (infoMan.hasInfo)
    return; // Loading the tag map produced error messages; abort.

  if ((options & GenOption.Syntax) == 0)
    highlightTokens(filePath, tags, Stdout, options);
  else
    highlightSyntax(filePath, tags, Stdout, options);
}
53
/// Escapes the characters '<', '>' and '&' with named character entities.
char[] xml_escape(char[] text)
{
  char[] result;
  foreach (c; text)
  {
    if (c == '<')
      result ~= "&lt;";
    else if (c == '>')
      result ~= "&gt;";
    else if (c == '&')
      result ~= "&amp;";
    else
      result ~= c;
  }
  if (result.length == text.length)
  { // Nothing was escaped. Free the copy and hand back the original.
    delete result;
    return text;
  }
  return result;
}
72
/// Maps tokens to (format) strings.
///
/// The table is loaded from an HTML or XML tag map file. The format
/// strings are passed to print.format(); "{0}" inserts the token's
/// source text unchanged.
class TagMap
{
  string[string] table; /// The raw tag table, keyed by tag name.
  string[TOK.MAX] tokenTable; /// Per-token-kind format strings for O(1) lookup.

  /// Constructs a TagMap and caches frequently used entries.
  this(string[string] table)
  {
    this.table = table;
    // "{0}" (print the text as-is) is the fallback when a tag is
    // missing from the table; category tags fall back to one letter.
    Identifier = this["Identifier", "{0}"];
    String = this["String", "{0}"];
    Char = this["Char", "{0}"];
    Number = this["Number", "{0}"];
    Keyword = this["Keyword", "{0}"];
    LineC = this["LineC", "{0}"];
    BlockC = this["BlockC", "{0}"];
    NestedC = this["NestedC", "{0}"];
    Shebang = this["Shebang", "{0}"];
    HLine = this["HLine", "{0}"];
    Filespec = this["Filespec", "{0}"];
    Illegal = this["Illegal", "{0}"];
    Newline = this["Newline", "{0}"];
    SpecialToken = this["SpecialToken", "{0}"];
    Declaration = this["Declaration", "d"];
    Statement = this["Statement", "s"];
    Expression = this["Expression", "e"];
    Type = this["Type", "t"];
    Other = this["Other", "o"];
    EOF = this["EOF", ""];

    // Fill the per-token-kind table from entries named after each
    // token kind's string representation.
    foreach (i, tokStr; tokToString)
      if (auto pStr = tokStr in this.table)
        tokenTable[i] = *pStr;
  }

  /// Returns the value for str, or 'fallback' if str is not in the table.
  string opIndex(string str, string fallback = "")
  {
    auto p = str in table;
    if (p)
      return *p;
    return fallback;
  }

  /// Returns the value for tok in O(1) time.
  string opIndex(TOK tok)
  {
    return tokenTable[tok];
  }

  /// Shortcuts for quick access.
  string Identifier, String, Char, Number, Keyword, LineC, BlockC,
         NestedC, Shebang, HLine, Filespec, Illegal, Newline, SpecialToken,
         Declaration, Statement, Expression, Type, Other, EOF;

  /// Returns the tag for the category 'nc'.
  string getTag(NodeCategory nc)
  {
    string tag;
    switch (nc)
    { alias NodeCategory NC;
    case NC.Declaration: tag = Declaration; break;
    case NC.Statement: tag = Statement; break;
    case NC.Expression: tag = Expression; break;
    case NC.Type: tag = Type; break;
    case NC.Other: tag = Other; break;
    default: assert(0);
    }
    return tag;
  }
}
144
/// Find the last occurrence of object in subject.
/// Returns: the index if found, or -1 if not.
int rfind(char[] subject, char object)
{
  // Scan from the back; the first match is the last occurrence.
  for (int i = cast(int)subject.length - 1; i >= 0; i--)
    if (subject[i] == object)
      return i;
  return -1;
}
154
/// Returns the short class name of a class descending from Node.$(BR)
/// E.g.: dil.ast.Declarations.ClassDeclaration -> Class
char[] getShortClassName(Node node)
{
  static char[][] cache; // One slot per NodeKind, filled lazily.
  if (cache is null)
    cache = new char[][NodeKind.max+1];

  auto cached = cache[node.kind];
  if (cached !is null)
    return cached; // Computed before.

  // Fully qualified name, e.g. "dil.ast.Declarations.ClassDeclaration".
  char[] name = node.classinfo.name;
  // Drop everything up to and including the last dot.
  name = name[rfind(name, '.')+1 .. $];

  // Determine the category suffix to strip from the class name.
  char[] suffix;
  alias NodeCategory NC;
  switch (node.category)
  {
  case NC.Declaration: suffix = "Declaration"; break;
  case NC.Statement:   suffix = "Statement";   break;
  case NC.Expression:  suffix = "Expression";  break;
  case NC.Type:        suffix = "Type";        break;
  case NC.Other:       break; // No common suffix.
  default: assert(0);
  }
  name = name[0 .. $ - suffix.length];

  cache[node.kind] = name; // Remember for next time.
  return name;
}
197
/// Extended token structure.
/// Associates a lexer token with the syntax nodes whose source range
/// starts or ends at that token (filled in by TokenExBuilder.)
struct TokenEx
{
  Token* token; /// The lexer token.
  Node[] beginNodes; /// All nodes n for which n.begin == token.
  Node[] endNodes; /// All nodes n for which n.end == token.
}
205
/// Builds an array of TokenEx items.
///
/// First wraps every token of the list into a TokenEx, then traverses
/// the syntax tree (via DefaultVisitor) so that dispatch() can record,
/// for each node, at which tokens it begins and ends.
class TokenExBuilder : DefaultVisitor
{
  // Maps non-whitespace tokens to their TokenEx during the tree walk.
  private TokenEx*[Token*] tokenTable;

  /// Builds and returns the TokenEx array for the token list starting
  /// at 'first', annotated with the nodes of the tree under 'root'.
  TokenEx[] build(Node root, Token* first)
  {
    auto token = first;

    uint count; // Count tokens.
    for (; token; token = token.next)
      count++;
    // Create the exact number of TokenEx instances.
    auto toks = new TokenEx[count];
    token = first;
    foreach (ref tokEx; toks)
    {
      tokEx.token = token;
      if (!token.isWhitespace)
        tokenTable[token] = &tokEx; // Only non-whitespace tokens delimit nodes.
      token = token.next;
    }

    super.visitN(root); // Walk the tree; dispatch() fills begin/endNodes.
    tokenTable = null;  // Drop the helper table.
    return toks;
  }

  /// Returns the TokenEx for t; asserts that t is a known non-whitespace token.
  TokenEx* getTokenEx()(Token* t)
  {
    auto p = t in tokenTable;
    assert(p, t.srcText~" is not in tokenTable");
    return *p;
  }

  // Override dispatch function.
  // Records each visited node at its begin and end tokens.
  override Node dispatch(Node n)
  {
    auto begin = n.begin;
    if (begin)
    { assert(n.end);
      auto txbegin = getTokenEx(begin);
      auto txend = getTokenEx(n.end);
      txbegin.beginNodes ~= n;
      txend.endNodes ~= n;
    }
    return super.dispatch(n);
  }
}
255
/// Prints the lexer's error messages, formatted with the "LexerError" tag.
void printErrors(Lexer lx, TagMap tags, Print!(char) print)
{
  foreach (e; lx.errors)
    print.format(tags["LexerError"], e.filePath, e.loc, e.col, xml_escape(e.getMsg));
}
261
/// Prints the parser's error messages, formatted with the "ParserError" tag.
void printErrors(Parser parser, TagMap tags, Print!(char) print)
{
  foreach (e; parser.errors)
    print.format(tags["ParserError"], e.filePath, e.loc, e.col, xml_escape(e.getMsg));
}
267
/// Prints the line numbers 1 through 'lines', each formatted with the
/// "LineNumber" tag.
void printLines(uint lines, TagMap tags, Print!(char) print)
{
  auto lineNumberFormat = tags["LineNumber"]; // Hoisted out of the loop.
  // Use a uint counter: 'auto lineNum = 1' would declare a signed int
  // and compare it against the unsigned parameter.
  for (uint lineNum = 1; lineNum <= lines; lineNum++)
    print.format(lineNumberFormat, lineNum);
}
274
275 // void printMultiline(Token* token, TagMap tags, Print!(char) print)
276 // {
277 // }
278
/// Highlights the syntax in a source file.
/// Parses the file, pairs every token with the nodes beginning/ending
/// at it, and emits the document using the format strings in 'tags'.
/// Params:
///   filePath = the source file to parse and highlight.
///   tags     = the format strings (HTML or XML tag map.)
///   print    = the output stream.
///   options  = HTML vs. XML close tags, optional line numbers.
void highlightSyntax(string filePath, TagMap tags, Print!(char) print, GenOption options)
{
  auto parser = new Parser(new SourceText(filePath, true));
  auto root = parser.start();
  auto lx = parser.lexer;

  // Annotate each token with the nodes that begin/end at it.
  auto builder = new TokenExBuilder();
  auto tokenExList = builder.build(root, lx.firstToken());

  print(tags["DocHead"]);
  if (lx.errors.length || parser.errors.length)
  { // Output error messages.
    print(tags["CompBegin"]);
    printErrors(lx, tags, print);
    printErrors(parser, tags, print);
    print(tags["CompEnd"]);
  }

  if (options & GenOption.PrintLines)
  {
    print(tags["LineNumberBegin"]);
    printLines(lx.lineNum, tags, print);
    print(tags["LineNumberEnd"]);
  }

  print(tags["SourceBegin"]);

  auto tagNodeBegin = tags["NodeBegin"];
  auto tagNodeEnd = tags["NodeEnd"];

  // Iterate over list of tokens.
  foreach (ref tokenEx; tokenExList)
  {
    auto token = tokenEx.token;

    token.ws && print(token.wsChars); // Print preceding whitespace.
    if (token.isWhitespace) {
      // Whitespace-class tokens never begin or end a node
      // (build() excludes them from the token table.)
      printToken(token, tags, print);
      continue;
    }
    // <node>
    foreach (node; tokenEx.beginNodes)
      print.format(tagNodeBegin, tags.getTag(node.category), getShortClassName(node));
    // Token text.
    printToken(token, tags, print);
    // </node> -- closed in reverse order to keep the tags nested.
    if (options & GenOption.HTML)
      // HTML close tags take no arguments.
      foreach_reverse (node; tokenEx.endNodes)
        print(tagNodeEnd);
    else
      // XML close tags repeat the node's category.
      foreach_reverse (node; tokenEx.endNodes)
        print.format(tagNodeEnd, tags.getTag(node.category));
  }
  print(tags["SourceEnd"]);
  print(tags["DocEnd"]);
}
336
/// Highlights all tokens of a source file.
/// Unlike highlightSyntax(), this only lexes the file; no syntax tree
/// is built and no node tags are emitted.
void highlightTokens(string filePath, TagMap tags, Print!(char) print, GenOption options)
{
  auto lx = new Lexer(new SourceText(filePath, true));
  lx.scanAll();

  print(tags["DocHead"]);
  if (lx.errors.length)
  { // Output lexer error messages.
    print(tags["CompBegin"]);
    printErrors(lx, tags, print);
    print(tags["CompEnd"]);
  }

  if (options & GenOption.PrintLines)
  {
    print(tags["LineNumberBegin"]);
    printLines(lx.lineNum, tags, print);
    print(tags["LineNumberEnd"]);
  }

  print(tags["SourceBegin"]);
  // Traverse linked list and print tokens.
  for (auto token = lx.firstToken(); token; token = token.next) {
    token.ws && print(token.wsChars); // Print preceding whitespace.
    printToken(token, tags, print);
  }
  print(tags["SourceEnd"]);
  print(tags["DocEnd"]);
}
367
/// A token highlighter designed for DDoc.
class TokenHighlighter
{
  TagMap tags; /// Format strings loaded from the HTML or XML tag map file.

  /// Loads the tag map; HTML by default, XML when useHTML is false.
  this(InfoManager infoMan, bool useHTML = true)
  {
    string filePath = GlobalSettings.htmlMapFile;
    if (!useHTML)
      filePath = GlobalSettings.xmlMapFile;
    auto map = TagMapLoader(infoMan).load(filePath);
    tags = new TagMap(map);
  }

  /// Highlights tokens in a DDoc code section.
  /// Returns: a string with the highlighted tokens (in HTML tags.)
  string highlight(string text, string filePath)
  {
    // Collect the output in an in-memory buffer.
    auto buffer = new GrowBuffer(text.length);
    auto print = new Print!(char)(Format, buffer);

    auto lx = new Lexer(new SourceText(filePath, text));
    lx.scanAll();

    // Wrap the whole output in DDoc's D_CODE macro.
    print("$(D_CODE\n");
    if (lx.errors.length)
    { // Output error messages.
      print(tags["CompBegin"]);
      printErrors(lx, tags, print);
      print(tags["CompEnd"]);
    }
    // Traverse linked list and print tokens.
    for (auto token = lx.firstToken(); token; token = token.next) {
      token.ws && print(token.wsChars); // Print preceding whitespace.
      printToken(token, tags, print);
    }
    print("\n)");
    return cast(char[])buffer.slice();
  }
}
408
/// Prints a token to the stream print.
/// Selects the format string by the token's kind; source text that may
/// contain '<', '>' or '&' is run through xml_escape() first.
void printToken(Token* token, TagMap tags, Print!(char) print)
{
  switch(token.kind)
  {
  case TOK.Identifier:
    print.format(tags.Identifier, token.srcText);
    break;
  case TOK.Comment:
    // The second character distinguishes "//", "/*" and "/+".
    string formatStr;
    switch (token.start[1])
    {
    case '/': formatStr = tags.LineC; break;
    case '*': formatStr = tags.BlockC; break;
    case '+': formatStr = tags.NestedC; break;
    default: assert(0);
    }
    print.format(formatStr, xml_escape(token.srcText));
    break;
  case TOK.String:
    print.format(tags.String, xml_escape(token.srcText));
    break;
  case TOK.CharLiteral:
    print.format(tags.Char, xml_escape(token.srcText));
    break;
  case TOK.Int32, TOK.Int64, TOK.Uint32, TOK.Uint64,
       TOK.Float32, TOK.Float64, TOK.Float80,
       TOK.Imaginary32, TOK.Imaginary64, TOK.Imaginary80:
    print.format(tags.Number, token.srcText);
    break;
  case TOK.Shebang:
    print.format(tags.Shebang, xml_escape(token.srcText));
    break;
  case TOK.HashLine:
    // A #line token is rendered in parts (keyword, number, optional
    // filespec), assembled in a temporary buffer first.
    auto formatStr = tags.HLine;
    // The text to be inserted into formatStr.
    auto buffer = new GrowBuffer;
    auto print2 = new Print!(char)(Format, buffer);

    // Prints the raw slice between two source pointers, if non-empty.
    void printWS(char* start, char* end)
    {
      start != end && print2(start[0 .. end - start]);
    }

    auto num = token.tokLineNum;
    if (num is null)
    { // Malformed #line: print the token as-is.
      print.format(formatStr, token.srcText);
      break;
    }

    // Print whitespace between #line and number.
    printWS(token.start, num.start); // Prints "#line" as well.
    printToken(num, tags, print2); // Print the number.

    if (auto filespec = token.tokLineFilespec)
    { // Print whitespace between number and filespec.
      printWS(num.end, filespec.start);
      print2.format(tags.Filespec, xml_escape(filespec.srcText));
    }
    // Finally print the whole token.
    print.format(formatStr, cast(char[])buffer.slice());
    break;
  case TOK.Illegal:
    print.format(tags.Illegal, token.srcText());
    break;
  case TOK.Newline:
    print.format(tags.Newline, token.srcText());
    break;
  case TOK.EOF:
    print(tags.EOF);
    break;
  default:
    // Everything else: keywords, special tokens, and any kind that has
    // an entry in the per-kind token table.
    if (token.isKeyword())
      print.format(tags.Keyword, token.srcText);
    else if (token.isSpecialToken)
      print.format(tags.SpecialToken, token.srcText);
    else
      print(tags[token.kind]);
  }
}