Mercurial > projects > qtd
comparison generator/parser/lexer.cpp @ 1:e78566595089
initial import
author | mandel |
---|---|
date | Mon, 11 May 2009 16:01:50 +0000 |
parents | |
children | 09a0f1d048f2 |
comparison
equal
deleted
inserted
replaced
0:36fb74dc547d | 1:e78566595089 |
---|---|
1 /**************************************************************************** | |
2 ** | |
3 ** Copyright (C) 1992-2008 Nokia. All rights reserved. | |
4 ** Copyright (C) 2002-2005 Roberto Raggi <roberto@kdevelop.org> | |
5 ** | |
6 ** This file is part of Qt Jambi. | |
7 ** | |
8 ** * Commercial Usage | |
9 * Licensees holding valid Qt Commercial licenses may use this file in | |
10 * accordance with the Qt Commercial License Agreement provided with the | |
11 * Software or, alternatively, in accordance with the terms contained in | |
12 * a written agreement between you and Nokia. | |
13 * | |
14 * | |
15 * GNU General Public License Usage | |
16 * Alternatively, this file may be used under the terms of the GNU | |
17 * General Public License versions 2.0 or 3.0 as published by the Free | |
18 * Software Foundation and appearing in the file LICENSE.GPL included in | |
19 * the packaging of this file. Please review the following information | |
20 * to ensure GNU General Public Licensing requirements will be met: | |
21 * http://www.fsf.org/licensing/licenses/info/GPLv2.html and | |
22 * http://www.gnu.org/copyleft/gpl.html. In addition, as a special | |
23 * exception, Nokia gives you certain additional rights. These rights | |
24 * are described in the Nokia Qt GPL Exception version 1.2, included in | |
25 * the file GPL_EXCEPTION.txt in this package. | |
26 * | |
27 * Qt for Windows(R) Licensees | |
28 * As a special exception, Nokia, as the sole copyright holder for Qt | |
29 * Designer, grants users of the Qt/Eclipse Integration plug-in the | |
30 * right for the Qt/Eclipse Integration to link to functionality | |
31 * provided by Qt Designer and its related libraries. | |
32 * | |
33 * | |
34 * If you are unsure which license is appropriate for your use, please | |
35 * contact the sales department at qt-sales@nokia.com. | |
36 | |
37 ** | |
38 ** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE | |
39 ** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | |
40 ** | |
41 ****************************************************************************/ | |
42 | |
43 | |
44 #include "lexer.h" | |
45 #include "tokens.h" | |
46 #include "control.h" | |
47 | |
48 #include <cctype> | |
49 #include <iostream> | |
50 | |
// Keyword-recognizer dispatch table indexed by identifier LENGTH (0..16).
// Lengths with no keywords (0, 1 and 15) map to scanKeyword0, which simply
// emits Token_identifier; lengths >= 17 are clamped to index 0 by the
// caller (see scan_identifier_or_keyword).
scan_fun_ptr Lexer::s_scan_keyword_table[] = {
    &Lexer::scanKeyword0, &Lexer::scanKeyword0,
    &Lexer::scanKeyword2, &Lexer::scanKeyword3,
    &Lexer::scanKeyword4, &Lexer::scanKeyword5,
    &Lexer::scanKeyword6, &Lexer::scanKeyword7,
    &Lexer::scanKeyword8, &Lexer::scanKeyword9,
    &Lexer::scanKeyword10, &Lexer::scanKeyword11,
    &Lexer::scanKeyword12, &Lexer::scanKeyword13,
    &Lexer::scanKeyword14, &Lexer::scanKeyword0,
    &Lexer::scanKeyword16
};
62 | |
63 void LocationManager::extract_line(int offset, int *line, QString *filename) const | |
64 { | |
65 *line = 0; | |
66 if (token_stream.size () < 1) | |
67 return; | |
68 | |
69 const unsigned char *begin_buffer = reinterpret_cast<const unsigned char *>(token_stream[0].text); | |
70 const unsigned char *cursor = begin_buffer + offset; | |
71 | |
72 ++cursor; // skip '#' | |
73 if (std::isspace(*cursor) && std::isdigit(*(cursor + 1))) | |
74 { | |
75 ++cursor; | |
76 char buffer[1024], *cp = buffer; | |
77 do { | |
78 *cp++ = *cursor++; | |
79 } while (std::isdigit(*cursor)); | |
80 *cp = '\0'; | |
81 int l = strtol(buffer, 0, 0); | |
82 | |
83 Q_ASSERT(std::isspace(*cursor)); | |
84 ++cursor; | |
85 | |
86 Q_ASSERT(*cursor == '"'); | |
87 ++cursor; | |
88 | |
89 cp = buffer; | |
90 while (*cursor && *cursor != '"') { | |
91 *cp++ = *cursor++; | |
92 } | |
93 *cp = '\0'; | |
94 Q_ASSERT(*cursor == '"'); | |
95 ++cursor; | |
96 | |
97 *filename = buffer; | |
98 *line = l; | |
99 // printf("filename: %s line: %d\n", buffer, line); | |
100 } | |
101 } | |
102 | |
// Map an absolute offset in the preprocessed buffer back to a (line,
// column, filename) triple of the original source, using the '#' line
// markers the preprocessor left behind.
void LocationManager::positionAt(std::size_t offset, int *line, int *column,
                                 QString *filename) const
{
    // ppline: 1-based index of the last '#' marker at or before 'offset'
    int ppline, ppcolumn;
    line_table.positionAt(offset, &ppline, &ppcolumn);

    // base_line: the source line number announced by that marker
    int base_line;
    extract_line((int) line_table[ppline-1], &base_line, filename);

    // line2: physical line (within the buffer) on which the marker sits
    int line2, column2;
    location_table.positionAt((int) line_table[ppline-1], &line2, &column2);

    // physical line of 'offset', rebased relative to the marker's line
    location_table.positionAt(offset, line, column);
    *line = base_line + *line - line2 - 1;
}
118 | |
// Per-leading-byte dispatch table, filled lazily by initialize_scan_table()
// the first time tokenize() runs.
scan_fun_ptr Lexer::s_scan_table[256];
bool Lexer::s_initialized = false;
121 | |
122 void Lexer::tokenize(const char *contents, std::size_t size) | |
123 { | |
124 if (!s_initialized) | |
125 initialize_scan_table(); | |
126 | |
127 token_stream.resize(1024); | |
128 token_stream[0].kind = Token_EOF; | |
129 token_stream[0].text = contents; | |
130 | |
131 index = 1; | |
132 | |
133 cursor = (const unsigned char *) contents; | |
134 begin_buffer = (const unsigned char *) contents; | |
135 end_buffer = cursor + size; | |
136 | |
137 location_table.resize(1024); | |
138 location_table[0] = 0; | |
139 location_table.current_line = 1; | |
140 | |
141 line_table.resize(1024); | |
142 line_table[0] = 0; | |
143 line_table.current_line = 1; | |
144 | |
145 do { | |
146 if (index == token_stream.size()) | |
147 token_stream.resize(token_stream.size() * 2); | |
148 | |
149 Token *current_token = &token_stream[(int) index]; | |
150 current_token->text = reinterpret_cast<const char*>(begin_buffer); | |
151 current_token->position = cursor - begin_buffer; | |
152 (this->*s_scan_table[*cursor])(); | |
153 current_token->size = cursor - begin_buffer - current_token->position; | |
154 } while (cursor < end_buffer); | |
155 | |
156 if (index == token_stream.size()) | |
157 token_stream.resize(token_stream.size() * 2); | |
158 | |
159 Q_ASSERT(index < token_stream.size()); | |
160 token_stream[(int) index].position = cursor - begin_buffer; | |
161 token_stream[(int) index].kind = Token_EOF; | |
162 } | |
163 | |
164 void Lexer::reportError(const QString& msg) | |
165 { | |
166 int line, column; | |
167 QString fileName; | |
168 | |
169 std::size_t tok = token_stream.cursor(); | |
170 _M_location.positionAt(token_stream.position(tok), | |
171 &line, &column, &fileName); | |
172 | |
173 Control::ErrorMessage errmsg; | |
174 errmsg.setLine(line + 1); | |
175 errmsg.setColumn(column); | |
176 errmsg.setFileName(fileName); | |
177 errmsg.setMessage(QLatin1String("** LEXER ERROR ") + msg); | |
178 control->reportError(errmsg); | |
179 } | |
180 | |
// Build the 256-entry dispatch table that maps a leading input byte to its
// scan routine.  Invoked lazily from tokenize() on first use.
void Lexer::initialize_scan_table()
{
    s_initialized = true;

    // defaults derived from character class
    for (int i=0; i<256; ++i)
    {
        if (isspace(i))
            s_scan_table[i] = &Lexer::scan_white_spaces;
        else if (isalpha(i) || i == '_')
            s_scan_table[i] = &Lexer::scan_identifier_or_keyword;
        else if (isdigit(i))
            s_scan_table[i] = &Lexer::scan_int_constant;
        else
            s_scan_table[i] = &Lexer::scan_invalid_input;
    }

    // 'L' may start a wide literal (L'x' / L"...") rather than an identifier
    s_scan_table[int('L')] = &Lexer::scan_identifier_or_literal;
    // newline has its own handler so line offsets are recorded
    s_scan_table[int('\n')] = &Lexer::scan_newline;
    s_scan_table[int('#')] = &Lexer::scan_preprocessor;

    s_scan_table[int('\'')] = &Lexer::scan_char_constant;
    s_scan_table[int('"')] = &Lexer::scan_string_constant;

    // '.' may begin a float literal; scan_int_constant falls back to
    // scan_dot when no digit follows
    s_scan_table[int('.')] = &Lexer::scan_int_constant;

    s_scan_table[int('!')] = &Lexer::scan_not;
    s_scan_table[int('%')] = &Lexer::scan_remainder;
    s_scan_table[int('&')] = &Lexer::scan_and;
    s_scan_table[int('(')] = &Lexer::scan_left_paren;
    s_scan_table[int(')')] = &Lexer::scan_right_paren;
    s_scan_table[int('*')] = &Lexer::scan_star;
    s_scan_table[int('+')] = &Lexer::scan_plus;
    s_scan_table[int(',')] = &Lexer::scan_comma;
    s_scan_table[int('-')] = &Lexer::scan_minus;
    s_scan_table[int('/')] = &Lexer::scan_divide;
    s_scan_table[int(':')] = &Lexer::scan_colon;
    s_scan_table[int(';')] = &Lexer::scan_semicolon;
    s_scan_table[int('<')] = &Lexer::scan_less;
    s_scan_table[int('=')] = &Lexer::scan_equal;
    s_scan_table[int('>')] = &Lexer::scan_greater;
    s_scan_table[int('?')] = &Lexer::scan_question;
    s_scan_table[int('[')] = &Lexer::scan_left_bracket;
    s_scan_table[int(']')] = &Lexer::scan_right_bracket;
    s_scan_table[int('^')] = &Lexer::scan_xor;
    s_scan_table[int('{')] = &Lexer::scan_left_brace;
    s_scan_table[int('|')] = &Lexer::scan_or;
    s_scan_table[int('}')] = &Lexer::scan_right_brace;
    s_scan_table[int('~')] = &Lexer::scan_tilde;

    // NUL terminator marks end of input
    s_scan_table[0] = &Lexer::scan_EOF;
}
232 | |
233 void Lexer::scan_preprocessor() | |
234 { | |
235 if (line_table.current_line == line_table.size()) | |
236 line_table.resize(line_table.current_line * 2); | |
237 | |
238 line_table[(int) line_table.current_line++] = (cursor - begin_buffer); | |
239 | |
240 while (*cursor && *cursor != '\n') | |
241 ++cursor; | |
242 | |
243 if (*cursor != '\n') | |
244 reportError("expected newline"); | |
245 } | |
246 | |
// Scan a character literal starting at the opening quote.  The whole
// lexeme (quotes and escapes included) is interned as the token's symbol.
void Lexer::scan_char_constant()
{
    const unsigned char *begin = cursor;

    ++cursor;
    while (*cursor && *cursor != '\'')
    {
        if (*cursor == '\n')
            reportError("did not expect newline");

        // skip the byte after a backslash so an escaped quote (\') does
        // not terminate the literal
        if (*cursor == '\\')
            ++cursor;
        ++cursor;
    }

    if (*cursor != '\'')
        reportError("expected \'");

    // NOTE(review): if input ends inside the literal this advances past
    // the terminating NUL — presumably the buffer always has a sentinel;
    // confirm against tokenize()'s loop condition.
    ++cursor;

    token_stream[(int) index].extra.symbol =
        control->findOrInsertName((const char*) begin, cursor - begin);

    token_stream[(int) index++].kind = Token_char_literal;
}
272 | |
// Scan a string literal starting at the opening '"'.  The whole lexeme
// (quotes and escapes included) is interned as the token's symbol.
void Lexer::scan_string_constant()
{
    const unsigned char *begin = cursor;

    ++cursor;
    while (*cursor && *cursor != '"')
    {
        if (*cursor == '\n')
            reportError("did not expect newline");

        // skip the byte after a backslash so an escaped quote (\") does
        // not terminate the literal
        if (*cursor == '\\')
            ++cursor;
        ++cursor;
    }

    if (*cursor != '"')
        reportError("expected \"");

    // NOTE(review): like scan_char_constant, this can step past a
    // terminating NUL on unterminated input — confirm buffer sentinel.
    ++cursor;

    token_stream[(int) index].extra.symbol =
        control->findOrInsertName((const char*) begin, cursor - begin);

    token_stream[(int) index++].kind = Token_string_literal;
}
298 | |
299 void Lexer::scan_newline() | |
300 { | |
301 if (location_table.current_line == location_table.size()) | |
302 location_table.resize(location_table.current_line * 2); | |
303 | |
304 location_table[(int) location_table.current_line++] = (cursor - begin_buffer); | |
305 ++cursor; | |
306 } | |
307 | |
308 void Lexer::scan_white_spaces() | |
309 { | |
310 while (isspace(*cursor)) | |
311 { | |
312 if (*cursor == '\n') | |
313 scan_newline(); | |
314 else | |
315 ++cursor; | |
316 } | |
317 } | |
318 | |
319 void Lexer::scan_identifier_or_literal() | |
320 { | |
321 switch (*(cursor + 1)) | |
322 { | |
323 case '\'': | |
324 ++cursor; | |
325 scan_char_constant(); | |
326 break; | |
327 | |
328 case '\"': | |
329 ++cursor; | |
330 scan_string_constant(); | |
331 break; | |
332 | |
333 default: | |
334 scan_identifier_or_keyword(); | |
335 break; | |
336 } | |
337 } | |
338 | |
// Scan an identifier and classify it.  The keyword recognizers are
// dispatched on the identifier's LENGTH and write the token kind; anything
// they don't recognize becomes Token_identifier and gets its name interned.
void Lexer::scan_identifier_or_keyword()
{
    const unsigned char *skip = cursor;
    while (isalnum(*skip) || *skip== '_')
        ++skip;

    int n = skip - cursor;
    // pointer stays valid: the keyword scanners advance 'index' but never
    // resize token_stream
    Token *current_token = &token_stream[(int) index];
    // no keyword is 17+ characters, so clamp to scanKeyword0 (identifier)
    (this->*s_scan_keyword_table[n < 17 ? n : 0])();

    if (current_token->kind == Token_identifier)
    {
        current_token->extra.symbol =
            control->findOrInsertName((const char*) cursor, n);
    }

    cursor = skip;
}
357 | |
358 void Lexer::scan_int_constant() | |
359 { | |
360 if (*cursor == '.' && !std::isdigit(*(cursor + 1))) | |
361 { | |
362 scan_dot(); | |
363 return; | |
364 } | |
365 | |
366 const unsigned char *begin = cursor; | |
367 | |
368 while (isalnum(*cursor) || *cursor == '.') | |
369 ++cursor; | |
370 | |
371 token_stream[(int) index].extra.symbol = | |
372 control->findOrInsertName((const char*) begin, cursor - begin); | |
373 | |
374 token_stream[(int) index++].kind = Token_number_literal; | |
375 } | |
376 | |
377 void Lexer::scan_not() | |
378 { | |
379 /* | |
380 '!' ::= not | |
381 '!=' ::= not_equal | |
382 */ | |
383 | |
384 ++cursor; | |
385 | |
386 if (*cursor == '=') | |
387 { | |
388 ++cursor; | |
389 token_stream[(int) index++].kind = Token_not_eq; | |
390 } | |
391 else | |
392 { | |
393 token_stream[(int) index++].kind = '!'; | |
394 } | |
395 } | |
396 | |
397 void Lexer::scan_remainder() | |
398 { | |
399 /* | |
400 '%' ::= remainder | |
401 '%=' ::= remainder_equal | |
402 */ | |
403 | |
404 ++cursor; | |
405 | |
406 if (*cursor == '=') | |
407 { | |
408 ++cursor; | |
409 token_stream[(int) index++].kind = Token_assign; | |
410 } | |
411 else | |
412 { | |
413 token_stream[(int) index++].kind = '%'; | |
414 } | |
415 } | |
416 | |
417 void Lexer::scan_and() | |
418 { | |
419 /* | |
420 '&&' ::= and_and | |
421 '&' ::= and | |
422 '&=' ::= and_equal | |
423 */ | |
424 | |
425 ++cursor; | |
426 if (*cursor == '=') | |
427 { | |
428 ++cursor; | |
429 token_stream[(int) index++].kind = Token_assign; | |
430 } | |
431 else if (*cursor == '&') | |
432 { | |
433 ++cursor; | |
434 token_stream[(int) index++].kind = Token_and; | |
435 } | |
436 else | |
437 { | |
438 token_stream[(int) index++].kind = '&'; | |
439 } | |
440 } | |
441 | |
442 void Lexer::scan_left_paren() | |
443 { | |
444 ++cursor; | |
445 token_stream[(int) index++].kind = '('; | |
446 } | |
447 | |
448 void Lexer::scan_right_paren() | |
449 { | |
450 ++cursor; | |
451 token_stream[(int) index++].kind = ')'; | |
452 } | |
453 | |
454 void Lexer::scan_star() | |
455 { | |
456 /* | |
457 '*' ::= star | |
458 '*=' ::= star_equal | |
459 */ | |
460 | |
461 ++cursor; | |
462 | |
463 if (*cursor == '=') | |
464 { | |
465 ++cursor; | |
466 token_stream[(int) index++].kind = Token_assign; | |
467 } | |
468 else | |
469 { | |
470 token_stream[(int) index++].kind = '*'; | |
471 } | |
472 } | |
473 | |
474 void Lexer::scan_plus() | |
475 { | |
476 /* | |
477 '+' ::= plus | |
478 '++' ::= incr | |
479 '+=' ::= plus_equal | |
480 */ | |
481 | |
482 ++cursor; | |
483 if (*cursor == '=') | |
484 { | |
485 ++cursor; | |
486 token_stream[(int) index++].kind = Token_assign; | |
487 } | |
488 else if (*cursor == '+') | |
489 { | |
490 ++cursor; | |
491 token_stream[(int) index++].kind = Token_incr; | |
492 } | |
493 else | |
494 { | |
495 token_stream[(int) index++].kind = '+'; | |
496 } | |
497 } | |
498 | |
499 void Lexer::scan_comma() | |
500 { | |
501 ++cursor; | |
502 token_stream[(int) index++].kind = ','; | |
503 } | |
504 | |
void Lexer::scan_minus()
{
    /*
      '-'  ::= minus
      '--' ::= decr
      '-=' ::= minus_equal
      '->' ::= left_arrow
    */

    ++cursor;
    if (*cursor == '=')
    {
        ++cursor;
        token_stream[(int) index++].kind = Token_assign;
    }
    else if (*cursor == '-')
    {
        ++cursor;
        token_stream[(int) index++].kind = Token_decr;
    }
    else if (*cursor == '>')
    {
        ++cursor;
        token_stream[(int) index++].kind = Token_arrow;
        // "->*" emits TWO tokens from a single scan call (Token_arrow then
        // Token_ptrmem); callers sizing the token stream must allow for it
        if (*cursor == '*')
        {
            ++cursor;
            token_stream[(int) index++].kind = Token_ptrmem;
        }
    }
    else
    {
        token_stream[(int) index++].kind = '-';
    }
}
540 | |
void Lexer::scan_dot()
{
    /*
      '.'   ::= dot
      '...' ::= ellipsis
    */

    // NOTE(review): the second branch below matches "..*" (dot, dot, star)
    // rather than the ".*" pointer-to-member operator — a plain ".*" falls
    // through to the else branch and is emitted as '.' followed by a
    // separate '*' token.  Possibly intentional (the parser may recombine
    // them, cf. scan_minus emitting arrow+ptrmem) — confirm before changing.
    ++cursor;
    if (*cursor == '.' && *(cursor + 1) == '.')
    {
        cursor += 2;
        token_stream[(int) index++].kind = Token_ellipsis;
    }
    else if (*cursor == '.' && *(cursor + 1) == '*')
    {
        cursor += 2;
        token_stream[(int) index++].kind = Token_ptrmem;
    }
    else
        token_stream[(int) index++].kind = '.';
}
562 | |
563 void Lexer::scan_divide() | |
564 { | |
565 /* | |
566 '/' ::= divide | |
567 '/=' ::= divide_equal | |
568 */ | |
569 | |
570 ++cursor; | |
571 | |
572 if (*cursor == '=') | |
573 { | |
574 ++cursor; | |
575 token_stream[(int) index++].kind = Token_assign; | |
576 } | |
577 else | |
578 { | |
579 token_stream[(int) index++].kind = '/'; | |
580 } | |
581 } | |
582 | |
583 void Lexer::scan_colon() | |
584 { | |
585 ++cursor; | |
586 if (*cursor == ':') | |
587 { | |
588 ++cursor; | |
589 token_stream[(int) index++].kind = Token_scope; | |
590 } | |
591 else | |
592 { | |
593 token_stream[(int) index++].kind = ':'; | |
594 } | |
595 } | |
596 | |
597 void Lexer::scan_semicolon() | |
598 { | |
599 ++cursor; | |
600 token_stream[(int) index++].kind = ';'; | |
601 } | |
602 | |
603 void Lexer::scan_less() | |
604 { | |
605 /* | |
606 '<' ::= less | |
607 '<<' ::= left_shift | |
608 '<<=' ::= left_shift_equal | |
609 '<=' ::= less_equal | |
610 */ | |
611 | |
612 ++cursor; | |
613 if (*cursor == '=') | |
614 { | |
615 ++cursor; | |
616 token_stream[(int) index++].kind = Token_leq; | |
617 } | |
618 else if (*cursor == '<') | |
619 { | |
620 ++cursor; | |
621 if (*cursor == '=') | |
622 { | |
623 ++cursor; | |
624 token_stream[(int) index++].kind = Token_assign; | |
625 } | |
626 else | |
627 { | |
628 token_stream[(int) index++].kind = Token_shift; | |
629 } | |
630 } | |
631 else | |
632 { | |
633 token_stream[(int) index++].kind = '<'; | |
634 } | |
635 } | |
636 | |
637 void Lexer::scan_equal() | |
638 { | |
639 /* | |
640 '=' ::= equal | |
641 '==' ::= equal_equal | |
642 */ | |
643 ++cursor; | |
644 | |
645 if (*cursor == '=') | |
646 { | |
647 ++cursor; | |
648 token_stream[(int) index++].kind = Token_eq; | |
649 } | |
650 else | |
651 { | |
652 token_stream[(int) index++].kind = '='; | |
653 } | |
654 } | |
655 | |
656 void Lexer::scan_greater() | |
657 { | |
658 /* | |
659 '>' ::= greater | |
660 '>=' ::= greater_equal | |
661 '>>' ::= right_shift | |
662 '>>=' ::= right_shift_equal | |
663 */ | |
664 | |
665 ++cursor; | |
666 if (*cursor == '=') | |
667 { | |
668 ++cursor; | |
669 token_stream[(int) index++].kind = Token_geq; | |
670 } | |
671 else if (*cursor == '>') | |
672 { | |
673 ++cursor; | |
674 if (*cursor == '=') | |
675 { | |
676 ++cursor; | |
677 token_stream[(int) index++].kind = Token_assign; | |
678 } | |
679 else | |
680 { | |
681 token_stream[(int) index++].kind = Token_shift; | |
682 } | |
683 } | |
684 else | |
685 { | |
686 token_stream[(int) index++].kind = '>'; | |
687 } | |
688 } | |
689 | |
690 void Lexer::scan_question() | |
691 { | |
692 ++cursor; | |
693 token_stream[(int) index++].kind = '?'; | |
694 } | |
695 | |
696 void Lexer::scan_left_bracket() | |
697 { | |
698 ++cursor; | |
699 token_stream[(int) index++].kind = '['; | |
700 } | |
701 | |
702 void Lexer::scan_right_bracket() | |
703 { | |
704 ++cursor; | |
705 token_stream[(int) index++].kind = ']'; | |
706 } | |
707 | |
708 void Lexer::scan_xor() | |
709 { | |
710 /* | |
711 '^' ::= xor | |
712 '^=' ::= xor_equal | |
713 */ | |
714 ++cursor; | |
715 | |
716 if (*cursor == '=') | |
717 { | |
718 ++cursor; | |
719 token_stream[(int) index++].kind = Token_assign; | |
720 } | |
721 else | |
722 { | |
723 token_stream[(int) index++].kind = '^'; | |
724 } | |
725 } | |
726 | |
727 void Lexer::scan_left_brace() | |
728 { | |
729 ++cursor; | |
730 token_stream[(int) index++].kind = '{'; | |
731 } | |
732 | |
733 void Lexer::scan_or() | |
734 { | |
735 /* | |
736 '|' ::= or | |
737 '|=' ::= or_equal | |
738 '||' ::= or_or | |
739 */ | |
740 ++cursor; | |
741 if (*cursor == '=') | |
742 { | |
743 ++cursor; | |
744 token_stream[(int) index++].kind = Token_assign; | |
745 } | |
746 else if (*cursor == '|') | |
747 { | |
748 ++cursor; | |
749 token_stream[(int) index++].kind = Token_or; | |
750 } | |
751 else | |
752 { | |
753 token_stream[(int) index++].kind = '|'; | |
754 } | |
755 } | |
756 | |
757 void Lexer::scan_right_brace() | |
758 { | |
759 ++cursor; | |
760 token_stream[(int) index++].kind = '}'; | |
761 } | |
762 | |
763 void Lexer::scan_tilde() | |
764 { | |
765 ++cursor; | |
766 token_stream[(int) index++].kind = '~'; | |
767 } | |
768 | |
769 void Lexer::scan_EOF() | |
770 { | |
771 ++cursor; | |
772 token_stream[(int) index++].kind = Token_EOF; | |
773 } | |
774 | |
775 void Lexer::scan_invalid_input() | |
776 { | |
777 QString errmsg("invalid input: %1"); | |
778 errmsg.arg(int(*cursor)); | |
779 reportError(errmsg); | |
780 ++cursor; | |
781 } | |
782 | |
// Given a buffer offset, binary-search 'lines' (sorted start-offsets of
// the first max_line recorded lines) for the containing line, and derive
// the column from the distance to that line's recorded offset.
void LocationTable::positionAt(std::size_t offset, int max_line,
                               int *line, int *column) const
{
    if (!(line && column && max_line != 0))
        return;

    // hand-rolled lower_bound over lines[0 .. max_line)
    int first = 0;
    int len = max_line;
    int half;
    int middle;

    while (len > 0)
    {
        half = len >> 1;
        middle = first;

        middle += half;

        if (lines[middle] < offset)
        {
            first = middle;
            ++first;
            len = len - half - 1;
        }
        else
            len = half;
    }

    // 'first' is now the index of the first entry >= offset, so the line
    // containing the offset starts at lines[first - 1]
    *line = std::max(first, 1);
    *column = (int) (offset - lines[*line - 1] - 1);

    // clamp: offsets that land exactly on a recorded line start would
    // otherwise yield -1
    if (*column < 0)
    {
        *column = 0;
    }
}
819 | |
820 void Lexer::scanKeyword0() | |
821 { | |
822 token_stream[(int) index++].kind = Token_identifier; | |
823 } | |
824 | |
825 void Lexer::scanKeyword2() | |
826 { | |
827 switch (*cursor) | |
828 { | |
829 case 'i': | |
830 if (*(cursor + 1) == 'f') | |
831 { | |
832 token_stream[(int) index++].kind = Token_if; | |
833 return; | |
834 } | |
835 break; | |
836 | |
837 case 'd': | |
838 if (*(cursor + 1) == 'o') | |
839 { | |
840 token_stream[(int) index++].kind = Token_do; | |
841 return; | |
842 } | |
843 break; | |
844 | |
845 case 'o': | |
846 if (*(cursor + 1) == 'r') | |
847 { | |
848 token_stream[(int) index++].kind = Token_or; | |
849 return; | |
850 } | |
851 break; | |
852 | |
853 } | |
854 token_stream[(int) index++].kind = Token_identifier; | |
855 } | |
856 | |
857 void Lexer::scanKeyword3() | |
858 { | |
859 switch (*cursor) | |
860 { | |
861 case 'a': | |
862 if (*(cursor + 1) == 'n' && | |
863 *(cursor + 2) == 'd') | |
864 { | |
865 token_stream[(int) index++].kind = Token_and; | |
866 return; | |
867 } | |
868 if (*(cursor + 1) == 's' && | |
869 *(cursor + 2) == 'm') | |
870 { | |
871 token_stream[(int) index++].kind = Token_asm; | |
872 return; | |
873 } | |
874 break; | |
875 | |
876 case 'f': | |
877 if (*(cursor + 1) == 'o' && | |
878 *(cursor + 2) == 'r') | |
879 { | |
880 token_stream[(int) index++].kind = Token_for; | |
881 return; | |
882 } | |
883 break; | |
884 | |
885 case 'i': | |
886 if (*(cursor + 1) == 'n' && | |
887 *(cursor + 2) == 't') | |
888 { | |
889 token_stream[(int) index++].kind = Token_int; | |
890 return; | |
891 } | |
892 break; | |
893 | |
894 case 'n': | |
895 if (*(cursor + 1) == 'e' && | |
896 *(cursor + 2) == 'w') | |
897 { | |
898 token_stream[(int) index++].kind = Token_new; | |
899 return; | |
900 } | |
901 if (*(cursor + 1) == 'o' && | |
902 *(cursor + 2) == 't') | |
903 { | |
904 token_stream[(int) index++].kind = Token_not; | |
905 return; | |
906 } | |
907 break; | |
908 | |
909 case 't': | |
910 if (*(cursor + 1) == 'r' && | |
911 *(cursor + 2) == 'y') | |
912 { | |
913 token_stream[(int) index++].kind = Token_try; | |
914 return; | |
915 } | |
916 break; | |
917 | |
918 case 'x': | |
919 if (*(cursor + 1) == 'o' && | |
920 *(cursor + 2) == 'r') | |
921 { | |
922 token_stream[(int) index++].kind = Token_xor; | |
923 return; | |
924 } | |
925 break; | |
926 | |
927 } | |
928 token_stream[(int) index++].kind = Token_identifier; | |
929 } | |
930 | |
931 void Lexer::scanKeyword4() | |
932 { | |
933 switch (*cursor) | |
934 { | |
935 case 'a': | |
936 if (*(cursor + 1) == 'u' && | |
937 *(cursor + 2) == 't' && | |
938 *(cursor + 3) == 'o') | |
939 { | |
940 token_stream[(int) index++].kind = Token_auto; | |
941 return; | |
942 } | |
943 break; | |
944 | |
945 case 'c': | |
946 if (*(cursor + 1) == 'a' && | |
947 *(cursor + 2) == 's' && | |
948 *(cursor + 3) == 'e') | |
949 { | |
950 token_stream[(int) index++].kind = Token_case; | |
951 return; | |
952 } | |
953 if (*(cursor + 1) == 'h' && | |
954 *(cursor + 2) == 'a' && | |
955 *(cursor + 3) == 'r') | |
956 { | |
957 token_stream[(int) index++].kind = Token_char; | |
958 return; | |
959 } | |
960 break; | |
961 | |
962 case 'b': | |
963 if (*(cursor + 1) == 'o' && | |
964 *(cursor + 2) == 'o' && | |
965 *(cursor + 3) == 'l') | |
966 { | |
967 token_stream[(int) index++].kind = Token_bool; | |
968 return; | |
969 } | |
970 break; | |
971 | |
972 case 'e': | |
973 if (*(cursor + 1) == 'l' && | |
974 *(cursor + 2) == 's' && | |
975 *(cursor + 3) == 'e') | |
976 { | |
977 token_stream[(int) index++].kind = Token_else; | |
978 return; | |
979 } | |
980 if (*(cursor + 1) == 'm' && | |
981 *(cursor + 2) == 'i' && | |
982 *(cursor + 3) == 't') | |
983 { | |
984 token_stream[(int) index++].kind = Token_emit; | |
985 return; | |
986 } | |
987 if (*(cursor + 1) == 'n' && | |
988 *(cursor + 2) == 'u' && | |
989 *(cursor + 3) == 'm') | |
990 { | |
991 token_stream[(int) index++].kind = Token_enum; | |
992 return; | |
993 } | |
994 break; | |
995 | |
996 case 'g': | |
997 if (*(cursor + 1) == 'o' && | |
998 *(cursor + 2) == 't' && | |
999 *(cursor + 3) == 'o') | |
1000 { | |
1001 token_stream[(int) index++].kind = Token_goto; | |
1002 return; | |
1003 } | |
1004 break; | |
1005 | |
1006 case 'l': | |
1007 if (*(cursor + 1) == 'o' && | |
1008 *(cursor + 2) == 'n' && | |
1009 *(cursor + 3) == 'g') | |
1010 { | |
1011 token_stream[(int) index++].kind = Token_long; | |
1012 return; | |
1013 } | |
1014 break; | |
1015 | |
1016 case 't': | |
1017 if (*(cursor + 1) == 'h' && | |
1018 *(cursor + 2) == 'i' && | |
1019 *(cursor + 3) == 's') | |
1020 { | |
1021 token_stream[(int) index++].kind = Token_this; | |
1022 return; | |
1023 } | |
1024 break; | |
1025 | |
1026 case 'v': | |
1027 if (*(cursor + 1) == 'o' && | |
1028 *(cursor + 2) == 'i' && | |
1029 *(cursor + 3) == 'd') | |
1030 { | |
1031 token_stream[(int) index++].kind = Token_void; | |
1032 return; | |
1033 } | |
1034 break; | |
1035 | |
1036 } | |
1037 token_stream[(int) index++].kind = Token_identifier; | |
1038 } | |
1039 | |
1040 void Lexer::scanKeyword5() | |
1041 { | |
1042 switch (*cursor) | |
1043 { | |
1044 case 'c': | |
1045 if (*(cursor + 1) == 'a' && | |
1046 *(cursor + 2) == 't' && | |
1047 *(cursor + 3) == 'c' && | |
1048 *(cursor + 4) == 'h') | |
1049 { | |
1050 token_stream[(int) index++].kind = Token_catch; | |
1051 return; | |
1052 } | |
1053 if (*(cursor + 1) == 'l' && | |
1054 *(cursor + 2) == 'a' && | |
1055 *(cursor + 3) == 's' && | |
1056 *(cursor + 4) == 's') | |
1057 { | |
1058 token_stream[(int) index++].kind = Token_class; | |
1059 return; | |
1060 } | |
1061 if (*(cursor + 1) == 'o' && | |
1062 *(cursor + 2) == 'm' && | |
1063 *(cursor + 3) == 'p' && | |
1064 *(cursor + 4) == 'l') | |
1065 { | |
1066 token_stream[(int) index++].kind = Token_compl; | |
1067 return; | |
1068 } | |
1069 if (*(cursor + 1) == 'o' && | |
1070 *(cursor + 2) == 'n' && | |
1071 *(cursor + 3) == 's' && | |
1072 *(cursor + 4) == 't') | |
1073 { | |
1074 token_stream[(int) index++].kind = Token_const; | |
1075 return; | |
1076 } | |
1077 break; | |
1078 | |
1079 case 'b': | |
1080 if (*(cursor + 1) == 'i' && | |
1081 *(cursor + 2) == 't' && | |
1082 *(cursor + 3) == 'o' && | |
1083 *(cursor + 4) == 'r') | |
1084 { | |
1085 token_stream[(int) index++].kind = Token_bitor; | |
1086 return; | |
1087 } | |
1088 if (*(cursor + 1) == 'r' && | |
1089 *(cursor + 2) == 'e' && | |
1090 *(cursor + 3) == 'a' && | |
1091 *(cursor + 4) == 'k') | |
1092 { | |
1093 token_stream[(int) index++].kind = Token_break; | |
1094 return; | |
1095 } | |
1096 break; | |
1097 | |
1098 case 'f': | |
1099 if (*(cursor + 1) == 'l' && | |
1100 *(cursor + 2) == 'o' && | |
1101 *(cursor + 3) == 'a' && | |
1102 *(cursor + 4) == 't') | |
1103 { | |
1104 token_stream[(int) index++].kind = Token_float; | |
1105 return; | |
1106 } | |
1107 break; | |
1108 | |
1109 case 'o': | |
1110 if (*(cursor + 1) == 'r' && | |
1111 *(cursor + 2) == '_' && | |
1112 *(cursor + 3) == 'e' && | |
1113 *(cursor + 4) == 'q') | |
1114 { | |
1115 token_stream[(int) index++].kind = Token_or_eq; | |
1116 return; | |
1117 } | |
1118 break; | |
1119 | |
1120 case 's': | |
1121 if (*(cursor + 1) == 'h' && | |
1122 *(cursor + 2) == 'o' && | |
1123 *(cursor + 3) == 'r' && | |
1124 *(cursor + 4) == 't') | |
1125 { | |
1126 token_stream[(int) index++].kind = Token_short; | |
1127 return; | |
1128 } | |
1129 if (*(cursor + 1) == 'l' && | |
1130 *(cursor + 2) == 'o' && | |
1131 *(cursor + 3) == 't' && | |
1132 *(cursor + 4) == 's') | |
1133 { | |
1134 token_stream[(int) index++].kind = Token_slots; | |
1135 return; | |
1136 } | |
1137 break; | |
1138 | |
1139 case 'u': | |
1140 if (*(cursor + 1) == 'n' && | |
1141 *(cursor + 2) == 'i' && | |
1142 *(cursor + 3) == 'o' && | |
1143 *(cursor + 4) == 'n') | |
1144 { | |
1145 token_stream[(int) index++].kind = Token_union; | |
1146 return; | |
1147 } | |
1148 if (*(cursor + 1) == 's' && | |
1149 *(cursor + 2) == 'i' && | |
1150 *(cursor + 3) == 'n' && | |
1151 *(cursor + 4) == 'g') | |
1152 { | |
1153 token_stream[(int) index++].kind = Token_using; | |
1154 return; | |
1155 } | |
1156 break; | |
1157 | |
1158 case 't': | |
1159 if (*(cursor + 1) == 'h' && | |
1160 *(cursor + 2) == 'r' && | |
1161 *(cursor + 3) == 'o' && | |
1162 *(cursor + 4) == 'w') | |
1163 { | |
1164 token_stream[(int) index++].kind = Token_throw; | |
1165 return; | |
1166 } | |
1167 break; | |
1168 | |
1169 case 'w': | |
1170 if (*(cursor + 1) == 'h' && | |
1171 *(cursor + 2) == 'i' && | |
1172 *(cursor + 3) == 'l' && | |
1173 *(cursor + 4) == 'e') | |
1174 { | |
1175 token_stream[(int) index++].kind = Token_while; | |
1176 return; | |
1177 } | |
1178 break; | |
1179 | |
1180 } | |
1181 token_stream[(int) index++].kind = Token_identifier; | |
1182 } | |
1183 | |
1184 void Lexer::scanKeyword6() | |
1185 { | |
1186 switch (*cursor) | |
1187 { | |
1188 case 'a': | |
1189 if (*(cursor + 1) == 'n' && | |
1190 *(cursor + 2) == 'd' && | |
1191 *(cursor + 3) == '_' && | |
1192 *(cursor + 4) == 'e' && | |
1193 *(cursor + 5) == 'q') | |
1194 { | |
1195 token_stream[(int) index++].kind = Token_and_eq; | |
1196 return; | |
1197 } | |
1198 break; | |
1199 | |
1200 case 'b': | |
1201 if (*(cursor + 1) == 'i' && | |
1202 *(cursor + 2) == 't' && | |
1203 *(cursor + 3) == 'a' && | |
1204 *(cursor + 4) == 'n' && | |
1205 *(cursor + 5) == 'd') | |
1206 { | |
1207 token_stream[(int) index++].kind = Token_bitand; | |
1208 return; | |
1209 } | |
1210 break; | |
1211 | |
1212 case 'e': | |
1213 if (*(cursor + 1) == 'x' && | |
1214 *(cursor + 2) == 'p' && | |
1215 *(cursor + 3) == 'o' && | |
1216 *(cursor + 4) == 'r' && | |
1217 *(cursor + 5) == 't') | |
1218 { | |
1219 token_stream[(int) index++].kind = Token_export; | |
1220 return; | |
1221 } | |
1222 if (*(cursor + 1) == 'x' && | |
1223 *(cursor + 2) == 't' && | |
1224 *(cursor + 3) == 'e' && | |
1225 *(cursor + 4) == 'r' && | |
1226 *(cursor + 5) == 'n') | |
1227 { | |
1228 token_stream[(int) index++].kind = Token_extern; | |
1229 return; | |
1230 } | |
1231 break; | |
1232 | |
1233 case 'd': | |
1234 if (*(cursor + 1) == 'e' && | |
1235 *(cursor + 2) == 'l' && | |
1236 *(cursor + 3) == 'e' && | |
1237 *(cursor + 4) == 't' && | |
1238 *(cursor + 5) == 'e') | |
1239 { | |
1240 token_stream[(int) index++].kind = Token_delete; | |
1241 return; | |
1242 } | |
1243 if (*(cursor + 1) == 'o' && | |
1244 *(cursor + 2) == 'u' && | |
1245 *(cursor + 3) == 'b' && | |
1246 *(cursor + 4) == 'l' && | |
1247 *(cursor + 5) == 'e') | |
1248 { | |
1249 token_stream[(int) index++].kind = Token_double; | |
1250 return; | |
1251 } | |
1252 break; | |
1253 | |
1254 case 'f': | |
1255 if (*(cursor + 1) == 'r' && | |
1256 *(cursor + 2) == 'i' && | |
1257 *(cursor + 3) == 'e' && | |
1258 *(cursor + 4) == 'n' && | |
1259 *(cursor + 5) == 'd') | |
1260 { | |
1261 token_stream[(int) index++].kind = Token_friend; | |
1262 return; | |
1263 } | |
1264 break; | |
1265 | |
1266 case 'i': | |
1267 if (*(cursor + 1) == 'n' && | |
1268 *(cursor + 2) == 'l' && | |
1269 *(cursor + 3) == 'i' && | |
1270 *(cursor + 4) == 'n' && | |
1271 *(cursor + 5) == 'e') | |
1272 { | |
1273 token_stream[(int) index++].kind = Token_inline; | |
1274 return; | |
1275 } | |
1276 break; | |
1277 | |
1278 case 'K': | |
1279 if (*(cursor + 1) == '_' && | |
1280 *(cursor + 2) == 'D' && | |
1281 *(cursor + 3) == 'C' && | |
1282 *(cursor + 4) == 'O' && | |
1283 *(cursor + 5) == 'P') | |
1284 { | |
1285 token_stream[(int) index++].kind = Token_K_DCOP; | |
1286 return; | |
1287 } | |
1288 break; | |
1289 | |
1290 case 'n': | |
1291 if (*(cursor + 1) == 'o' && | |
1292 *(cursor + 2) == 't' && | |
1293 *(cursor + 3) == '_' && | |
1294 *(cursor + 4) == 'e' && | |
1295 *(cursor + 5) == 'q') | |
1296 { | |
1297 token_stream[(int) index++].kind = Token_not_eq; | |
1298 return; | |
1299 } | |
1300 break; | |
1301 | |
1302 case 'p': | |
1303 if (*(cursor + 1) == 'u' && | |
1304 *(cursor + 2) == 'b' && | |
1305 *(cursor + 3) == 'l' && | |
1306 *(cursor + 4) == 'i' && | |
1307 *(cursor + 5) == 'c') | |
1308 { | |
1309 token_stream[(int) index++].kind = Token_public; | |
1310 return; | |
1311 } | |
1312 break; | |
1313 | |
1314 case 's': | |
1315 if (*(cursor + 1) == 'i' && | |
1316 *(cursor + 2) == 'g' && | |
1317 *(cursor + 3) == 'n' && | |
1318 *(cursor + 4) == 'e' && | |
1319 *(cursor + 5) == 'd') | |
1320 { | |
1321 token_stream[(int) index++].kind = Token_signed; | |
1322 return; | |
1323 } | |
1324 if (*(cursor + 1) == 'i' && | |
1325 *(cursor + 2) == 'z' && | |
1326 *(cursor + 3) == 'e' && | |
1327 *(cursor + 4) == 'o' && | |
1328 *(cursor + 5) == 'f') | |
1329 { | |
1330 token_stream[(int) index++].kind = Token_sizeof; | |
1331 return; | |
1332 } | |
1333 if (*(cursor + 1) == 't' && | |
1334 *(cursor + 2) == 'a' && | |
1335 *(cursor + 3) == 't' && | |
1336 *(cursor + 4) == 'i' && | |
1337 *(cursor + 5) == 'c') | |
1338 { | |
1339 token_stream[(int) index++].kind = Token_static; | |
1340 return; | |
1341 } | |
1342 if (*(cursor + 1) == 't' && | |
1343 *(cursor + 2) == 'r' && | |
1344 *(cursor + 3) == 'u' && | |
1345 *(cursor + 4) == 'c' && | |
1346 *(cursor + 5) == 't') | |
1347 { | |
1348 token_stream[(int) index++].kind = Token_struct; | |
1349 return; | |
1350 } | |
1351 if (*(cursor + 1) == 'w' && | |
1352 *(cursor + 2) == 'i' && | |
1353 *(cursor + 3) == 't' && | |
1354 *(cursor + 4) == 'c' && | |
1355 *(cursor + 5) == 'h') | |
1356 { | |
1357 token_stream[(int) index++].kind = Token_switch; | |
1358 return; | |
1359 } | |
1360 break; | |
1361 | |
1362 case 'r': | |
1363 if (*(cursor + 1) == 'e' && | |
1364 *(cursor + 2) == 't' && | |
1365 *(cursor + 3) == 'u' && | |
1366 *(cursor + 4) == 'r' && | |
1367 *(cursor + 5) == 'n') | |
1368 { | |
1369 token_stream[(int) index++].kind = Token_return; | |
1370 return; | |
1371 } | |
1372 break; | |
1373 | |
1374 case 't': | |
1375 if (*(cursor + 1) == 'y' && | |
1376 *(cursor + 2) == 'p' && | |
1377 *(cursor + 3) == 'e' && | |
1378 *(cursor + 4) == 'i' && | |
1379 *(cursor + 5) == 'd') | |
1380 { | |
1381 token_stream[(int) index++].kind = Token_typeid; | |
1382 return; | |
1383 } | |
1384 break; | |
1385 | |
1386 case 'x': | |
1387 if (*(cursor + 1) == 'o' && | |
1388 *(cursor + 2) == 'r' && | |
1389 *(cursor + 3) == '_' && | |
1390 *(cursor + 4) == 'e' && | |
1391 *(cursor + 5) == 'q') | |
1392 { | |
1393 token_stream[(int) index++].kind = Token_xor_eq; | |
1394 return; | |
1395 } | |
1396 break; | |
1397 | |
1398 case 'k': | |
1399 if (*(cursor + 1) == '_' && | |
1400 *(cursor + 2) == 'd' && | |
1401 *(cursor + 3) == 'c' && | |
1402 *(cursor + 4) == 'o' && | |
1403 *(cursor + 5) == 'p') | |
1404 { | |
1405 token_stream[(int) index++].kind = Token_k_dcop; | |
1406 return; | |
1407 } | |
1408 break; | |
1409 | |
1410 } | |
1411 token_stream[(int) index++].kind = Token_identifier; | |
1412 } | |
1413 | |
1414 void Lexer::scanKeyword7() | |
1415 { | |
1416 switch (*cursor) | |
1417 { | |
1418 case 'd': | |
1419 if (*(cursor + 1) == 'e' && | |
1420 *(cursor + 2) == 'f' && | |
1421 *(cursor + 3) == 'a' && | |
1422 *(cursor + 4) == 'u' && | |
1423 *(cursor + 5) == 'l' && | |
1424 *(cursor + 6) == 't') | |
1425 { | |
1426 token_stream[(int) index++].kind = Token_default; | |
1427 return; | |
1428 } | |
1429 break; | |
1430 | |
1431 case 'm': | |
1432 if (*(cursor + 1) == 'u' && | |
1433 *(cursor + 2) == 't' && | |
1434 *(cursor + 3) == 'a' && | |
1435 *(cursor + 4) == 'b' && | |
1436 *(cursor + 5) == 'l' && | |
1437 *(cursor + 6) == 'e') | |
1438 { | |
1439 token_stream[(int) index++].kind = Token_mutable; | |
1440 return; | |
1441 } | |
1442 break; | |
1443 | |
1444 case 'p': | |
1445 if (*(cursor + 1) == 'r' && | |
1446 *(cursor + 2) == 'i' && | |
1447 *(cursor + 3) == 'v' && | |
1448 *(cursor + 4) == 'a' && | |
1449 *(cursor + 5) == 't' && | |
1450 *(cursor + 6) == 'e') | |
1451 { | |
1452 token_stream[(int) index++].kind = Token_private; | |
1453 return; | |
1454 } | |
1455 break; | |
1456 case 's': | |
1457 if (*(cursor + 1) == 'i' && | |
1458 *(cursor + 2) == 'g' && | |
1459 *(cursor + 3) == 'n' && | |
1460 *(cursor + 4) == 'a' && | |
1461 *(cursor + 5) == 'l' && | |
1462 *(cursor + 6) == 's') | |
1463 { | |
1464 token_stream[(int) index++].kind = Token_signals; | |
1465 return; | |
1466 } | |
1467 break; | |
1468 case 't': | |
1469 if (*(cursor + 1) == 'y' && | |
1470 *(cursor + 2) == 'p' && | |
1471 *(cursor + 3) == 'e' && | |
1472 *(cursor + 4) == 'd' && | |
1473 *(cursor + 5) == 'e' && | |
1474 *(cursor + 6) == 'f') | |
1475 { | |
1476 token_stream[(int) index++].kind = Token_typedef; | |
1477 return; | |
1478 } | |
1479 break; | |
1480 | |
1481 case 'v': | |
1482 if (*(cursor + 1) == 'i' && | |
1483 *(cursor + 2) == 'r' && | |
1484 *(cursor + 3) == 't' && | |
1485 *(cursor + 4) == 'u' && | |
1486 *(cursor + 5) == 'a' && | |
1487 *(cursor + 6) == 'l') | |
1488 { | |
1489 token_stream[(int) index++].kind = Token_virtual; | |
1490 return; | |
1491 } | |
1492 break; | |
1493 | |
1494 case 'Q': | |
1495 if (*(cursor + 1) == '_' && | |
1496 *(cursor + 2) == 'E' && | |
1497 *(cursor + 3) == 'N' && | |
1498 *(cursor + 4) == 'U' && | |
1499 *(cursor + 5) == 'M' && | |
1500 *(cursor + 6) == 'S') | |
1501 { | |
1502 token_stream[(int) index++].kind = Token_Q_ENUMS; | |
1503 return; | |
1504 } | |
1505 break; | |
1506 | |
1507 } | |
1508 token_stream[(int) index++].kind = Token_identifier; | |
1509 } | |
1510 | |
1511 void Lexer::scanKeyword8() | |
1512 { | |
1513 switch (*cursor) | |
1514 { | |
1515 case '_': | |
1516 if (*(cursor + 1) == '_' && | |
1517 *(cursor + 2) == 't' && | |
1518 *(cursor + 3) == 'y' && | |
1519 *(cursor + 4) == 'p' && | |
1520 *(cursor + 5) == 'e' && | |
1521 *(cursor + 6) == 'o' && | |
1522 *(cursor + 7) == 'f') | |
1523 { | |
1524 token_stream[(int) index++].kind = Token___typeof; | |
1525 return; | |
1526 } | |
1527 break; | |
1528 | |
1529 case 'c': | |
1530 if (*(cursor + 1) == 'o' && | |
1531 *(cursor + 2) == 'n' && | |
1532 *(cursor + 3) == 't' && | |
1533 *(cursor + 4) == 'i' && | |
1534 *(cursor + 5) == 'n' && | |
1535 *(cursor + 6) == 'u' && | |
1536 *(cursor + 7) == 'e') | |
1537 { | |
1538 token_stream[(int) index++].kind = Token_continue; | |
1539 return; | |
1540 } | |
1541 break; | |
1542 | |
1543 case 'e': | |
1544 if (*(cursor + 1) == 'x' && | |
1545 *(cursor + 2) == 'p' && | |
1546 *(cursor + 3) == 'l' && | |
1547 *(cursor + 4) == 'i' && | |
1548 *(cursor + 5) == 'c' && | |
1549 *(cursor + 6) == 'i' && | |
1550 *(cursor + 7) == 't') | |
1551 { | |
1552 token_stream[(int) index++].kind = Token_explicit; | |
1553 return; | |
1554 } | |
1555 break; | |
1556 | |
1557 case 'o': | |
1558 if (*(cursor + 1) == 'p' && | |
1559 *(cursor + 2) == 'e' && | |
1560 *(cursor + 3) == 'r' && | |
1561 *(cursor + 4) == 'a' && | |
1562 *(cursor + 5) == 't' && | |
1563 *(cursor + 6) == 'o' && | |
1564 *(cursor + 7) == 'r') | |
1565 { | |
1566 token_stream[(int) index++].kind = Token_operator; | |
1567 return; | |
1568 } | |
1569 break; | |
1570 | |
1571 case 'Q': | |
1572 if (*(cursor + 1) == '_' && | |
1573 *(cursor + 2) == 'O' && | |
1574 *(cursor + 3) == 'B' && | |
1575 *(cursor + 4) == 'J' && | |
1576 *(cursor + 5) == 'E' && | |
1577 *(cursor + 6) == 'C' && | |
1578 *(cursor + 7) == 'T') | |
1579 { | |
1580 token_stream[(int) index++].kind = Token_Q_OBJECT; | |
1581 return; | |
1582 } | |
1583 break; | |
1584 | |
1585 case 'r': | |
1586 if (*(cursor + 1) == 'e' && | |
1587 *(cursor + 2) == 'g' && | |
1588 *(cursor + 3) == 'i' && | |
1589 *(cursor + 4) == 's' && | |
1590 *(cursor + 5) == 't' && | |
1591 *(cursor + 6) == 'e' && | |
1592 *(cursor + 7) == 'r') | |
1593 { | |
1594 token_stream[(int) index++].kind = Token_register; | |
1595 return; | |
1596 } | |
1597 break; | |
1598 | |
1599 case 'u': | |
1600 if (*(cursor + 1) == 'n' && | |
1601 *(cursor + 2) == 's' && | |
1602 *(cursor + 3) == 'i' && | |
1603 *(cursor + 4) == 'g' && | |
1604 *(cursor + 5) == 'n' && | |
1605 *(cursor + 6) == 'e' && | |
1606 *(cursor + 7) == 'd') | |
1607 { | |
1608 token_stream[(int) index++].kind = Token_unsigned; | |
1609 return; | |
1610 } | |
1611 break; | |
1612 | |
1613 case 't': | |
1614 if (*(cursor + 1) == 'e' && | |
1615 *(cursor + 2) == 'm' && | |
1616 *(cursor + 3) == 'p' && | |
1617 *(cursor + 4) == 'l' && | |
1618 *(cursor + 5) == 'a' && | |
1619 *(cursor + 6) == 't' && | |
1620 *(cursor + 7) == 'e') | |
1621 { | |
1622 token_stream[(int) index++].kind = Token_template; | |
1623 return; | |
1624 } | |
1625 if (*(cursor + 1) == 'y' && | |
1626 *(cursor + 2) == 'p' && | |
1627 *(cursor + 3) == 'e' && | |
1628 *(cursor + 4) == 'n' && | |
1629 *(cursor + 5) == 'a' && | |
1630 *(cursor + 6) == 'm' && | |
1631 *(cursor + 7) == 'e') | |
1632 { | |
1633 token_stream[(int) index++].kind = Token_typename; | |
1634 return; | |
1635 } | |
1636 break; | |
1637 | |
1638 case 'v': | |
1639 if (*(cursor + 1) == 'o' && | |
1640 *(cursor + 2) == 'l' && | |
1641 *(cursor + 3) == 'a' && | |
1642 *(cursor + 4) == 't' && | |
1643 *(cursor + 5) == 'i' && | |
1644 *(cursor + 6) == 'l' && | |
1645 *(cursor + 7) == 'e') | |
1646 { | |
1647 token_stream[(int) index++].kind = Token_volatile; | |
1648 return; | |
1649 } | |
1650 break; | |
1651 | |
1652 } | |
1653 token_stream[(int) index++].kind = Token_identifier; | |
1654 } | |
1655 | |
1656 void Lexer::scanKeyword9() | |
1657 { | |
1658 switch (*cursor) | |
1659 { | |
1660 case 'p': | |
1661 if (*(cursor + 1) == 'r' && | |
1662 *(cursor + 2) == 'o' && | |
1663 *(cursor + 3) == 't' && | |
1664 *(cursor + 4) == 'e' && | |
1665 *(cursor + 5) == 'c' && | |
1666 *(cursor + 6) == 't' && | |
1667 *(cursor + 7) == 'e' && | |
1668 *(cursor + 8) == 'd') | |
1669 { | |
1670 token_stream[(int) index++].kind = Token_protected; | |
1671 return; | |
1672 } | |
1673 break; | |
1674 | |
1675 case 'n': | |
1676 if (*(cursor + 1) == 'a' && | |
1677 *(cursor + 2) == 'm' && | |
1678 *(cursor + 3) == 'e' && | |
1679 *(cursor + 4) == 's' && | |
1680 *(cursor + 5) == 'p' && | |
1681 *(cursor + 6) == 'a' && | |
1682 *(cursor + 7) == 'c' && | |
1683 *(cursor + 8) == 'e') | |
1684 { | |
1685 token_stream[(int) index++].kind = Token_namespace; | |
1686 return; | |
1687 } | |
1688 break; | |
1689 | |
1690 } | |
1691 token_stream[(int) index++].kind = Token_identifier; | |
1692 } | |
1693 | |
1694 void Lexer::scanKeyword10() | |
1695 { | |
1696 switch (*cursor) | |
1697 { | |
1698 case 'c': | |
1699 if (*(cursor + 1) == 'o' && | |
1700 *(cursor + 2) == 'n' && | |
1701 *(cursor + 3) == 's' && | |
1702 *(cursor + 4) == 't' && | |
1703 *(cursor + 5) == '_' && | |
1704 *(cursor + 6) == 'c' && | |
1705 *(cursor + 7) == 'a' && | |
1706 *(cursor + 8) == 's' && | |
1707 *(cursor + 9) == 't') | |
1708 { | |
1709 token_stream[(int) index++].kind = Token_const_cast; | |
1710 return; | |
1711 } | |
1712 break; | |
1713 | |
1714 case 'Q': | |
1715 if (*(cursor + 1) == '_' && | |
1716 *(cursor + 2) == 'P' && | |
1717 *(cursor + 3) == 'R' && | |
1718 *(cursor + 4) == 'O' && | |
1719 *(cursor + 5) == 'P' && | |
1720 *(cursor + 6) == 'E' && | |
1721 *(cursor + 7) == 'R' && | |
1722 *(cursor + 8) == 'T' && | |
1723 *(cursor + 9) == 'Y') | |
1724 { | |
1725 token_stream[(int) index++].kind = Token_Q_PROPERTY; | |
1726 return; | |
1727 } | |
1728 | |
1729 break; | |
1730 } | |
1731 | |
1732 token_stream[(int) index++].kind = Token_identifier; | |
1733 } | |
1734 | |
1735 void Lexer::scanKeyword11() | |
1736 { | |
1737 switch (*cursor) | |
1738 { | |
1739 case 'Q': | |
1740 if (*(cursor + 1) == '_' && | |
1741 *(cursor + 2) == 'I' && | |
1742 *(cursor + 3) == 'N' && | |
1743 *(cursor + 4) == 'V' && | |
1744 *(cursor + 5) == 'O' && | |
1745 *(cursor + 6) == 'K' && | |
1746 *(cursor + 7) == 'A' && | |
1747 *(cursor + 8) == 'B' && | |
1748 *(cursor + 9) == 'L' && | |
1749 *(cursor + 10) == 'E') | |
1750 { | |
1751 token_stream[(int) index++].kind = Token_Q_INVOKABLE; | |
1752 return; | |
1753 } | |
1754 break; | |
1755 | |
1756 case 's': | |
1757 if (*(cursor + 1) == 't' && | |
1758 *(cursor + 2) == 'a' && | |
1759 *(cursor + 3) == 't' && | |
1760 *(cursor + 4) == 'i' && | |
1761 *(cursor + 5) == 'c' && | |
1762 *(cursor + 6) == '_' && | |
1763 *(cursor + 7) == 'c' && | |
1764 *(cursor + 8) == 'a' && | |
1765 *(cursor + 9) == 's' && | |
1766 *(cursor + 10) == 't') | |
1767 { | |
1768 token_stream[(int) index++].kind = Token_static_cast; | |
1769 return; | |
1770 } | |
1771 break; | |
1772 | |
1773 } | |
1774 token_stream[(int) index++].kind = Token_identifier; | |
1775 } | |
1776 | |
1777 void Lexer::scanKeyword12() | |
1778 { | |
1779 switch (*cursor) | |
1780 { | |
1781 case 'd': | |
1782 if (*(cursor + 1) == 'y' && | |
1783 *(cursor + 2) == 'n' && | |
1784 *(cursor + 3) == 'a' && | |
1785 *(cursor + 4) == 'm' && | |
1786 *(cursor + 5) == 'i' && | |
1787 *(cursor + 6) == 'c' && | |
1788 *(cursor + 7) == '_' && | |
1789 *(cursor + 8) == 'c' && | |
1790 *(cursor + 9) == 'a' && | |
1791 *(cursor + 10) == 's' && | |
1792 *(cursor + 11) == 't') | |
1793 { | |
1794 token_stream[(int) index++].kind = Token_dynamic_cast; | |
1795 return; | |
1796 } | |
1797 break; | |
1798 | |
1799 } | |
1800 token_stream[(int) index++].kind = Token_identifier; | |
1801 } | |
1802 | |
1803 void Lexer::scanKeyword13() | |
1804 { | |
1805 switch (*cursor) | |
1806 { | |
1807 case '_': | |
1808 if (*(cursor + 1) == '_' && | |
1809 *(cursor + 2) == 'a' && | |
1810 *(cursor + 3) == 't' && | |
1811 *(cursor + 4) == 't' && | |
1812 *(cursor + 5) == 'r' && | |
1813 *(cursor + 6) == 'i' && | |
1814 *(cursor + 7) == 'b' && | |
1815 *(cursor + 8) == 'u' && | |
1816 *(cursor + 9) == 't' && | |
1817 *(cursor + 10) == 'e' && | |
1818 *(cursor + 11) == '_' && | |
1819 *(cursor + 12) == '_') | |
1820 { | |
1821 token_stream[(int) index++].kind = Token___attribute__; | |
1822 return; | |
1823 } | |
1824 break; | |
1825 } | |
1826 token_stream[(int) index++].kind = Token_identifier; | |
1827 } | |
1828 | |
1829 void Lexer::scanKeyword14() | |
1830 { | |
1831 switch (*cursor) | |
1832 { | |
1833 case 'k': | |
1834 if (*(cursor + 1) == '_' && | |
1835 *(cursor + 2) == 'd' && | |
1836 *(cursor + 3) == 'c' && | |
1837 *(cursor + 4) == 'o' && | |
1838 *(cursor + 5) == 'p' && | |
1839 *(cursor + 6) == '_' && | |
1840 *(cursor + 7) == 's' && | |
1841 *(cursor + 8) == 'i' && | |
1842 *(cursor + 9) == 'g' && | |
1843 *(cursor + 10) == 'n' && | |
1844 *(cursor + 11) == 'a' && | |
1845 *(cursor + 12) == 'l' && | |
1846 *(cursor + 13) == 's') | |
1847 { | |
1848 token_stream[(int) index++].kind = Token_k_dcop_signals; | |
1849 return; | |
1850 } | |
1851 break; | |
1852 } | |
1853 token_stream[(int) index++].kind = Token_identifier; | |
1854 } | |
1855 | |
1856 void Lexer::scanKeyword16() | |
1857 { | |
1858 switch (*cursor) | |
1859 { | |
1860 case 'r': | |
1861 if (*(cursor + 1) == 'e' && | |
1862 *(cursor + 2) == 'i' && | |
1863 *(cursor + 3) == 'n' && | |
1864 *(cursor + 4) == 't' && | |
1865 *(cursor + 5) == 'e' && | |
1866 *(cursor + 6) == 'r' && | |
1867 *(cursor + 7) == 'p' && | |
1868 *(cursor + 8) == 'r' && | |
1869 *(cursor + 9) == 'e' && | |
1870 *(cursor + 10) == 't' && | |
1871 *(cursor + 11) == '_' && | |
1872 *(cursor + 12) == 'c' && | |
1873 *(cursor + 13) == 'a' && | |
1874 *(cursor + 14) == 's' && | |
1875 *(cursor + 15) == 't') | |
1876 { | |
1877 token_stream[(int) index++].kind = Token_reinterpret_cast; | |
1878 return; | |
1879 } | |
1880 break; | |
1881 } | |
1882 | |
1883 token_stream[(int) index++].kind = Token_identifier; | |
1884 } | |
1885 | |
1886 // kate: space-indent on; indent-width 2; replace-tabs on; |