Mercurial > projects > qtd
annotate generator/parser/lexer.cpp @ 337:5896535a03cd
moved enums back to classes
author | maxter <spambox@d-coding.com> |
---|---|
date | Sat, 13 Mar 2010 00:38:42 +0200 |
parents | 09a0f1d048f2 |
children |
rev | line source |
---|---|
1 | 1 /**************************************************************************** |
2 ** | |
52
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
3 ** Copyright (C) 1992-2009 Nokia. All rights reserved. |
1 | 4 ** Copyright (C) 2002-2005 Roberto Raggi <roberto@kdevelop.org> |
5 ** | |
6 ** This file is part of Qt Jambi. | |
7 ** | |
52
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
8 ** Commercial Usage |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
9 Licensees holding valid Qt Commercial licenses may use this file in |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
10 accordance with the Qt Commercial License Agreement provided with the |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
11 Software or, alternatively, in accordance with the terms contained in |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
12 a written agreement between you and Nokia. |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
13 |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
14 GNU Lesser General Public License Usage |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
15 Alternatively, this file may be used under the terms of the GNU Lesser |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
16 General Public License version 2.1 as published by the Free Software |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
17 Foundation and appearing in the file LICENSE.LGPL included in the |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
18 packaging of this file. Please review the following information to |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
19 ensure the GNU Lesser General Public License version 2.1 requirements |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
20 will be met: http://www.gnu.org/licenses/old-licenses/lgpl-2.1.html. |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
21 |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
22 In addition, as a special exception, Nokia gives you certain |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
23 additional rights. These rights are described in the Nokia Qt LGPL |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
24 Exception version 1.0, included in the file LGPL_EXCEPTION.txt in this |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
25 package. |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
26 |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
27 GNU General Public License Usage |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
28 Alternatively, this file may be used under the terms of the GNU |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
29 General Public License version 3.0 as published by the Free Software |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
30 Foundation and appearing in the file LICENSE.GPL included in the |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
31 packaging of this file. Please review the following information to |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
32 ensure the GNU General Public License version 3.0 requirements will be |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
33 met: http://www.gnu.org/copyleft/gpl.html. |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
34 |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
35 If you are unsure which license is appropriate for your use, please |
09a0f1d048f2
update parser to that from jambi 4.5, attemt to fix building with gcc 4.4
eldar
parents:
1
diff
changeset
|
36 contact the sales department at qt-sales@nokia.com. |
1 | 37 |
38 ** | |
39 ** This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE | |
40 ** WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. | |
41 ** | |
42 ****************************************************************************/ | |
43 | |
44 | |
45 #include "lexer.h" | |
46 #include "tokens.h" | |
47 #include "control.h" | |
48 | |
49 #include <cctype> | |
50 #include <iostream> | |
51 | |
// Keyword-recognizer dispatch table indexed by identifier length:
// s_scan_keyword_table[n] classifies identifiers of length n (see
// scan_identifier_or_keyword).  Lengths with no keywords (0, 1, 15 — and
// anything >= 17, which the caller maps to slot 0) fall back to
// scanKeyword0, which simply emits Token_identifier.
scan_fun_ptr Lexer::s_scan_keyword_table[] = {
    &Lexer::scanKeyword0, &Lexer::scanKeyword0,
    &Lexer::scanKeyword2, &Lexer::scanKeyword3,
    &Lexer::scanKeyword4, &Lexer::scanKeyword5,
    &Lexer::scanKeyword6, &Lexer::scanKeyword7,
    &Lexer::scanKeyword8, &Lexer::scanKeyword9,
    &Lexer::scanKeyword10, &Lexer::scanKeyword11,
    &Lexer::scanKeyword12, &Lexer::scanKeyword13,
    &Lexer::scanKeyword14, &Lexer::scanKeyword0,
    &Lexer::scanKeyword16
};
63 | |
64 void LocationManager::extract_line(int offset, int *line, QString *filename) const | |
65 { | |
66 *line = 0; | |
67 if (token_stream.size () < 1) | |
68 return; | |
69 | |
70 const unsigned char *begin_buffer = reinterpret_cast<const unsigned char *>(token_stream[0].text); | |
71 const unsigned char *cursor = begin_buffer + offset; | |
72 | |
73 ++cursor; // skip '#' | |
74 if (std::isspace(*cursor) && std::isdigit(*(cursor + 1))) | |
75 { | |
76 ++cursor; | |
77 char buffer[1024], *cp = buffer; | |
78 do { | |
79 *cp++ = *cursor++; | |
80 } while (std::isdigit(*cursor)); | |
81 *cp = '\0'; | |
82 int l = strtol(buffer, 0, 0); | |
83 | |
84 Q_ASSERT(std::isspace(*cursor)); | |
85 ++cursor; | |
86 | |
87 Q_ASSERT(*cursor == '"'); | |
88 ++cursor; | |
89 | |
90 cp = buffer; | |
91 while (*cursor && *cursor != '"') { | |
92 *cp++ = *cursor++; | |
93 } | |
94 *cp = '\0'; | |
95 Q_ASSERT(*cursor == '"'); | |
96 ++cursor; | |
97 | |
98 *filename = buffer; | |
99 *line = l; | |
100 // printf("filename: %s line: %d\n", buffer, line); | |
101 } | |
102 } | |
103 | |
// Map a byte offset in the preprocessed buffer to a (line, column, filename)
// triple of the original source, by combining the most recent '#' line marker
// (line_table + extract_line) with the recorded newline offsets
// (location_table).
void LocationManager::positionAt(std::size_t offset, int *line, int *column,
                                 QString *filename) const
{
    // Which '#'-marker interval does `offset` fall into?
    int ppline, ppcolumn;
    line_table.positionAt(offset, &ppline, &ppcolumn);

    // Line number and file name that marker announced.
    int base_line;
    extract_line((int) line_table[ppline-1], &base_line, filename);

    // Physical line of the marker itself ...
    int line2, column2;
    location_table.positionAt((int) line_table[ppline-1], &line2, &column2);

    // ... so the result is the announced line plus the distance from the
    // marker to `offset`, minus one (the marker line itself doesn't count).
    location_table.positionAt(offset, line, column);
    *line = base_line + *line - line2 - 1;
}
119 | |
// Per-byte dispatch table for the main scan loop; filled lazily by
// initialize_scan_table() on the first call to tokenize().
scan_fun_ptr Lexer::s_scan_table[256];
bool Lexer::s_initialized = false;
122 | |
// Tokenize `contents` (`size` bytes) into token_stream.  Token 0 is a
// sentinel Token_EOF that carries the buffer pointer (token text offsets are
// all relative to it); real tokens start at index 1 and a trailing Token_EOF
// terminates the stream.
// NOTE(review): the scanners stop on '\0' (s_scan_table[0] == scan_EOF), so
// the buffer is presumably NUL-terminated at contents[size] — confirm with
// callers.
void Lexer::tokenize(const char *contents, std::size_t size)
{
    if (!s_initialized)
        initialize_scan_table();

    token_stream.resize(1024);
    token_stream[0].kind = Token_EOF;
    token_stream[0].text = contents;

    index = 1;

    cursor = (const unsigned char *) contents;
    begin_buffer = (const unsigned char *) contents;
    end_buffer = cursor + size;

    // Offsets of '\n' characters, for line/column mapping ...
    location_table.resize(1024);
    location_table[0] = 0;
    location_table.current_line = 1;

    // ... and offsets of '#' preprocessor markers.
    line_table.resize(1024);
    line_table[0] = 0;
    line_table.current_line = 1;

    do {
        if (index == token_stream.size())
            token_stream.resize(token_stream.size() * 2);

        Token *current_token = &token_stream[(int) index];
        current_token->text = reinterpret_cast<const char*>(begin_buffer);
        current_token->position = cursor - begin_buffer;
        // Dispatch on the current byte; handlers advance `cursor` and, when
        // they emit a token, bump `index`.
        (this->*s_scan_table[*cursor])();
        current_token->size = cursor - begin_buffer - current_token->position;
    } while (cursor < end_buffer);

    if (index == token_stream.size())
        token_stream.resize(token_stream.size() * 2);

    Q_ASSERT(index < token_stream.size());
    token_stream[(int) index].position = cursor - begin_buffer;
    token_stream[(int) index].kind = Token_EOF;
}
164 | |
165 void Lexer::reportError(const QString& msg) | |
166 { | |
167 int line, column; | |
168 QString fileName; | |
169 | |
170 std::size_t tok = token_stream.cursor(); | |
171 _M_location.positionAt(token_stream.position(tok), | |
172 &line, &column, &fileName); | |
173 | |
174 Control::ErrorMessage errmsg; | |
175 errmsg.setLine(line + 1); | |
176 errmsg.setColumn(column); | |
177 errmsg.setFileName(fileName); | |
178 errmsg.setMessage(QLatin1String("** LEXER ERROR ") + msg); | |
179 control->reportError(errmsg); | |
180 } | |
181 | |
182 void Lexer::initialize_scan_table() | |
183 { | |
184 s_initialized = true; | |
185 | |
186 for (int i=0; i<256; ++i) | |
187 { | |
188 if (isspace(i)) | |
189 s_scan_table[i] = &Lexer::scan_white_spaces; | |
190 else if (isalpha(i) || i == '_') | |
191 s_scan_table[i] = &Lexer::scan_identifier_or_keyword; | |
192 else if (isdigit(i)) | |
193 s_scan_table[i] = &Lexer::scan_int_constant; | |
194 else | |
195 s_scan_table[i] = &Lexer::scan_invalid_input; | |
196 } | |
197 | |
198 s_scan_table[int('L')] = &Lexer::scan_identifier_or_literal; | |
199 s_scan_table[int('\n')] = &Lexer::scan_newline; | |
200 s_scan_table[int('#')] = &Lexer::scan_preprocessor; | |
201 | |
202 s_scan_table[int('\'')] = &Lexer::scan_char_constant; | |
203 s_scan_table[int('"')] = &Lexer::scan_string_constant; | |
204 | |
205 s_scan_table[int('.')] = &Lexer::scan_int_constant; | |
206 | |
207 s_scan_table[int('!')] = &Lexer::scan_not; | |
208 s_scan_table[int('%')] = &Lexer::scan_remainder; | |
209 s_scan_table[int('&')] = &Lexer::scan_and; | |
210 s_scan_table[int('(')] = &Lexer::scan_left_paren; | |
211 s_scan_table[int(')')] = &Lexer::scan_right_paren; | |
212 s_scan_table[int('*')] = &Lexer::scan_star; | |
213 s_scan_table[int('+')] = &Lexer::scan_plus; | |
214 s_scan_table[int(',')] = &Lexer::scan_comma; | |
215 s_scan_table[int('-')] = &Lexer::scan_minus; | |
216 s_scan_table[int('/')] = &Lexer::scan_divide; | |
217 s_scan_table[int(':')] = &Lexer::scan_colon; | |
218 s_scan_table[int(';')] = &Lexer::scan_semicolon; | |
219 s_scan_table[int('<')] = &Lexer::scan_less; | |
220 s_scan_table[int('=')] = &Lexer::scan_equal; | |
221 s_scan_table[int('>')] = &Lexer::scan_greater; | |
222 s_scan_table[int('?')] = &Lexer::scan_question; | |
223 s_scan_table[int('[')] = &Lexer::scan_left_bracket; | |
224 s_scan_table[int(']')] = &Lexer::scan_right_bracket; | |
225 s_scan_table[int('^')] = &Lexer::scan_xor; | |
226 s_scan_table[int('{')] = &Lexer::scan_left_brace; | |
227 s_scan_table[int('|')] = &Lexer::scan_or; | |
228 s_scan_table[int('}')] = &Lexer::scan_right_brace; | |
229 s_scan_table[int('~')] = &Lexer::scan_tilde; | |
230 | |
231 s_scan_table[0] = &Lexer::scan_EOF; | |
232 } | |
233 | |
// Record the offset of a '#' directive in line_table (used later by
// LocationManager to recover file/line info from "# <line> \"file\"" markers)
// and skip to the end of the line.  The directive text is not tokenized.
void Lexer::scan_preprocessor()
{
    if (line_table.current_line == line_table.size())
        line_table.resize(line_table.current_line * 2);

    line_table[(int) line_table.current_line++] = (cursor - begin_buffer);

    while (*cursor && *cursor != '\n')
        ++cursor;

    // Hitting the NUL terminator instead of '\n' means the directive was the
    // last line of the buffer.
    if (*cursor != '\n')
        reportError("expected newline");
}
247 | |
248 void Lexer::scan_char_constant() | |
249 { | |
250 const unsigned char *begin = cursor; | |
251 | |
252 ++cursor; | |
253 while (*cursor && *cursor != '\'') | |
254 { | |
255 if (*cursor == '\n') | |
256 reportError("did not expect newline"); | |
257 | |
258 if (*cursor == '\\') | |
259 ++cursor; | |
260 ++cursor; | |
261 } | |
262 | |
263 if (*cursor != '\'') | |
264 reportError("expected \'"); | |
265 | |
266 ++cursor; | |
267 | |
268 token_stream[(int) index].extra.symbol = | |
269 control->findOrInsertName((const char*) begin, cursor - begin); | |
270 | |
271 token_stream[(int) index++].kind = Token_char_literal; | |
272 } | |
273 | |
274 void Lexer::scan_string_constant() | |
275 { | |
276 const unsigned char *begin = cursor; | |
277 | |
278 ++cursor; | |
279 while (*cursor && *cursor != '"') | |
280 { | |
281 if (*cursor == '\n') | |
282 reportError("did not expect newline"); | |
283 | |
284 if (*cursor == '\\') | |
285 ++cursor; | |
286 ++cursor; | |
287 } | |
288 | |
289 if (*cursor != '"') | |
290 reportError("expected \""); | |
291 | |
292 ++cursor; | |
293 | |
294 token_stream[(int) index].extra.symbol = | |
295 control->findOrInsertName((const char*) begin, cursor - begin); | |
296 | |
297 token_stream[(int) index++].kind = Token_string_literal; | |
298 } | |
299 | |
// Record the buffer offset of this '\n' in location_table (growing the table
// when full) so byte offsets can later be mapped to line/column pairs.
// Emits no token.
void Lexer::scan_newline()
{
    if (location_table.current_line == location_table.size())
        location_table.resize(location_table.current_line * 2);

    location_table[(int) location_table.current_line++] = (cursor - begin_buffer);
    ++cursor;
}
308 | |
309 void Lexer::scan_white_spaces() | |
310 { | |
311 while (isspace(*cursor)) | |
312 { | |
313 if (*cursor == '\n') | |
314 scan_newline(); | |
315 else | |
316 ++cursor; | |
317 } | |
318 } | |
319 | |
320 void Lexer::scan_identifier_or_literal() | |
321 { | |
322 switch (*(cursor + 1)) | |
323 { | |
324 case '\'': | |
325 ++cursor; | |
326 scan_char_constant(); | |
327 break; | |
328 | |
329 case '\"': | |
330 ++cursor; | |
331 scan_string_constant(); | |
332 break; | |
333 | |
334 default: | |
335 scan_identifier_or_keyword(); | |
336 break; | |
337 } | |
338 } | |
339 | |
// Scan a run of [A-Za-z0-9_] and classify it: the length-indexed keyword
// table either stamps a keyword kind or falls through to Token_identifier,
// in which case the spelling is interned via the control object.
void Lexer::scan_identifier_or_keyword()
{
    const unsigned char *skip = cursor;
    while (isalnum(*skip) || *skip== '_')
        ++skip;

    int n = skip - cursor;
    Token *current_token = &token_stream[(int) index];
    // Keyword recognizers read `cursor` in place and advance `index`;
    // lengths >= 17 can't be keywords and map to scanKeyword0.
    (this->*s_scan_keyword_table[n < 17 ? n : 0])();

    if (current_token->kind == Token_identifier)
    {
        current_token->extra.symbol =
            control->findOrInsertName((const char*) cursor, n);
    }

    cursor = skip;
}
358 | |
// Scan a numeric literal (reached for a leading digit or a leading '.').
// A '.' not followed by a digit is re-dispatched to scan_dot().  The body
// deliberately accepts any run of alphanumerics and dots, so suffixed and
// even malformed forms (0x1fUL, 1.2.3) come out as a single token —
// validation is left to later stages.
void Lexer::scan_int_constant()
{
    if (*cursor == '.' && !std::isdigit(*(cursor + 1)))
    {
        scan_dot();
        return;
    }

    const unsigned char *begin = cursor;

    while (isalnum(*cursor) || *cursor == '.')
        ++cursor;

    token_stream[(int) index].extra.symbol =
        control->findOrInsertName((const char*) begin, cursor - begin);

    token_stream[(int) index++].kind = Token_number_literal;
}
377 | |
378 void Lexer::scan_not() | |
379 { | |
380 /* | |
381 '!' ::= not | |
382 '!=' ::= not_equal | |
383 */ | |
384 | |
385 ++cursor; | |
386 | |
387 if (*cursor == '=') | |
388 { | |
389 ++cursor; | |
390 token_stream[(int) index++].kind = Token_not_eq; | |
391 } | |
392 else | |
393 { | |
394 token_stream[(int) index++].kind = '!'; | |
395 } | |
396 } | |
397 | |
398 void Lexer::scan_remainder() | |
399 { | |
400 /* | |
401 '%' ::= remainder | |
402 '%=' ::= remainder_equal | |
403 */ | |
404 | |
405 ++cursor; | |
406 | |
407 if (*cursor == '=') | |
408 { | |
409 ++cursor; | |
410 token_stream[(int) index++].kind = Token_assign; | |
411 } | |
412 else | |
413 { | |
414 token_stream[(int) index++].kind = '%'; | |
415 } | |
416 } | |
417 | |
418 void Lexer::scan_and() | |
419 { | |
420 /* | |
421 '&&' ::= and_and | |
422 '&' ::= and | |
423 '&=' ::= and_equal | |
424 */ | |
425 | |
426 ++cursor; | |
427 if (*cursor == '=') | |
428 { | |
429 ++cursor; | |
430 token_stream[(int) index++].kind = Token_assign; | |
431 } | |
432 else if (*cursor == '&') | |
433 { | |
434 ++cursor; | |
435 token_stream[(int) index++].kind = Token_and; | |
436 } | |
437 else | |
438 { | |
439 token_stream[(int) index++].kind = '&'; | |
440 } | |
441 } | |
442 | |
443 void Lexer::scan_left_paren() | |
444 { | |
445 ++cursor; | |
446 token_stream[(int) index++].kind = '('; | |
447 } | |
448 | |
449 void Lexer::scan_right_paren() | |
450 { | |
451 ++cursor; | |
452 token_stream[(int) index++].kind = ')'; | |
453 } | |
454 | |
455 void Lexer::scan_star() | |
456 { | |
457 /* | |
458 '*' ::= star | |
459 '*=' ::= star_equal | |
460 */ | |
461 | |
462 ++cursor; | |
463 | |
464 if (*cursor == '=') | |
465 { | |
466 ++cursor; | |
467 token_stream[(int) index++].kind = Token_assign; | |
468 } | |
469 else | |
470 { | |
471 token_stream[(int) index++].kind = '*'; | |
472 } | |
473 } | |
474 | |
475 void Lexer::scan_plus() | |
476 { | |
477 /* | |
478 '+' ::= plus | |
479 '++' ::= incr | |
480 '+=' ::= plus_equal | |
481 */ | |
482 | |
483 ++cursor; | |
484 if (*cursor == '=') | |
485 { | |
486 ++cursor; | |
487 token_stream[(int) index++].kind = Token_assign; | |
488 } | |
489 else if (*cursor == '+') | |
490 { | |
491 ++cursor; | |
492 token_stream[(int) index++].kind = Token_incr; | |
493 } | |
494 else | |
495 { | |
496 token_stream[(int) index++].kind = '+'; | |
497 } | |
498 } | |
499 | |
500 void Lexer::scan_comma() | |
501 { | |
502 ++cursor; | |
503 token_stream[(int) index++].kind = ','; | |
504 } | |
505 | |
// '-'   -> '-'
// '--'  -> Token_decr
// '-='  -> Token_assign
// '->'  -> Token_arrow
// '->*' -> Token_arrow immediately followed by a separate Token_ptrmem
//          (the only scanner that can emit two tokens in one call).
void Lexer::scan_minus()
{
    ++cursor;
    if (*cursor == '=')
    {
        ++cursor;
        token_stream[(int) index++].kind = Token_assign;
    }
    else if (*cursor == '-')
    {
        ++cursor;
        token_stream[(int) index++].kind = Token_decr;
    }
    else if (*cursor == '>')
    {
        ++cursor;
        token_stream[(int) index++].kind = Token_arrow;
        if (*cursor == '*')
        {
            ++cursor;
            token_stream[(int) index++].kind = Token_ptrmem;
        }
    }
    else
    {
        token_stream[(int) index++].kind = '-';
    }
}
541 | |
// Scan tokens starting with '.' (only reached via scan_int_constant when the
// dot is not followed by a digit).
//   '.'   -> '.'
//   '...' -> Token_ellipsis
// NOTE(review): the second branch matches ". then .*", i.e. the three-char
// sequence "..*", while a plain ".*" falls through to '.' followed by a
// separately scanned '*'.  This looks suspicious — confirm the parser relies
// on this tokenization before changing it.
void Lexer::scan_dot()
{
    ++cursor;
    if (*cursor == '.' && *(cursor + 1) == '.')
    {
        cursor += 2;
        token_stream[(int) index++].kind = Token_ellipsis;
    }
    else if (*cursor == '.' && *(cursor + 1) == '*')
    {
        cursor += 2;
        token_stream[(int) index++].kind = Token_ptrmem;
    }
    else
        token_stream[(int) index++].kind = '.';
}
563 | |
564 void Lexer::scan_divide() | |
565 { | |
566 /* | |
567 '/' ::= divide | |
568 '/=' ::= divide_equal | |
569 */ | |
570 | |
571 ++cursor; | |
572 | |
573 if (*cursor == '=') | |
574 { | |
575 ++cursor; | |
576 token_stream[(int) index++].kind = Token_assign; | |
577 } | |
578 else | |
579 { | |
580 token_stream[(int) index++].kind = '/'; | |
581 } | |
582 } | |
583 | |
584 void Lexer::scan_colon() | |
585 { | |
586 ++cursor; | |
587 if (*cursor == ':') | |
588 { | |
589 ++cursor; | |
590 token_stream[(int) index++].kind = Token_scope; | |
591 } | |
592 else | |
593 { | |
594 token_stream[(int) index++].kind = ':'; | |
595 } | |
596 } | |
597 | |
598 void Lexer::scan_semicolon() | |
599 { | |
600 ++cursor; | |
601 token_stream[(int) index++].kind = ';'; | |
602 } | |
603 | |
604 void Lexer::scan_less() | |
605 { | |
606 /* | |
607 '<' ::= less | |
608 '<<' ::= left_shift | |
609 '<<=' ::= left_shift_equal | |
610 '<=' ::= less_equal | |
611 */ | |
612 | |
613 ++cursor; | |
614 if (*cursor == '=') | |
615 { | |
616 ++cursor; | |
617 token_stream[(int) index++].kind = Token_leq; | |
618 } | |
619 else if (*cursor == '<') | |
620 { | |
621 ++cursor; | |
622 if (*cursor == '=') | |
623 { | |
624 ++cursor; | |
625 token_stream[(int) index++].kind = Token_assign; | |
626 } | |
627 else | |
628 { | |
629 token_stream[(int) index++].kind = Token_shift; | |
630 } | |
631 } | |
632 else | |
633 { | |
634 token_stream[(int) index++].kind = '<'; | |
635 } | |
636 } | |
637 | |
638 void Lexer::scan_equal() | |
639 { | |
640 /* | |
641 '=' ::= equal | |
642 '==' ::= equal_equal | |
643 */ | |
644 ++cursor; | |
645 | |
646 if (*cursor == '=') | |
647 { | |
648 ++cursor; | |
649 token_stream[(int) index++].kind = Token_eq; | |
650 } | |
651 else | |
652 { | |
653 token_stream[(int) index++].kind = '='; | |
654 } | |
655 } | |
656 | |
657 void Lexer::scan_greater() | |
658 { | |
659 /* | |
660 '>' ::= greater | |
661 '>=' ::= greater_equal | |
662 '>>' ::= right_shift | |
663 '>>=' ::= right_shift_equal | |
664 */ | |
665 | |
666 ++cursor; | |
667 if (*cursor == '=') | |
668 { | |
669 ++cursor; | |
670 token_stream[(int) index++].kind = Token_geq; | |
671 } | |
672 else if (*cursor == '>') | |
673 { | |
674 ++cursor; | |
675 if (*cursor == '=') | |
676 { | |
677 ++cursor; | |
678 token_stream[(int) index++].kind = Token_assign; | |
679 } | |
680 else | |
681 { | |
682 token_stream[(int) index++].kind = Token_shift; | |
683 } | |
684 } | |
685 else | |
686 { | |
687 token_stream[(int) index++].kind = '>'; | |
688 } | |
689 } | |
690 | |
691 void Lexer::scan_question() | |
692 { | |
693 ++cursor; | |
694 token_stream[(int) index++].kind = '?'; | |
695 } | |
696 | |
697 void Lexer::scan_left_bracket() | |
698 { | |
699 ++cursor; | |
700 token_stream[(int) index++].kind = '['; | |
701 } | |
702 | |
703 void Lexer::scan_right_bracket() | |
704 { | |
705 ++cursor; | |
706 token_stream[(int) index++].kind = ']'; | |
707 } | |
708 | |
709 void Lexer::scan_xor() | |
710 { | |
711 /* | |
712 '^' ::= xor | |
713 '^=' ::= xor_equal | |
714 */ | |
715 ++cursor; | |
716 | |
717 if (*cursor == '=') | |
718 { | |
719 ++cursor; | |
720 token_stream[(int) index++].kind = Token_assign; | |
721 } | |
722 else | |
723 { | |
724 token_stream[(int) index++].kind = '^'; | |
725 } | |
726 } | |
727 | |
728 void Lexer::scan_left_brace() | |
729 { | |
730 ++cursor; | |
731 token_stream[(int) index++].kind = '{'; | |
732 } | |
733 | |
734 void Lexer::scan_or() | |
735 { | |
736 /* | |
737 '|' ::= or | |
738 '|=' ::= or_equal | |
739 '||' ::= or_or | |
740 */ | |
741 ++cursor; | |
742 if (*cursor == '=') | |
743 { | |
744 ++cursor; | |
745 token_stream[(int) index++].kind = Token_assign; | |
746 } | |
747 else if (*cursor == '|') | |
748 { | |
749 ++cursor; | |
750 token_stream[(int) index++].kind = Token_or; | |
751 } | |
752 else | |
753 { | |
754 token_stream[(int) index++].kind = '|'; | |
755 } | |
756 } | |
757 | |
758 void Lexer::scan_right_brace() | |
759 { | |
760 ++cursor; | |
761 token_stream[(int) index++].kind = '}'; | |
762 } | |
763 | |
764 void Lexer::scan_tilde() | |
765 { | |
766 ++cursor; | |
767 token_stream[(int) index++].kind = '~'; | |
768 } | |
769 | |
770 void Lexer::scan_EOF() | |
771 { | |
772 ++cursor; | |
773 token_stream[(int) index++].kind = Token_EOF; | |
774 } | |
775 | |
776 void Lexer::scan_invalid_input() | |
777 { | |
778 QString errmsg("invalid input: %1"); | |
779 errmsg.arg(int(*cursor)); | |
780 reportError(errmsg); | |
781 ++cursor; | |
782 } | |
783 | |
// Binary-search the first `max_line` recorded offsets for the line containing
// `offset`, writing a 1-based line and 0-based column.
// NOTE(review): when max_line == 0 (or a null out-pointer) the function
// returns without touching *line / *column — confirm callers initialize them.
void LocationTable::positionAt(std::size_t offset, int max_line,
                               int *line, int *column) const
{
    if (!(line && column && max_line != 0))
        return;

    // Classic upper-bound binary search: find the first entry >= offset.
    int first = 0;
    int len = max_line;
    int half;
    int middle;

    while (len > 0)
    {
        half = len >> 1;
        middle = first;

        middle += half;

        if (lines[middle] < offset)
        {
            first = middle;
            ++first;
            len = len - half - 1;
        }
        else
            len = half;
    }

    *line = std::max(first, 1);
    // Distance from the previous recorded offset; -1 skips the recorded
    // character itself, and negatives (offset on that character) clamp to 0.
    *column = (int) (offset - lines[*line - 1] - 1);

    if (*column < 0)
    {
        *column = 0;
    }
}
820 | |
// Fallback recognizer for identifier lengths that have no keywords:
// unconditionally emit Token_identifier.
void Lexer::scanKeyword0()
{
    token_stream[(int) index++].kind = Token_identifier;
}
825 | |
826 void Lexer::scanKeyword2() | |
827 { | |
828 switch (*cursor) | |
829 { | |
830 case 'i': | |
831 if (*(cursor + 1) == 'f') | |
832 { | |
833 token_stream[(int) index++].kind = Token_if; | |
834 return; | |
835 } | |
836 break; | |
837 | |
838 case 'd': | |
839 if (*(cursor + 1) == 'o') | |
840 { | |
841 token_stream[(int) index++].kind = Token_do; | |
842 return; | |
843 } | |
844 break; | |
845 | |
846 case 'o': | |
847 if (*(cursor + 1) == 'r') | |
848 { | |
849 token_stream[(int) index++].kind = Token_or; | |
850 return; | |
851 } | |
852 break; | |
853 | |
854 } | |
855 token_stream[(int) index++].kind = Token_identifier; | |
856 } | |
857 | |
858 void Lexer::scanKeyword3() | |
859 { | |
860 switch (*cursor) | |
861 { | |
862 case 'a': | |
863 if (*(cursor + 1) == 'n' && | |
864 *(cursor + 2) == 'd') | |
865 { | |
866 token_stream[(int) index++].kind = Token_and; | |
867 return; | |
868 } | |
869 if (*(cursor + 1) == 's' && | |
870 *(cursor + 2) == 'm') | |
871 { | |
872 token_stream[(int) index++].kind = Token_asm; | |
873 return; | |
874 } | |
875 break; | |
876 | |
877 case 'f': | |
878 if (*(cursor + 1) == 'o' && | |
879 *(cursor + 2) == 'r') | |
880 { | |
881 token_stream[(int) index++].kind = Token_for; | |
882 return; | |
883 } | |
884 break; | |
885 | |
886 case 'i': | |
887 if (*(cursor + 1) == 'n' && | |
888 *(cursor + 2) == 't') | |
889 { | |
890 token_stream[(int) index++].kind = Token_int; | |
891 return; | |
892 } | |
893 break; | |
894 | |
895 case 'n': | |
896 if (*(cursor + 1) == 'e' && | |
897 *(cursor + 2) == 'w') | |
898 { | |
899 token_stream[(int) index++].kind = Token_new; | |
900 return; | |
901 } | |
902 if (*(cursor + 1) == 'o' && | |
903 *(cursor + 2) == 't') | |
904 { | |
905 token_stream[(int) index++].kind = Token_not; | |
906 return; | |
907 } | |
908 break; | |
909 | |
910 case 't': | |
911 if (*(cursor + 1) == 'r' && | |
912 *(cursor + 2) == 'y') | |
913 { | |
914 token_stream[(int) index++].kind = Token_try; | |
915 return; | |
916 } | |
917 break; | |
918 | |
919 case 'x': | |
920 if (*(cursor + 1) == 'o' && | |
921 *(cursor + 2) == 'r') | |
922 { | |
923 token_stream[(int) index++].kind = Token_xor; | |
924 return; | |
925 } | |
926 break; | |
927 | |
928 } | |
929 token_stream[(int) index++].kind = Token_identifier; | |
930 } | |
931 | |
932 void Lexer::scanKeyword4() | |
933 { | |
934 switch (*cursor) | |
935 { | |
936 case 'a': | |
937 if (*(cursor + 1) == 'u' && | |
938 *(cursor + 2) == 't' && | |
939 *(cursor + 3) == 'o') | |
940 { | |
941 token_stream[(int) index++].kind = Token_auto; | |
942 return; | |
943 } | |
944 break; | |
945 | |
946 case 'c': | |
947 if (*(cursor + 1) == 'a' && | |
948 *(cursor + 2) == 's' && | |
949 *(cursor + 3) == 'e') | |
950 { | |
951 token_stream[(int) index++].kind = Token_case; | |
952 return; | |
953 } | |
954 if (*(cursor + 1) == 'h' && | |
955 *(cursor + 2) == 'a' && | |
956 *(cursor + 3) == 'r') | |
957 { | |
958 token_stream[(int) index++].kind = Token_char; | |
959 return; | |
960 } | |
961 break; | |
962 | |
963 case 'b': | |
964 if (*(cursor + 1) == 'o' && | |
965 *(cursor + 2) == 'o' && | |
966 *(cursor + 3) == 'l') | |
967 { | |
968 token_stream[(int) index++].kind = Token_bool; | |
969 return; | |
970 } | |
971 break; | |
972 | |
973 case 'e': | |
974 if (*(cursor + 1) == 'l' && | |
975 *(cursor + 2) == 's' && | |
976 *(cursor + 3) == 'e') | |
977 { | |
978 token_stream[(int) index++].kind = Token_else; | |
979 return; | |
980 } | |
981 if (*(cursor + 1) == 'm' && | |
982 *(cursor + 2) == 'i' && | |
983 *(cursor + 3) == 't') | |
984 { | |
985 token_stream[(int) index++].kind = Token_emit; | |
986 return; | |
987 } | |
988 if (*(cursor + 1) == 'n' && | |
989 *(cursor + 2) == 'u' && | |
990 *(cursor + 3) == 'm') | |
991 { | |
992 token_stream[(int) index++].kind = Token_enum; | |
993 return; | |
994 } | |
995 break; | |
996 | |
997 case 'g': | |
998 if (*(cursor + 1) == 'o' && | |
999 *(cursor + 2) == 't' && | |
1000 *(cursor + 3) == 'o') | |
1001 { | |
1002 token_stream[(int) index++].kind = Token_goto; | |
1003 return; | |
1004 } | |
1005 break; | |
1006 | |
1007 case 'l': | |
1008 if (*(cursor + 1) == 'o' && | |
1009 *(cursor + 2) == 'n' && | |
1010 *(cursor + 3) == 'g') | |
1011 { | |
1012 token_stream[(int) index++].kind = Token_long; | |
1013 return; | |
1014 } | |
1015 break; | |
1016 | |
1017 case 't': | |
1018 if (*(cursor + 1) == 'h' && | |
1019 *(cursor + 2) == 'i' && | |
1020 *(cursor + 3) == 's') | |
1021 { | |
1022 token_stream[(int) index++].kind = Token_this; | |
1023 return; | |
1024 } | |
1025 break; | |
1026 | |
1027 case 'v': | |
1028 if (*(cursor + 1) == 'o' && | |
1029 *(cursor + 2) == 'i' && | |
1030 *(cursor + 3) == 'd') | |
1031 { | |
1032 token_stream[(int) index++].kind = Token_void; | |
1033 return; | |
1034 } | |
1035 break; | |
1036 | |
1037 } | |
1038 token_stream[(int) index++].kind = Token_identifier; | |
1039 } | |
1040 | |
1041 void Lexer::scanKeyword5() | |
1042 { | |
1043 switch (*cursor) | |
1044 { | |
1045 case 'c': | |
1046 if (*(cursor + 1) == 'a' && | |
1047 *(cursor + 2) == 't' && | |
1048 *(cursor + 3) == 'c' && | |
1049 *(cursor + 4) == 'h') | |
1050 { | |
1051 token_stream[(int) index++].kind = Token_catch; | |
1052 return; | |
1053 } | |
1054 if (*(cursor + 1) == 'l' && | |
1055 *(cursor + 2) == 'a' && | |
1056 *(cursor + 3) == 's' && | |
1057 *(cursor + 4) == 's') | |
1058 { | |
1059 token_stream[(int) index++].kind = Token_class; | |
1060 return; | |
1061 } | |
1062 if (*(cursor + 1) == 'o' && | |
1063 *(cursor + 2) == 'm' && | |
1064 *(cursor + 3) == 'p' && | |
1065 *(cursor + 4) == 'l') | |
1066 { | |
1067 token_stream[(int) index++].kind = Token_compl; | |
1068 return; | |
1069 } | |
1070 if (*(cursor + 1) == 'o' && | |
1071 *(cursor + 2) == 'n' && | |
1072 *(cursor + 3) == 's' && | |
1073 *(cursor + 4) == 't') | |
1074 { | |
1075 token_stream[(int) index++].kind = Token_const; | |
1076 return; | |
1077 } | |
1078 break; | |
1079 | |
1080 case 'b': | |
1081 if (*(cursor + 1) == 'i' && | |
1082 *(cursor + 2) == 't' && | |
1083 *(cursor + 3) == 'o' && | |
1084 *(cursor + 4) == 'r') | |
1085 { | |
1086 token_stream[(int) index++].kind = Token_bitor; | |
1087 return; | |
1088 } | |
1089 if (*(cursor + 1) == 'r' && | |
1090 *(cursor + 2) == 'e' && | |
1091 *(cursor + 3) == 'a' && | |
1092 *(cursor + 4) == 'k') | |
1093 { | |
1094 token_stream[(int) index++].kind = Token_break; | |
1095 return; | |
1096 } | |
1097 break; | |
1098 | |
1099 case 'f': | |
1100 if (*(cursor + 1) == 'l' && | |
1101 *(cursor + 2) == 'o' && | |
1102 *(cursor + 3) == 'a' && | |
1103 *(cursor + 4) == 't') | |
1104 { | |
1105 token_stream[(int) index++].kind = Token_float; | |
1106 return; | |
1107 } | |
1108 break; | |
1109 | |
1110 case 'o': | |
1111 if (*(cursor + 1) == 'r' && | |
1112 *(cursor + 2) == '_' && | |
1113 *(cursor + 3) == 'e' && | |
1114 *(cursor + 4) == 'q') | |
1115 { | |
1116 token_stream[(int) index++].kind = Token_or_eq; | |
1117 return; | |
1118 } | |
1119 break; | |
1120 | |
1121 case 's': | |
1122 if (*(cursor + 1) == 'h' && | |
1123 *(cursor + 2) == 'o' && | |
1124 *(cursor + 3) == 'r' && | |
1125 *(cursor + 4) == 't') | |
1126 { | |
1127 token_stream[(int) index++].kind = Token_short; | |
1128 return; | |
1129 } | |
1130 if (*(cursor + 1) == 'l' && | |
1131 *(cursor + 2) == 'o' && | |
1132 *(cursor + 3) == 't' && | |
1133 *(cursor + 4) == 's') | |
1134 { | |
1135 token_stream[(int) index++].kind = Token_slots; | |
1136 return; | |
1137 } | |
1138 break; | |
1139 | |
1140 case 'u': | |
1141 if (*(cursor + 1) == 'n' && | |
1142 *(cursor + 2) == 'i' && | |
1143 *(cursor + 3) == 'o' && | |
1144 *(cursor + 4) == 'n') | |
1145 { | |
1146 token_stream[(int) index++].kind = Token_union; | |
1147 return; | |
1148 } | |
1149 if (*(cursor + 1) == 's' && | |
1150 *(cursor + 2) == 'i' && | |
1151 *(cursor + 3) == 'n' && | |
1152 *(cursor + 4) == 'g') | |
1153 { | |
1154 token_stream[(int) index++].kind = Token_using; | |
1155 return; | |
1156 } | |
1157 break; | |
1158 | |
1159 case 't': | |
1160 if (*(cursor + 1) == 'h' && | |
1161 *(cursor + 2) == 'r' && | |
1162 *(cursor + 3) == 'o' && | |
1163 *(cursor + 4) == 'w') | |
1164 { | |
1165 token_stream[(int) index++].kind = Token_throw; | |
1166 return; | |
1167 } | |
1168 break; | |
1169 | |
1170 case 'w': | |
1171 if (*(cursor + 1) == 'h' && | |
1172 *(cursor + 2) == 'i' && | |
1173 *(cursor + 3) == 'l' && | |
1174 *(cursor + 4) == 'e') | |
1175 { | |
1176 token_stream[(int) index++].kind = Token_while; | |
1177 return; | |
1178 } | |
1179 break; | |
1180 | |
1181 } | |
1182 token_stream[(int) index++].kind = Token_identifier; | |
1183 } | |
1184 | |
1185 void Lexer::scanKeyword6() | |
1186 { | |
1187 switch (*cursor) | |
1188 { | |
1189 case 'a': | |
1190 if (*(cursor + 1) == 'n' && | |
1191 *(cursor + 2) == 'd' && | |
1192 *(cursor + 3) == '_' && | |
1193 *(cursor + 4) == 'e' && | |
1194 *(cursor + 5) == 'q') | |
1195 { | |
1196 token_stream[(int) index++].kind = Token_and_eq; | |
1197 return; | |
1198 } | |
1199 break; | |
1200 | |
1201 case 'b': | |
1202 if (*(cursor + 1) == 'i' && | |
1203 *(cursor + 2) == 't' && | |
1204 *(cursor + 3) == 'a' && | |
1205 *(cursor + 4) == 'n' && | |
1206 *(cursor + 5) == 'd') | |
1207 { | |
1208 token_stream[(int) index++].kind = Token_bitand; | |
1209 return; | |
1210 } | |
1211 break; | |
1212 | |
1213 case 'e': | |
1214 if (*(cursor + 1) == 'x' && | |
1215 *(cursor + 2) == 'p' && | |
1216 *(cursor + 3) == 'o' && | |
1217 *(cursor + 4) == 'r' && | |
1218 *(cursor + 5) == 't') | |
1219 { | |
1220 token_stream[(int) index++].kind = Token_export; | |
1221 return; | |
1222 } | |
1223 if (*(cursor + 1) == 'x' && | |
1224 *(cursor + 2) == 't' && | |
1225 *(cursor + 3) == 'e' && | |
1226 *(cursor + 4) == 'r' && | |
1227 *(cursor + 5) == 'n') | |
1228 { | |
1229 token_stream[(int) index++].kind = Token_extern; | |
1230 return; | |
1231 } | |
1232 break; | |
1233 | |
1234 case 'd': | |
1235 if (*(cursor + 1) == 'e' && | |
1236 *(cursor + 2) == 'l' && | |
1237 *(cursor + 3) == 'e' && | |
1238 *(cursor + 4) == 't' && | |
1239 *(cursor + 5) == 'e') | |
1240 { | |
1241 token_stream[(int) index++].kind = Token_delete; | |
1242 return; | |
1243 } | |
1244 if (*(cursor + 1) == 'o' && | |
1245 *(cursor + 2) == 'u' && | |
1246 *(cursor + 3) == 'b' && | |
1247 *(cursor + 4) == 'l' && | |
1248 *(cursor + 5) == 'e') | |
1249 { | |
1250 token_stream[(int) index++].kind = Token_double; | |
1251 return; | |
1252 } | |
1253 break; | |
1254 | |
1255 case 'f': | |
1256 if (*(cursor + 1) == 'r' && | |
1257 *(cursor + 2) == 'i' && | |
1258 *(cursor + 3) == 'e' && | |
1259 *(cursor + 4) == 'n' && | |
1260 *(cursor + 5) == 'd') | |
1261 { | |
1262 token_stream[(int) index++].kind = Token_friend; | |
1263 return; | |
1264 } | |
1265 break; | |
1266 | |
1267 case 'i': | |
1268 if (*(cursor + 1) == 'n' && | |
1269 *(cursor + 2) == 'l' && | |
1270 *(cursor + 3) == 'i' && | |
1271 *(cursor + 4) == 'n' && | |
1272 *(cursor + 5) == 'e') | |
1273 { | |
1274 token_stream[(int) index++].kind = Token_inline; | |
1275 return; | |
1276 } | |
1277 break; | |
1278 | |
1279 case 'K': | |
1280 if (*(cursor + 1) == '_' && | |
1281 *(cursor + 2) == 'D' && | |
1282 *(cursor + 3) == 'C' && | |
1283 *(cursor + 4) == 'O' && | |
1284 *(cursor + 5) == 'P') | |
1285 { | |
1286 token_stream[(int) index++].kind = Token_K_DCOP; | |
1287 return; | |
1288 } | |
1289 break; | |
1290 | |
1291 case 'n': | |
1292 if (*(cursor + 1) == 'o' && | |
1293 *(cursor + 2) == 't' && | |
1294 *(cursor + 3) == '_' && | |
1295 *(cursor + 4) == 'e' && | |
1296 *(cursor + 5) == 'q') | |
1297 { | |
1298 token_stream[(int) index++].kind = Token_not_eq; | |
1299 return; | |
1300 } | |
1301 break; | |
1302 | |
1303 case 'p': | |
1304 if (*(cursor + 1) == 'u' && | |
1305 *(cursor + 2) == 'b' && | |
1306 *(cursor + 3) == 'l' && | |
1307 *(cursor + 4) == 'i' && | |
1308 *(cursor + 5) == 'c') | |
1309 { | |
1310 token_stream[(int) index++].kind = Token_public; | |
1311 return; | |
1312 } | |
1313 break; | |
1314 | |
1315 case 's': | |
1316 if (*(cursor + 1) == 'i' && | |
1317 *(cursor + 2) == 'g' && | |
1318 *(cursor + 3) == 'n' && | |
1319 *(cursor + 4) == 'e' && | |
1320 *(cursor + 5) == 'd') | |
1321 { | |
1322 token_stream[(int) index++].kind = Token_signed; | |
1323 return; | |
1324 } | |
1325 if (*(cursor + 1) == 'i' && | |
1326 *(cursor + 2) == 'z' && | |
1327 *(cursor + 3) == 'e' && | |
1328 *(cursor + 4) == 'o' && | |
1329 *(cursor + 5) == 'f') | |
1330 { | |
1331 token_stream[(int) index++].kind = Token_sizeof; | |
1332 return; | |
1333 } | |
1334 if (*(cursor + 1) == 't' && | |
1335 *(cursor + 2) == 'a' && | |
1336 *(cursor + 3) == 't' && | |
1337 *(cursor + 4) == 'i' && | |
1338 *(cursor + 5) == 'c') | |
1339 { | |
1340 token_stream[(int) index++].kind = Token_static; | |
1341 return; | |
1342 } | |
1343 if (*(cursor + 1) == 't' && | |
1344 *(cursor + 2) == 'r' && | |
1345 *(cursor + 3) == 'u' && | |
1346 *(cursor + 4) == 'c' && | |
1347 *(cursor + 5) == 't') | |
1348 { | |
1349 token_stream[(int) index++].kind = Token_struct; | |
1350 return; | |
1351 } | |
1352 if (*(cursor + 1) == 'w' && | |
1353 *(cursor + 2) == 'i' && | |
1354 *(cursor + 3) == 't' && | |
1355 *(cursor + 4) == 'c' && | |
1356 *(cursor + 5) == 'h') | |
1357 { | |
1358 token_stream[(int) index++].kind = Token_switch; | |
1359 return; | |
1360 } | |
1361 break; | |
1362 | |
1363 case 'r': | |
1364 if (*(cursor + 1) == 'e' && | |
1365 *(cursor + 2) == 't' && | |
1366 *(cursor + 3) == 'u' && | |
1367 *(cursor + 4) == 'r' && | |
1368 *(cursor + 5) == 'n') | |
1369 { | |
1370 token_stream[(int) index++].kind = Token_return; | |
1371 return; | |
1372 } | |
1373 break; | |
1374 | |
1375 case 't': | |
1376 if (*(cursor + 1) == 'y' && | |
1377 *(cursor + 2) == 'p' && | |
1378 *(cursor + 3) == 'e' && | |
1379 *(cursor + 4) == 'i' && | |
1380 *(cursor + 5) == 'd') | |
1381 { | |
1382 token_stream[(int) index++].kind = Token_typeid; | |
1383 return; | |
1384 } | |
1385 break; | |
1386 | |
1387 case 'x': | |
1388 if (*(cursor + 1) == 'o' && | |
1389 *(cursor + 2) == 'r' && | |
1390 *(cursor + 3) == '_' && | |
1391 *(cursor + 4) == 'e' && | |
1392 *(cursor + 5) == 'q') | |
1393 { | |
1394 token_stream[(int) index++].kind = Token_xor_eq; | |
1395 return; | |
1396 } | |
1397 break; | |
1398 | |
1399 case 'k': | |
1400 if (*(cursor + 1) == '_' && | |
1401 *(cursor + 2) == 'd' && | |
1402 *(cursor + 3) == 'c' && | |
1403 *(cursor + 4) == 'o' && | |
1404 *(cursor + 5) == 'p') | |
1405 { | |
1406 token_stream[(int) index++].kind = Token_k_dcop; | |
1407 return; | |
1408 } | |
1409 break; | |
1410 | |
1411 } | |
1412 token_stream[(int) index++].kind = Token_identifier; | |
1413 } | |
1414 | |
1415 void Lexer::scanKeyword7() | |
1416 { | |
1417 switch (*cursor) | |
1418 { | |
1419 case 'd': | |
1420 if (*(cursor + 1) == 'e' && | |
1421 *(cursor + 2) == 'f' && | |
1422 *(cursor + 3) == 'a' && | |
1423 *(cursor + 4) == 'u' && | |
1424 *(cursor + 5) == 'l' && | |
1425 *(cursor + 6) == 't') | |
1426 { | |
1427 token_stream[(int) index++].kind = Token_default; | |
1428 return; | |
1429 } | |
1430 break; | |
1431 | |
1432 case 'm': | |
1433 if (*(cursor + 1) == 'u' && | |
1434 *(cursor + 2) == 't' && | |
1435 *(cursor + 3) == 'a' && | |
1436 *(cursor + 4) == 'b' && | |
1437 *(cursor + 5) == 'l' && | |
1438 *(cursor + 6) == 'e') | |
1439 { | |
1440 token_stream[(int) index++].kind = Token_mutable; | |
1441 return; | |
1442 } | |
1443 break; | |
1444 | |
1445 case 'p': | |
1446 if (*(cursor + 1) == 'r' && | |
1447 *(cursor + 2) == 'i' && | |
1448 *(cursor + 3) == 'v' && | |
1449 *(cursor + 4) == 'a' && | |
1450 *(cursor + 5) == 't' && | |
1451 *(cursor + 6) == 'e') | |
1452 { | |
1453 token_stream[(int) index++].kind = Token_private; | |
1454 return; | |
1455 } | |
1456 break; | |
1457 case 's': | |
1458 if (*(cursor + 1) == 'i' && | |
1459 *(cursor + 2) == 'g' && | |
1460 *(cursor + 3) == 'n' && | |
1461 *(cursor + 4) == 'a' && | |
1462 *(cursor + 5) == 'l' && | |
1463 *(cursor + 6) == 's') | |
1464 { | |
1465 token_stream[(int) index++].kind = Token_signals; | |
1466 return; | |
1467 } | |
1468 break; | |
1469 case 't': | |
1470 if (*(cursor + 1) == 'y' && | |
1471 *(cursor + 2) == 'p' && | |
1472 *(cursor + 3) == 'e' && | |
1473 *(cursor + 4) == 'd' && | |
1474 *(cursor + 5) == 'e' && | |
1475 *(cursor + 6) == 'f') | |
1476 { | |
1477 token_stream[(int) index++].kind = Token_typedef; | |
1478 return; | |
1479 } | |
1480 break; | |
1481 | |
1482 case 'v': | |
1483 if (*(cursor + 1) == 'i' && | |
1484 *(cursor + 2) == 'r' && | |
1485 *(cursor + 3) == 't' && | |
1486 *(cursor + 4) == 'u' && | |
1487 *(cursor + 5) == 'a' && | |
1488 *(cursor + 6) == 'l') | |
1489 { | |
1490 token_stream[(int) index++].kind = Token_virtual; | |
1491 return; | |
1492 } | |
1493 break; | |
1494 | |
1495 case 'Q': | |
1496 if (*(cursor + 1) == '_' && | |
1497 *(cursor + 2) == 'E' && | |
1498 *(cursor + 3) == 'N' && | |
1499 *(cursor + 4) == 'U' && | |
1500 *(cursor + 5) == 'M' && | |
1501 *(cursor + 6) == 'S') | |
1502 { | |
1503 token_stream[(int) index++].kind = Token_Q_ENUMS; | |
1504 return; | |
1505 } | |
1506 break; | |
1507 | |
1508 } | |
1509 token_stream[(int) index++].kind = Token_identifier; | |
1510 } | |
1511 | |
1512 void Lexer::scanKeyword8() | |
1513 { | |
1514 switch (*cursor) | |
1515 { | |
1516 case '_': | |
1517 if (*(cursor + 1) == '_' && | |
1518 *(cursor + 2) == 't' && | |
1519 *(cursor + 3) == 'y' && | |
1520 *(cursor + 4) == 'p' && | |
1521 *(cursor + 5) == 'e' && | |
1522 *(cursor + 6) == 'o' && | |
1523 *(cursor + 7) == 'f') | |
1524 { | |
1525 token_stream[(int) index++].kind = Token___typeof; | |
1526 return; | |
1527 } | |
1528 break; | |
1529 | |
1530 case 'c': | |
1531 if (*(cursor + 1) == 'o' && | |
1532 *(cursor + 2) == 'n' && | |
1533 *(cursor + 3) == 't' && | |
1534 *(cursor + 4) == 'i' && | |
1535 *(cursor + 5) == 'n' && | |
1536 *(cursor + 6) == 'u' && | |
1537 *(cursor + 7) == 'e') | |
1538 { | |
1539 token_stream[(int) index++].kind = Token_continue; | |
1540 return; | |
1541 } | |
1542 break; | |
1543 | |
1544 case 'e': | |
1545 if (*(cursor + 1) == 'x' && | |
1546 *(cursor + 2) == 'p' && | |
1547 *(cursor + 3) == 'l' && | |
1548 *(cursor + 4) == 'i' && | |
1549 *(cursor + 5) == 'c' && | |
1550 *(cursor + 6) == 'i' && | |
1551 *(cursor + 7) == 't') | |
1552 { | |
1553 token_stream[(int) index++].kind = Token_explicit; | |
1554 return; | |
1555 } | |
1556 break; | |
1557 | |
1558 case 'o': | |
1559 if (*(cursor + 1) == 'p' && | |
1560 *(cursor + 2) == 'e' && | |
1561 *(cursor + 3) == 'r' && | |
1562 *(cursor + 4) == 'a' && | |
1563 *(cursor + 5) == 't' && | |
1564 *(cursor + 6) == 'o' && | |
1565 *(cursor + 7) == 'r') | |
1566 { | |
1567 token_stream[(int) index++].kind = Token_operator; | |
1568 return; | |
1569 } | |
1570 break; | |
1571 | |
1572 case 'Q': | |
1573 if (*(cursor + 1) == '_' && | |
1574 *(cursor + 2) == 'O' && | |
1575 *(cursor + 3) == 'B' && | |
1576 *(cursor + 4) == 'J' && | |
1577 *(cursor + 5) == 'E' && | |
1578 *(cursor + 6) == 'C' && | |
1579 *(cursor + 7) == 'T') | |
1580 { | |
1581 token_stream[(int) index++].kind = Token_Q_OBJECT; | |
1582 return; | |
1583 } | |
1584 break; | |
1585 | |
1586 case 'r': | |
1587 if (*(cursor + 1) == 'e' && | |
1588 *(cursor + 2) == 'g' && | |
1589 *(cursor + 3) == 'i' && | |
1590 *(cursor + 4) == 's' && | |
1591 *(cursor + 5) == 't' && | |
1592 *(cursor + 6) == 'e' && | |
1593 *(cursor + 7) == 'r') | |
1594 { | |
1595 token_stream[(int) index++].kind = Token_register; | |
1596 return; | |
1597 } | |
1598 break; | |
1599 | |
1600 case 'u': | |
1601 if (*(cursor + 1) == 'n' && | |
1602 *(cursor + 2) == 's' && | |
1603 *(cursor + 3) == 'i' && | |
1604 *(cursor + 4) == 'g' && | |
1605 *(cursor + 5) == 'n' && | |
1606 *(cursor + 6) == 'e' && | |
1607 *(cursor + 7) == 'd') | |
1608 { | |
1609 token_stream[(int) index++].kind = Token_unsigned; | |
1610 return; | |
1611 } | |
1612 break; | |
1613 | |
1614 case 't': | |
1615 if (*(cursor + 1) == 'e' && | |
1616 *(cursor + 2) == 'm' && | |
1617 *(cursor + 3) == 'p' && | |
1618 *(cursor + 4) == 'l' && | |
1619 *(cursor + 5) == 'a' && | |
1620 *(cursor + 6) == 't' && | |
1621 *(cursor + 7) == 'e') | |
1622 { | |
1623 token_stream[(int) index++].kind = Token_template; | |
1624 return; | |
1625 } | |
1626 if (*(cursor + 1) == 'y' && | |
1627 *(cursor + 2) == 'p' && | |
1628 *(cursor + 3) == 'e' && | |
1629 *(cursor + 4) == 'n' && | |
1630 *(cursor + 5) == 'a' && | |
1631 *(cursor + 6) == 'm' && | |
1632 *(cursor + 7) == 'e') | |
1633 { | |
1634 token_stream[(int) index++].kind = Token_typename; | |
1635 return; | |
1636 } | |
1637 break; | |
1638 | |
1639 case 'v': | |
1640 if (*(cursor + 1) == 'o' && | |
1641 *(cursor + 2) == 'l' && | |
1642 *(cursor + 3) == 'a' && | |
1643 *(cursor + 4) == 't' && | |
1644 *(cursor + 5) == 'i' && | |
1645 *(cursor + 6) == 'l' && | |
1646 *(cursor + 7) == 'e') | |
1647 { | |
1648 token_stream[(int) index++].kind = Token_volatile; | |
1649 return; | |
1650 } | |
1651 break; | |
1652 | |
1653 } | |
1654 token_stream[(int) index++].kind = Token_identifier; | |
1655 } | |
1656 | |
1657 void Lexer::scanKeyword9() | |
1658 { | |
1659 switch (*cursor) | |
1660 { | |
1661 case 'p': | |
1662 if (*(cursor + 1) == 'r' && | |
1663 *(cursor + 2) == 'o' && | |
1664 *(cursor + 3) == 't' && | |
1665 *(cursor + 4) == 'e' && | |
1666 *(cursor + 5) == 'c' && | |
1667 *(cursor + 6) == 't' && | |
1668 *(cursor + 7) == 'e' && | |
1669 *(cursor + 8) == 'd') | |
1670 { | |
1671 token_stream[(int) index++].kind = Token_protected; | |
1672 return; | |
1673 } | |
1674 break; | |
1675 | |
1676 case 'n': | |
1677 if (*(cursor + 1) == 'a' && | |
1678 *(cursor + 2) == 'm' && | |
1679 *(cursor + 3) == 'e' && | |
1680 *(cursor + 4) == 's' && | |
1681 *(cursor + 5) == 'p' && | |
1682 *(cursor + 6) == 'a' && | |
1683 *(cursor + 7) == 'c' && | |
1684 *(cursor + 8) == 'e') | |
1685 { | |
1686 token_stream[(int) index++].kind = Token_namespace; | |
1687 return; | |
1688 } | |
1689 break; | |
1690 | |
1691 } | |
1692 token_stream[(int) index++].kind = Token_identifier; | |
1693 } | |
1694 | |
1695 void Lexer::scanKeyword10() | |
1696 { | |
1697 switch (*cursor) | |
1698 { | |
1699 case 'c': | |
1700 if (*(cursor + 1) == 'o' && | |
1701 *(cursor + 2) == 'n' && | |
1702 *(cursor + 3) == 's' && | |
1703 *(cursor + 4) == 't' && | |
1704 *(cursor + 5) == '_' && | |
1705 *(cursor + 6) == 'c' && | |
1706 *(cursor + 7) == 'a' && | |
1707 *(cursor + 8) == 's' && | |
1708 *(cursor + 9) == 't') | |
1709 { | |
1710 token_stream[(int) index++].kind = Token_const_cast; | |
1711 return; | |
1712 } | |
1713 break; | |
1714 | |
1715 case 'Q': | |
1716 if (*(cursor + 1) == '_' && | |
1717 *(cursor + 2) == 'P' && | |
1718 *(cursor + 3) == 'R' && | |
1719 *(cursor + 4) == 'O' && | |
1720 *(cursor + 5) == 'P' && | |
1721 *(cursor + 6) == 'E' && | |
1722 *(cursor + 7) == 'R' && | |
1723 *(cursor + 8) == 'T' && | |
1724 *(cursor + 9) == 'Y') | |
1725 { | |
1726 token_stream[(int) index++].kind = Token_Q_PROPERTY; | |
1727 return; | |
1728 } | |
1729 | |
1730 break; | |
1731 } | |
1732 | |
1733 token_stream[(int) index++].kind = Token_identifier; | |
1734 } | |
1735 | |
1736 void Lexer::scanKeyword11() | |
1737 { | |
1738 switch (*cursor) | |
1739 { | |
1740 case 'Q': | |
1741 if (*(cursor + 1) == '_' && | |
1742 *(cursor + 2) == 'I' && | |
1743 *(cursor + 3) == 'N' && | |
1744 *(cursor + 4) == 'V' && | |
1745 *(cursor + 5) == 'O' && | |
1746 *(cursor + 6) == 'K' && | |
1747 *(cursor + 7) == 'A' && | |
1748 *(cursor + 8) == 'B' && | |
1749 *(cursor + 9) == 'L' && | |
1750 *(cursor + 10) == 'E') | |
1751 { | |
1752 token_stream[(int) index++].kind = Token_Q_INVOKABLE; | |
1753 return; | |
1754 } | |
1755 break; | |
1756 | |
1757 case 's': | |
1758 if (*(cursor + 1) == 't' && | |
1759 *(cursor + 2) == 'a' && | |
1760 *(cursor + 3) == 't' && | |
1761 *(cursor + 4) == 'i' && | |
1762 *(cursor + 5) == 'c' && | |
1763 *(cursor + 6) == '_' && | |
1764 *(cursor + 7) == 'c' && | |
1765 *(cursor + 8) == 'a' && | |
1766 *(cursor + 9) == 's' && | |
1767 *(cursor + 10) == 't') | |
1768 { | |
1769 token_stream[(int) index++].kind = Token_static_cast; | |
1770 return; | |
1771 } | |
1772 break; | |
1773 | |
1774 } | |
1775 token_stream[(int) index++].kind = Token_identifier; | |
1776 } | |
1777 | |
1778 void Lexer::scanKeyword12() | |
1779 { | |
1780 switch (*cursor) | |
1781 { | |
1782 case 'd': | |
1783 if (*(cursor + 1) == 'y' && | |
1784 *(cursor + 2) == 'n' && | |
1785 *(cursor + 3) == 'a' && | |
1786 *(cursor + 4) == 'm' && | |
1787 *(cursor + 5) == 'i' && | |
1788 *(cursor + 6) == 'c' && | |
1789 *(cursor + 7) == '_' && | |
1790 *(cursor + 8) == 'c' && | |
1791 *(cursor + 9) == 'a' && | |
1792 *(cursor + 10) == 's' && | |
1793 *(cursor + 11) == 't') | |
1794 { | |
1795 token_stream[(int) index++].kind = Token_dynamic_cast; | |
1796 return; | |
1797 } | |
1798 break; | |
1799 | |
1800 } | |
1801 token_stream[(int) index++].kind = Token_identifier; | |
1802 } | |
1803 | |
1804 void Lexer::scanKeyword13() | |
1805 { | |
1806 switch (*cursor) | |
1807 { | |
1808 case '_': | |
1809 if (*(cursor + 1) == '_' && | |
1810 *(cursor + 2) == 'a' && | |
1811 *(cursor + 3) == 't' && | |
1812 *(cursor + 4) == 't' && | |
1813 *(cursor + 5) == 'r' && | |
1814 *(cursor + 6) == 'i' && | |
1815 *(cursor + 7) == 'b' && | |
1816 *(cursor + 8) == 'u' && | |
1817 *(cursor + 9) == 't' && | |
1818 *(cursor + 10) == 'e' && | |
1819 *(cursor + 11) == '_' && | |
1820 *(cursor + 12) == '_') | |
1821 { | |
1822 token_stream[(int) index++].kind = Token___attribute__; | |
1823 return; | |
1824 } | |
1825 break; | |
1826 } | |
1827 token_stream[(int) index++].kind = Token_identifier; | |
1828 } | |
1829 | |
1830 void Lexer::scanKeyword14() | |
1831 { | |
1832 switch (*cursor) | |
1833 { | |
1834 case 'k': | |
1835 if (*(cursor + 1) == '_' && | |
1836 *(cursor + 2) == 'd' && | |
1837 *(cursor + 3) == 'c' && | |
1838 *(cursor + 4) == 'o' && | |
1839 *(cursor + 5) == 'p' && | |
1840 *(cursor + 6) == '_' && | |
1841 *(cursor + 7) == 's' && | |
1842 *(cursor + 8) == 'i' && | |
1843 *(cursor + 9) == 'g' && | |
1844 *(cursor + 10) == 'n' && | |
1845 *(cursor + 11) == 'a' && | |
1846 *(cursor + 12) == 'l' && | |
1847 *(cursor + 13) == 's') | |
1848 { | |
1849 token_stream[(int) index++].kind = Token_k_dcop_signals; | |
1850 return; | |
1851 } | |
1852 break; | |
1853 } | |
1854 token_stream[(int) index++].kind = Token_identifier; | |
1855 } | |
1856 | |
1857 void Lexer::scanKeyword16() | |
1858 { | |
1859 switch (*cursor) | |
1860 { | |
1861 case 'r': | |
1862 if (*(cursor + 1) == 'e' && | |
1863 *(cursor + 2) == 'i' && | |
1864 *(cursor + 3) == 'n' && | |
1865 *(cursor + 4) == 't' && | |
1866 *(cursor + 5) == 'e' && | |
1867 *(cursor + 6) == 'r' && | |
1868 *(cursor + 7) == 'p' && | |
1869 *(cursor + 8) == 'r' && | |
1870 *(cursor + 9) == 'e' && | |
1871 *(cursor + 10) == 't' && | |
1872 *(cursor + 11) == '_' && | |
1873 *(cursor + 12) == 'c' && | |
1874 *(cursor + 13) == 'a' && | |
1875 *(cursor + 14) == 's' && | |
1876 *(cursor + 15) == 't') | |
1877 { | |
1878 token_stream[(int) index++].kind = Token_reinterpret_cast; | |
1879 return; | |
1880 } | |
1881 break; | |
1882 } | |
1883 | |
1884 token_stream[(int) index++].kind = Token_identifier; | |
1885 } | |
1886 | |
1887 // kate: space-indent on; indent-width 2; replace-tabs on; |