# HG changeset patch # User Aziz K?ksal # Date 1200696012 -3600 # Node ID ff6971637f8855b83462b34c2676693afb9766e4 # Parent cedfc67faabf7075e6b73eec4d7106951f10fad4 Renamed Token member type to kind. diff -r cedfc67faabf -r ff6971637f88 trunk/src/cmd/Generate.d --- a/trunk/src/cmd/Generate.d Fri Jan 18 23:11:44 2008 +0100 +++ b/trunk/src/cmd/Generate.d Fri Jan 18 23:40:12 2008 +0100 @@ -390,7 +390,7 @@ } // Traverse linked list and print tokens. - while (token.type != TOK.EOF) + while (token.kind != TOK.EOF) { token = token.next; @@ -445,7 +445,7 @@ print(tags[DocPart.SrcBegin]); // Traverse linked list and print tokens. - while (token.type != TOK.EOF) + while (token.kind != TOK.EOF) { token = token.next; // Print whitespace. @@ -462,7 +462,7 @@ alias DocPart DP; string srcText = xml_escape(token.srcText); - switch(token.type) + switch(token.kind) { case TOK.Identifier: print.format(tags[DP.Identifier], srcText); @@ -530,7 +530,7 @@ case TOK.Not: // Check if this is part of a template instantiation. // TODO: comments aren't skipped. Use Token.nextNWS and Token.prevNWS - if (token.prev.type == TOK.Identifier && token.next.type == TOK.LParen) + if (token.prev.kind == TOK.Identifier && token.next.kind == TOK.LParen) goto default; print(tags[DP.Not]); break; diff -r cedfc67faabf -r ff6971637f88 trunk/src/cmd/Statistics.d --- a/trunk/src/cmd/Statistics.d Fri Jan 18 23:11:44 2008 +0100 +++ b/trunk/src/cmd/Statistics.d Fri Jan 18 23:40:12 2008 +0100 @@ -172,13 +172,13 @@ stats.tokenCount += 1; if (printTokensTable) - stats.tokensTable[token.type] += 1; + stats.tokensTable[token.kind] += 1; // Count whitespace characters if (token.ws !is null) stats.whitespaceCount += token.start - token.ws; - switch (token.type) + switch (token.kind) { case TOK.Identifier: stats.identCount++; @@ -204,6 +204,6 @@ break; token = token.next; } - assert(token.type == TOK.EOF); + assert(token.kind == TOK.EOF); return stats; } diff -r cedfc67faabf -r ff6971637f88 trunk/src/dil/ast/Expressions.d --- a/trunk/src/dil/ast/Expressions.d Fri Jan 18 23:11:44 2008 +0100 +++ b/trunk/src/dil/ast/Expressions.d Fri Jan 18 23:40:12 2008 +0100 @@ -420,13 +420,13 @@ bool isPos() { assert(begin !is null); - return begin.type == TOK.Plus; + return begin.kind == TOK.Plus; } bool isNeg() { assert(begin !is null); - return begin.type == TOK.Minus; + return begin.kind == TOK.Minus; } } @@ -646,6 +646,12 @@ mixin(set_kind); } + bool toBool() + { + assert(begin !is null); + return begin.kind == TOK.True ? true : false; + } + Expression value; /// IntExpression of type int. } @@ -663,7 +669,7 @@ this(Token* token) { auto type = Types.Int; // Should be most common case. - switch (token.type) + switch (token.kind) { // case TOK.Int32: // type = Types.Int; break; @@ -674,7 +680,7 @@ case TOK.Uint64: type = Types.Ulong; break; default: - assert(token.type == TOK.Int32); + assert(token.kind == TOK.Int32); } this(token.ulong_, type); } @@ -694,7 +700,7 @@ this(Token* token) { auto type = Types.Double; // Most common case? - switch (token.type) + switch (token.kind) { case TOK.Float32: type = Types.Float; break; @@ -709,7 +715,7 @@ case TOK.Imaginary80: type = Types.Ireal; break; default: - assert(token.type == TOK.Float64); + assert(token.kind == TOK.Float64); } this(token.real_, type); } diff -r cedfc67faabf -r ff6971637f88 trunk/src/dil/doc/Doc.d --- a/trunk/src/dil/doc/Doc.d Fri Jan 18 23:11:44 2008 +0100 +++ b/trunk/src/dil/doc/Doc.d Fri Jan 18 23:40:12 2008 +0100 @@ -8,12 +8,12 @@ bool isDoxygenComment(Token* token) { // Doxygen: '/+!' '/*!' 
'//!' - return token.type == TOK.Comment && token.start[2] == '!'; + return token.kind == TOK.Comment && token.start[2] == '!'; } bool isDDocComment(Token* token) { // DDOC: '/++' '/**' '///' - return token.type == TOK.Comment && token.start[1] == token.start[2]; + return token.kind == TOK.Comment && token.start[1] == token.start[2]; } /++ @@ -30,19 +30,19 @@ while (1) { token = token.prev; - if (token.type == TOK.LBrace || - token.type == TOK.RBrace || - token.type == TOK.Semicolon || - token.type == TOK.HEAD || - (node.kind == NodeKind.EnumMember && token.type == TOK.Comma)) + if (token.kind == TOK.LBrace || + token.kind == TOK.RBrace || + token.kind == TOK.Semicolon || + token.kind == TOK.HEAD || + (node.kind == NodeKind.EnumMember && token.kind == TOK.Comma)) break; - if (token.type == TOK.Comment) + if (token.kind == TOK.Comment) { // Check that this comment doesn't belong to the previous declaration. - if (node.kind == NodeKind.EnumMember && token.type == TOK.Comma) + if (node.kind == NodeKind.EnumMember && token.kind == TOK.Comma) break; - switch (token.prev.type) + switch (token.prev.kind) { case TOK.Semicolon, TOK.RBrace: break; @@ -54,15 +54,15 @@ } // Get single comment to the right. token = node.end.next; - if (token.type == TOK.Comment && isDocComment(token)) + if (token.kind == TOK.Comment && isDocComment(token)) comments ~= token; else if (node.kind == NodeKind.EnumMember) { token = node.end.nextNWS; - if (token.type == TOK.Comma) + if (token.kind == TOK.Comma) { token = token.next; - if (token.type == TOK.Comment && isDocComment(token)) + if (token.kind == TOK.Comment && isDocComment(token)) comments ~= token; } } diff -r cedfc67faabf -r ff6971637f88 trunk/src/dil/lexer/Lexer.d --- a/trunk/src/dil/lexer/Lexer.d Fri Jan 18 23:11:44 2008 +0100 +++ b/trunk/src/dil/lexer/Lexer.d Fri Jan 18 23:40:12 2008 +0100 @@ -70,14 +70,14 @@ this.lineBegin = this.p; this.head = new Token; - this.head.type = TOK.HEAD; + this.head.kind = TOK.HEAD; this.head.start = this.head.end = this.p; this.token = this.head; // Initialize this.filePaths. newFilePath(this.filePath); // Add a newline as the first token after the head. auto newline = new Token; - newline.type = TOK.Newline; + newline.kind = TOK.Newline; newline.setWhitespaceFlag(); newline.start = newline.end = this.p; newline.newline.filePaths = this.filePaths; @@ -96,7 +96,7 @@ auto token = head.next; while (token !is null) { - assert(token.type == TOK.EOF ? token == tail && token.next is null : 1); + assert(token.kind == TOK.EOF ? token == tail && token.next is null : 1); delete token.prev; token = token.next; } @@ -112,7 +112,7 @@ if (*p == '#' && p[1] == '!') { auto t = new Token; - t.type = TOK.Shebang; + t.kind = TOK.Shebang; t.setWhitespaceFlag(); t.start = p; ++p; @@ -127,7 +127,7 @@ void finalizeSpecialToken(ref Token t) { assert(t.srcText[0..2] == "__"); - switch (t.type) + switch (t.kind) { case TOK.FILE: t.str = this.filePaths.setPath; @@ -142,7 +142,7 @@ time(&time_val); char* str = ctime(&time_val); char[] time_str = str[0 .. strlen(str)]; - switch (t.type) + switch (t.kind) { case TOK.DATE: time_str = time_str[4..11] ~ time_str[20..24] ~ \0; break; @@ -189,7 +189,7 @@ if (t.next) { t = t.next; -// if (t.type == TOK.Newline) +// if (t.kind == TOK.Newline) // this.newline = t; } else if (t != this.tail) @@ -212,7 +212,7 @@ TOK nextToken() { scanNext(this.token); - return this.token.type; + return this.token.kind; } /// Returns true if p points to the last character of a Newline. 
@@ -238,8 +238,8 @@ } out { - assert(text.ptr <= t.start && t.start < end, Token.toString(t.type)); - assert(text.ptr <= t.end && t.end <= end, Token.toString(t.type)); + assert(text.ptr <= t.start && t.start < end, Token.toString(t.kind)); + assert(text.ptr <= t.end && t.end <= end, Token.toString(t.kind)); } body { @@ -267,7 +267,7 @@ ++lineNum; setLineBegin(p); // this.newline = &t; - t.type = TOK.Newline; + t.kind = TOK.Newline; t.setWhitespaceFlag(); t.newline.filePaths = this.filePaths; t.newline.oriLineNum = lineNum; @@ -304,20 +304,20 @@ t.end = p; auto id = IdTable.lookup(t.srcText); - t.type = id.type; + t.kind = id.type; t.ident = id; - if (t.type == TOK.Identifier || t.isKeyword) + if (t.kind == TOK.Identifier || t.isKeyword) return; else if (t.isSpecialToken) finalizeSpecialToken(t); - else if (t.type == TOK.EOF) + else if (t.kind == TOK.EOF) { tail = &t; assert(t.srcText == "__EOF__"); } else - assert(0, "unexpected token type: " ~ Token.toString(t.type)); + assert(0, "unexpected token type: " ~ Token.toString(t.kind)); return; } @@ -331,7 +331,7 @@ { case '=': ++p; - t.type = TOK.DivAssign; + t.kind = TOK.DivAssign; t.end = p; return; case '+': @@ -341,12 +341,12 @@ case '/': while (!isEndOfLine(++p)) isascii(*p) || decodeUTF8(); - t.type = TOK.Comment; + t.kind = TOK.Comment; t.setWhitespaceFlag(); t.end = p; return; default: - t.type = TOK.Div; + t.kind = TOK.Div; t.end = p; return; } @@ -371,7 +371,7 @@ encodeUTF8(buffer, c); } while (*p == '\\') buffer ~= 0; - t.type = TOK.String; + t.kind = TOK.String; t.str = buffer; t.end = p; return; @@ -380,7 +380,7 @@ switch (c) { case '=': - t.type = TOK.GreaterEqual; + t.kind = TOK.GreaterEqual; goto Lcommon; case '>': if (p[1] == '>') @@ -388,21 +388,21 @@ ++p; if (p[1] == '=') { ++p; - t.type = TOK.URShiftAssign; + t.kind = TOK.URShiftAssign; } else - t.type = TOK.URShift; + t.kind = TOK.URShift; } else if (p[1] == '=') { ++p; - t.type = TOK.RShiftAssign; + t.kind = TOK.RShiftAssign; } else - t.type = TOK.RShift; + t.kind = TOK.RShift; goto Lcommon; default: - t.type = TOK.Greater; + t.kind = TOK.Greater; goto Lcommon2; } assert(0); @@ -411,26 +411,26 @@ switch (c) { case '=': - t.type = TOK.LessEqual; + t.kind = TOK.LessEqual; goto Lcommon; case '<': if (p[1] == '=') { ++p; - t.type = TOK.LShiftAssign; + t.kind = TOK.LShiftAssign; } else - t.type = TOK.LShift; + t.kind = TOK.LShift; goto Lcommon; case '>': if (p[1] == '=') { ++p; - t.type = TOK.LorEorG; + t.kind = TOK.LorEorG; } else - t.type = TOK.LorG; + t.kind = TOK.LorG; goto Lcommon; default: - t.type = TOK.Less; + t.kind = TOK.Less; goto Lcommon2; } assert(0); @@ -444,17 +444,17 @@ { if (p[1] == '=') { ++p; - t.type = TOK.Unordered; + t.kind = TOK.Unordered; } else - t.type = TOK.UorE; + t.kind = TOK.UorE; } else if (c == '=') { - t.type = TOK.UorG; + t.kind = TOK.UorG; } else { - t.type = TOK.UorGorE; + t.kind = TOK.UorGorE; goto Lcommon2; } goto Lcommon; @@ -462,16 +462,16 @@ if (p[1] == '=') { ++p; - t.type = TOK.UorL; + t.kind = TOK.UorL; } else - t.type = TOK.UorLorE; + t.kind = TOK.UorLorE; goto Lcommon; case '=': - t.type = TOK.NotEqual; + t.kind = TOK.NotEqual; goto Lcommon; default: - t.type = TOK.Not; + t.kind = TOK.Not; goto Lcommon2; } assert(0); @@ -481,135 +481,135 @@ ++p; if (p[1] == '.') { ++p; - t.type = TOK.Ellipses; + t.kind = TOK.Ellipses; } else - t.type = TOK.Slice; + t.kind = TOK.Slice; } else if (isdigit(p[1])) { return scanReal(t); } else - t.type = TOK.Dot; + t.kind = TOK.Dot; goto Lcommon; case '|': /* | || |= */ c = *++p; if (c == '=') - 
t.type = TOK.OrAssign; + t.kind = TOK.OrAssign; else if (c == '|') - t.type = TOK.OrLogical; + t.kind = TOK.OrLogical; else { - t.type = TOK.OrBinary; + t.kind = TOK.OrBinary; goto Lcommon2; } goto Lcommon; case '&': /* & && &= */ c = *++p; if (c == '=') - t.type = TOK.AndAssign; + t.kind = TOK.AndAssign; else if (c == '&') - t.type = TOK.AndLogical; + t.kind = TOK.AndLogical; else { - t.type = TOK.AndBinary; + t.kind = TOK.AndBinary; goto Lcommon2; } goto Lcommon; case '+': /* + ++ += */ c = *++p; if (c == '=') - t.type = TOK.PlusAssign; + t.kind = TOK.PlusAssign; else if (c == '+') - t.type = TOK.PlusPlus; + t.kind = TOK.PlusPlus; else { - t.type = TOK.Plus; + t.kind = TOK.Plus; goto Lcommon2; } goto Lcommon; case '-': /* - -- -= */ c = *++p; if (c == '=') - t.type = TOK.MinusAssign; + t.kind = TOK.MinusAssign; else if (c == '-') - t.type = TOK.MinusMinus; + t.kind = TOK.MinusMinus; else { - t.type = TOK.Minus; + t.kind = TOK.Minus; goto Lcommon2; } goto Lcommon; case '=': /* = == */ if (p[1] == '=') { ++p; - t.type = TOK.Equal; + t.kind = TOK.Equal; } else - t.type = TOK.Assign; + t.kind = TOK.Assign; goto Lcommon; case '~': /* ~ ~= */ if (p[1] == '=') { ++p; - t.type = TOK.CatAssign; + t.kind = TOK.CatAssign; } else - t.type = TOK.Tilde; + t.kind = TOK.Tilde; goto Lcommon; case '*': /* * *= */ if (p[1] == '=') { ++p; - t.type = TOK.MulAssign; + t.kind = TOK.MulAssign; } else - t.type = TOK.Mul; + t.kind = TOK.Mul; goto Lcommon; case '^': /* ^ ^= */ if (p[1] == '=') { ++p; - t.type = TOK.XorAssign; + t.kind = TOK.XorAssign; } else - t.type = TOK.Xor; + t.kind = TOK.Xor; goto Lcommon; case '%': /* % %= */ if (p[1] == '=') { ++p; - t.type = TOK.ModAssign; + t.kind = TOK.ModAssign; } else - t.type = TOK.Mod; + t.kind = TOK.Mod; goto Lcommon; // Single character tokens: case '(': - t.type = TOK.LParen; + t.kind = TOK.LParen; goto Lcommon; case ')': - t.type = TOK.RParen; + t.kind = TOK.RParen; goto Lcommon; case '[': - t.type = TOK.LBracket; + t.kind = TOK.LBracket; goto Lcommon; case ']': - t.type = TOK.RBracket; + t.kind = TOK.RBracket; goto Lcommon; case '{': - t.type = TOK.LBrace; + t.kind = TOK.LBrace; goto Lcommon; case '}': - t.type = TOK.RBrace; + t.kind = TOK.RBrace; goto Lcommon; case ':': - t.type = TOK.Colon; + t.kind = TOK.Colon; goto Lcommon; case ';': - t.type = TOK.Semicolon; + t.kind = TOK.Semicolon; goto Lcommon; case '?': - t.type = TOK.Question; + t.kind = TOK.Question; goto Lcommon; case ',': - t.type = TOK.Comma; + t.kind = TOK.Comma; goto Lcommon; case '$': - t.type = TOK.Dollar; + t.kind = TOK.Dollar; Lcommon: ++p; Lcommon2: @@ -624,7 +624,7 @@ if (isEOF(c)) { assert(isEOF(*p), ""~*p); - t.type = TOK.EOF; + t.kind = TOK.EOF; t.end = p; tail = &t; assert(t.start == t.end); @@ -641,7 +641,7 @@ error(t.start, MID.IllegalCharacter, cast(dchar)c); ++p; - t.type = TOK.Illegal; + t.kind = TOK.Illegal; t.setWhitespaceFlag(); t.dchar_ = c; t.end = p; @@ -695,8 +695,8 @@ } out { - assert(text.ptr <= t.start && t.start < end, Token.toString(t.type)); - assert(text.ptr <= t.end && t.end <= end, Token.toString(t.type)); + assert(text.ptr <= t.start && t.start < end, Token.toString(t.kind)); + assert(text.ptr <= t.end && t.end <= end, Token.toString(t.kind)); } body { @@ -722,7 +722,7 @@ ++lineNum; setLineBegin(p); // this.newline = &t; - t.type = TOK.Newline; + t.kind = TOK.Newline; t.setWhitespaceFlag(); t.newline.filePaths = this.filePaths; t.newline.oriLineNum = lineNum; @@ -771,10 +771,10 @@ switch (c) { case toUint!(">>>="): - t.type = TOK.RShiftAssign; + t.kind = 
TOK.RShiftAssign; goto Lcommon_4; case toUint!("!<>="): - t.type = TOK.Unordered; + t.kind = TOK.Unordered; Lcommon_4: p += 4; t.end = p; @@ -789,28 +789,28 @@ switch (c) { case toUint!(">>="): - t.type = TOK.RShiftAssign; + t.kind = TOK.RShiftAssign; goto Lcommon_3; case toUint!(">>>"): - t.type = TOK.URShift; + t.kind = TOK.URShift; goto Lcommon_3; case toUint!("<>="): - t.type = TOK.LorEorG; + t.kind = TOK.LorEorG; goto Lcommon_3; case toUint!("<<="): - t.type = TOK.LShiftAssign; + t.kind = TOK.LShiftAssign; goto Lcommon_3; case toUint!("!<="): - t.type = TOK.UorG; + t.kind = TOK.UorG; goto Lcommon_3; case toUint!("!>="): - t.type = TOK.UorL; + t.kind = TOK.UorL; goto Lcommon_3; case toUint!("!<>"): - t.type = TOK.UorE; + t.kind = TOK.UorE; goto Lcommon_3; case toUint!("..."): - t.type = TOK.Ellipses; + t.kind = TOK.Ellipses; Lcommon_3: p += 3; t.end = p; @@ -835,78 +835,78 @@ assert(*p == '/'); while (!isEndOfLine(++p)) isascii(*p) || decodeUTF8(); - t.type = TOK.Comment; + t.kind = TOK.Comment; t.setWhitespaceFlag(); t.end = p; return; case toUint!(">="): - t.type = TOK.GreaterEqual; + t.kind = TOK.GreaterEqual; goto Lcommon_2; case toUint!(">>"): - t.type = TOK.RShift; + t.kind = TOK.RShift; goto Lcommon_2; case toUint!("<<"): - t.type = TOK.LShift; + t.kind = TOK.LShift; goto Lcommon_2; case toUint!("<="): - t.type = TOK.LessEqual; + t.kind = TOK.LessEqual; goto Lcommon_2; case toUint!("<>"): - t.type = TOK.LorG; + t.kind = TOK.LorG; goto Lcommon_2; case toUint!("!<"): - t.type = TOK.UorGorE; + t.kind = TOK.UorGorE; goto Lcommon_2; case toUint!("!>"): - t.type = TOK.UorLorE; + t.kind = TOK.UorLorE; goto Lcommon_2; case toUint!("!="): - t.type = TOK.NotEqual; + t.kind = TOK.NotEqual; goto Lcommon_2; case toUint!(".."): - t.type = TOK.Slice; + t.kind = TOK.Slice; goto Lcommon_2; case toUint!("&&"): - t.type = TOK.AndLogical; + t.kind = TOK.AndLogical; goto Lcommon_2; case toUint!("&="): - t.type = TOK.AndAssign; + t.kind = TOK.AndAssign; goto Lcommon_2; case toUint!("||"): - t.type = TOK.OrLogical; + t.kind = TOK.OrLogical; goto Lcommon_2; case toUint!("|="): - t.type = TOK.OrAssign; + t.kind = TOK.OrAssign; goto Lcommon_2; case toUint!("++"): - t.type = TOK.PlusPlus; + t.kind = TOK.PlusPlus; goto Lcommon_2; case toUint!("+="): - t.type = TOK.PlusAssign; + t.kind = TOK.PlusAssign; goto Lcommon_2; case toUint!("--"): - t.type = TOK.MinusMinus; + t.kind = TOK.MinusMinus; goto Lcommon_2; case toUint!("-="): - t.type = TOK.MinusAssign; + t.kind = TOK.MinusAssign; goto Lcommon_2; case toUint!("=="): - t.type = TOK.Equal; + t.kind = TOK.Equal; goto Lcommon_2; case toUint!("~="): - t.type = TOK.CatAssign; + t.kind = TOK.CatAssign; goto Lcommon_2; case toUint!("*="): - t.type = TOK.MulAssign; + t.kind = TOK.MulAssign; goto Lcommon_2; case toUint!("/="): - t.type = TOK.DivAssign; + t.kind = TOK.DivAssign; goto Lcommon_2; case toUint!("^="): - t.type = TOK.XorAssign; + t.kind = TOK.XorAssign; goto Lcommon_2; case toUint!("%="): - t.type = TOK.ModAssign; + t.kind = TOK.ModAssign; Lcommon_2: p += 2; t.end = p; @@ -939,86 +939,86 @@ encodeUTF8(buffer, c); } while (*p == '\\') buffer ~= 0; - t.type = TOK.String; + t.kind = TOK.String; t.str = buffer; t.end = p; return; case '<': - t.type = TOK.Greater; + t.kind = TOK.Greater; goto Lcommon; case '>': - t.type = TOK.Less; + t.kind = TOK.Less; goto Lcommon; case '^': - t.type = TOK.Xor; + t.kind = TOK.Xor; goto Lcommon; case '!': - t.type = TOK.Not; + t.kind = TOK.Not; goto Lcommon; case '.': if (isdigit(p[1])) return scanReal(t); - t.type = TOK.Dot; 
+ t.kind = TOK.Dot; goto Lcommon; case '&': - t.type = TOK.AndBinary; + t.kind = TOK.AndBinary; goto Lcommon; case '|': - t.type = TOK.OrBinary; + t.kind = TOK.OrBinary; goto Lcommon; case '+': - t.type = TOK.Plus; + t.kind = TOK.Plus; goto Lcommon; case '-': - t.type = TOK.Minus; + t.kind = TOK.Minus; goto Lcommon; case '=': - t.type = TOK.Assign; + t.kind = TOK.Assign; goto Lcommon; case '~': - t.type = TOK.Tilde; + t.kind = TOK.Tilde; goto Lcommon; case '*': - t.type = TOK.Mul; + t.kind = TOK.Mul; goto Lcommon; case '/': - t.type = TOK.Div; + t.kind = TOK.Div; goto Lcommon; case '%': - t.type = TOK.Mod; + t.kind = TOK.Mod; goto Lcommon; case '(': - t.type = TOK.LParen; + t.kind = TOK.LParen; goto Lcommon; case ')': - t.type = TOK.RParen; + t.kind = TOK.RParen; goto Lcommon; case '[': - t.type = TOK.LBracket; + t.kind = TOK.LBracket; goto Lcommon; case ']': - t.type = TOK.RBracket; + t.kind = TOK.RBracket; goto Lcommon; case '{': - t.type = TOK.LBrace; + t.kind = TOK.LBrace; goto Lcommon; case '}': - t.type = TOK.RBrace; + t.kind = TOK.RBrace; goto Lcommon; case ':': - t.type = TOK.Colon; + t.kind = TOK.Colon; goto Lcommon; case ';': - t.type = TOK.Semicolon; + t.kind = TOK.Semicolon; goto Lcommon; case '?': - t.type = TOK.Question; + t.kind = TOK.Question; goto Lcommon; case ',': - t.type = TOK.Comma; + t.kind = TOK.Comma; goto Lcommon; case '$': - t.type = TOK.Dollar; + t.kind = TOK.Dollar; Lcommon: ++p; t.end = p; @@ -1054,20 +1054,20 @@ t.end = p; auto id = IdTable.lookup(t.srcText); - t.type = id.type; + t.kind = id.type; t.ident = id; - if (t.type == TOK.Identifier || t.isKeyword) + if (t.kind == TOK.Identifier || t.isKeyword) return; else if (t.isSpecialToken) finalizeSpecialToken(t); - else if (t.type == TOK.EOF) + else if (t.kind == TOK.EOF) { tail = &t; assert(t.srcText == "__EOF__"); } else - assert(0, "unexpected token type: " ~ Token.toString(t.type)); + assert(0, "unexpected token type: " ~ Token.toString(t.kind)); return; } @@ -1078,7 +1078,7 @@ if (isEOF(c)) { assert(isEOF(*p), *p~""); - t.type = TOK.EOF; + t.kind = TOK.EOF; t.end = p; tail = &t; assert(t.start == t.end); @@ -1095,7 +1095,7 @@ error(t.start, MID.IllegalCharacter, cast(dchar)c); ++p; - t.type = TOK.Illegal; + t.kind = TOK.Illegal; t.setWhitespaceFlag(); t.dchar_ = c; t.end = p; @@ -1138,7 +1138,7 @@ } } } - t.type = TOK.Comment; + t.kind = TOK.Comment; t.setWhitespaceFlag(); t.end = p; return; @@ -1188,7 +1188,7 @@ } } } - t.type = TOK.Comment; + t.kind = TOK.Comment; t.setWhitespaceFlag(); t.end = p; return; @@ -1217,7 +1217,7 @@ assert(*p == '"'); auto tokenLineNum = lineNum; auto tokenLineBegin = lineBegin; - t.type = TOK.String; + t.kind = TOK.String; char[] buffer; uint c; while (1) @@ -1271,7 +1271,7 @@ { assert(*p == '\''); ++p; - t.type = TOK.CharLiteral; + t.kind = TOK.CharLiteral; switch (*p) { case '\\': @@ -1302,7 +1302,7 @@ assert(*p == '`' || *p == '"' && p[-1] == 'r'); auto tokenLineNum = lineNum; auto tokenLineBegin = lineBegin; - t.type = TOK.String; + t.kind = TOK.String; uint delim = *p; char[] buffer; uint c; @@ -1355,7 +1355,7 @@ void scanHexStringLiteral(ref Token t) { assert(p[0] == 'x' && p[1] == '"'); - t.type = TOK.String; + t.kind = TOK.String; auto tokenLineNum = lineNum; auto tokenLineBegin = lineBegin; @@ -1439,7 +1439,7 @@ void scanDelimitedStringLiteral(ref Token t) { assert(p[0] == 'q' && p[1] == '"'); - t.type = TOK.String; + t.kind = TOK.String; auto tokenLineNum = lineNum; auto tokenLineBegin = lineBegin; @@ -1617,7 +1617,7 @@ void scanTokenStringLiteral(ref Token t) { 
assert(p[0] == 'q' && p[1] == '{'); - t.type = TOK.String; + t.kind = TOK.String; auto tokenLineNum = lineNum; auto tokenLineBegin = lineBegin; @@ -1642,7 +1642,7 @@ token.prev = prev_t; prev_t.next = token; prev_t = token; - switch (token.type) + switch (token.kind) { case TOK.LBrace: ++level; @@ -1666,13 +1666,13 @@ break; // Exit loop. } - assert(token.type == TOK.RBrace || token.type == TOK.EOF); - assert(token.type == TOK.RBrace && t.next is null || - token.type == TOK.EOF && t.next !is null); + assert(token.kind == TOK.RBrace || token.kind == TOK.EOF); + assert(token.kind == TOK.RBrace && t.next is null || + token.kind == TOK.EOF && t.next !is null); char[] buffer; // token points to } or EOF - if (token.type == TOK.EOF) + if (token.kind == TOK.EOF) { t.end = token.start; buffer = t.srcText[2..$].dup ~ '\0'; @@ -2094,33 +2094,33 @@ { if (isDecimal) error(t.start, MID.OverflowDecimalSign); - t.type = TOK.Uint64; + t.kind = TOK.Uint64; } else if (ulong_ & 0xFFFF_FFFF_0000_0000) - t.type = TOK.Int64; + t.kind = TOK.Int64; else if (ulong_ & 0x8000_0000) - t.type = isDecimal ? TOK.Int64 : TOK.Uint32; + t.kind = isDecimal ? TOK.Int64 : TOK.Uint32; else - t.type = TOK.Int32; + t.kind = TOK.Int32; break; case Suffix.Unsigned: if (ulong_ & 0xFFFF_FFFF_0000_0000) - t.type = TOK.Uint64; + t.kind = TOK.Uint64; else - t.type = TOK.Uint32; + t.kind = TOK.Uint32; break; case Suffix.Long: if (ulong_ & 0x8000_0000_0000_0000) { if (isDecimal) error(t.start, MID.OverflowDecimalSign); - t.type = TOK.Uint64; + t.kind = TOK.Uint64; } else - t.type = TOK.Int64; + t.kind = TOK.Int64; break; case Suffix.Unsigned | Suffix.Long: - t.type = TOK.Uint64; + t.kind = TOK.Uint64; break; default: assert(0); @@ -2226,7 +2226,7 @@ finalizeFloat(t, buffer); return; Lerr: - t.type = TOK.Float32; + t.kind = TOK.Float32; t.end = p; error(t.start, mid); } @@ -2238,26 +2238,26 @@ switch (*p) { case 'f', 'F': - t.type = TOK.Float32; + t.kind = TOK.Float32; t.float_ = strtof(buffer.ptr, null); ++p; break; case 'L': - t.type = TOK.Float80; + t.kind = TOK.Float80; t.real_ = strtold(buffer.ptr, null); ++p; break; default: - t.type = TOK.Float64; + t.kind = TOK.Float64; t.double_ = strtod(buffer.ptr, null); } if (*p == 'i') { ++p; - t.type += 3; // Switch to imaginary counterpart. - assert(t.type == TOK.Imaginary32 || - t.type == TOK.Imaginary64 || - t.type == TOK.Imaginary80); + t.kind += 3; // Switch to imaginary counterpart. 
+ assert(t.kind == TOK.Imaginary32 || + t.kind == TOK.Imaginary64 || + t.kind == TOK.Imaginary80); } if (errno() == ERANGE) error(t.start, MID.OverflowFloatNumber); @@ -2268,7 +2268,7 @@ void scanSpecialTokenSequence(ref Token t) { assert(*p == '#'); - t.type = TOK.HashLine; + t.kind = TOK.HashLine; t.setWhitespaceFlag(); MID mid; @@ -2303,7 +2303,7 @@ } t.tokLineNum = new Token; scan(*t.tokLineNum); - if (t.tokLineNum.type != TOK.Int32 && t.tokLineNum.type != TOK.Uint32) + if (t.tokLineNum.kind != TOK.Int32 && t.tokLineNum.kind != TOK.Uint32) { errorAtColumn = t.tokLineNum.start; mid = MID.ExpectedIntegerAfterSTLine; @@ -2322,7 +2322,7 @@ } t.tokLineFilespec = new Token; t.tokLineFilespec.start = p; - t.tokLineFilespec.type = TOK.Filespec; + t.tokLineFilespec.kind = TOK.Filespec; t.tokLineFilespec.setWhitespaceFlag(); while (*++p != '"') { @@ -2378,12 +2378,12 @@ Token* insertEmptyTokenBefore(Token* t) { assert(t !is null && t.prev !is null); - assert(text.ptr <= t.start && t.start < end, Token.toString(t.type)); - assert(text.ptr <= t.end && t.end <= end, Token.toString(t.type)); + assert(text.ptr <= t.start && t.start < end, Token.toString(t.kind)); + assert(text.ptr <= t.end && t.end <= end, Token.toString(t.kind)); auto prev_t = t.prev; auto new_t = new Token; - new_t.type = TOK.Empty; + new_t.kind = TOK.Empty; new_t.start = new_t.end = prev_t.end; // Link in new token. prev_t.next = new_t; @@ -2678,7 +2678,7 @@ struct Pair { char[] tokenText; - TOK type; + TOK kind; } static Pair[] pairs = [ {"#!äöüß", TOK.Shebang}, {"\n", TOK.Newline}, @@ -2724,10 +2724,10 @@ // Join all token texts into a single string. foreach (i, pair; pairs) - if (pair.type == TOK.Comment && pair.tokenText[1] == '/' || // Line comment. - pair.type == TOK.Shebang) + if (pair.kind == TOK.Comment && pair.tokenText[1] == '/' || // Line comment. + pair.kind == TOK.Shebang) { - assert(pairs[i+1].type == TOK.Newline); // Must be followed by a newline. + assert(pairs[i+1].kind == TOK.Newline); // Must be followed by a newline. src ~= pair.tokenText; } else @@ -2738,7 +2738,7 @@ uint i; assert(token == lx.head); - assert(token.next.type == TOK.Newline); + assert(token.next.kind == TOK.Newline); token = token.next.next; do { @@ -2746,7 +2746,7 @@ assert(token.srcText == pairs[i].tokenText, Format("Scanned '{0}' but expected '{1}'", token.srcText, pairs[i].tokenText)); ++i; token = token.next; - } while (token.type != TOK.EOF) + } while (token.kind != TOK.EOF) } unittest @@ -2757,22 +2757,22 @@ auto next = lx.head; lx.peek(next); - assert(next.type == TOK.Newline); + assert(next.kind == TOK.Newline); lx.peek(next); - assert(next.type == TOK.Unittest); + assert(next.kind == TOK.Unittest); lx.peek(next); - assert(next.type == TOK.LBrace); + assert(next.kind == TOK.LBrace); lx.peek(next); - assert(next.type == TOK.RBrace); + assert(next.kind == TOK.RBrace); lx.peek(next); - assert(next.type == TOK.EOF); + assert(next.kind == TOK.EOF); lx = new Lexer("", null); next = lx.head; lx.peek(next); - assert(next.type == TOK.Newline); + assert(next.kind == TOK.Newline); lx.peek(next); - assert(next.type == TOK.EOF); + assert(next.kind == TOK.EOF); } unittest diff -r cedfc67faabf -r ff6971637f88 trunk/src/dil/lexer/Token.d --- a/trunk/src/dil/lexer/Token.d Fri Jan 18 23:11:44 2008 +0100 +++ b/trunk/src/dil/lexer/Token.d Fri Jan 18 23:40:12 2008 +0100 @@ -24,7 +24,7 @@ Whitespace = 1, /// Tokens with this flag are ignored by the Parser. } - TOK type; /// The type of the token. + TOK kind; /// The token kind. 
Flags flags; /// The flags of the token. /// Pointers to the next and previous tokens (doubly-linked list.) Token* next, prev; @@ -93,7 +93,7 @@ auto token = next; while (token !is null && token.isWhitespace) token = token.next; - if (token is null || token.type == TOK.EOF) + if (token is null || token.kind == TOK.EOF) return this; return token; } @@ -109,7 +109,7 @@ auto token = prev; while (token !is null && token.isWhitespace) token = token.prev; - if (token is null || token.type == TOK.HEAD) + if (token is null || token.kind == TOK.HEAD) return this; return token; } @@ -132,20 +132,20 @@ +/ bool isMultiline() { - return type == TOK.String && start[0] != '\\' || - type == TOK.Comment && start[1] != '/'; + return kind == TOK.String && start[0] != '\\' || + kind == TOK.Comment && start[1] != '/'; } /// Returns true if this is a keyword token. bool isKeyword() { - return KeywordsBegin <= type && type <= KeywordsEnd; + return KeywordsBegin <= kind && kind <= KeywordsEnd; } /// Returns true if this is an integral type token. bool isIntegralType() { - return IntegralTypeBegin <= type && type <= IntegralTypeEnd; + return IntegralTypeBegin <= kind && kind <= IntegralTypeEnd; } /// Returns true if this is a whitespace token. @@ -157,7 +157,7 @@ /// Returns true if this is a special token. bool isSpecialToken() { - return SpecialTokensBegin <= type && type <= SpecialTokensEnd; + return SpecialTokensBegin <= kind && kind <= SpecialTokensEnd; } version(D2) @@ -165,31 +165,31 @@ /// Returns true if this is a token string literal. bool isTokenStringLiteral() { - return type == TOK.String && tok_str !is null; + return kind == TOK.String && tok_str !is null; } } /// Returns true if this token starts a DeclarationDefinition. bool isDeclDefStart() { - return isDeclDefStartToken(type); + return isDeclDefStartToken(kind); } /// Returns true if this token starts a Statement. bool isStatementStart() { - return isStatementStartToken(type); + return isStatementStartToken(kind); } /// Returns true if this token starts an AsmInstruction. bool isAsmInstructionStart() { - return isAsmInstructionStartToken(type); + return isAsmInstructionStartToken(kind); } - int opEquals(TOK type2) + int opEquals(TOK kind2) { - return type == type2; + return kind == kind2; } /// Returns the Location of this token. @@ -197,7 +197,7 @@ { auto search_t = this.prev; // Find previous newline token. 
- while (search_t.type != TOK.Newline) + while (search_t.kind != TOK.Newline) search_t = search_t.prev; static if (realLocation) { @@ -276,7 +276,7 @@ auto token = cast(Token*)p; if (token) { - if(token.type == TOK.HashLine) + if(token.kind == TOK.HashLine) token.destructHashLineToken(); else { @@ -290,7 +290,7 @@ void destructHashLineToken() { - assert(type == TOK.HashLine); + assert(kind == TOK.HashLine); delete tokLineNum; delete tokLineFilespec; } @@ -299,15 +299,15 @@ { void destructTokenStringLiteral() { - assert(type == TOK.String); + assert(kind == TOK.String); assert(start && *start == 'q' && start[1] == '{'); assert(tok_str !is null); auto tok_it = tok_str; auto tok_del = tok_str; - while (tok_it && tok_it.type != TOK.EOF) + while (tok_it && tok_it.kind != TOK.EOF) { tok_it = tok_it.next; - assert(tok_del && tok_del.type != TOK.EOF); + assert(tok_del && tok_del.kind != TOK.EOF); delete tok_del; tok_del = tok_it; } diff -r cedfc67faabf -r ff6971637f88 trunk/src/dil/parser/ImportParser.d --- a/trunk/src/dil/parser/ImportParser.d Fri Jan 18 23:11:44 2008 +0100 +++ b/trunk/src/dil/parser/ImportParser.d Fri Jan 18 23:40:12 2008 +0100 @@ -24,9 +24,9 @@ { auto decls = new Declarations; super.init(); - if (token.type == T.Module) + if (token.kind == T.Module) decls ~= parseModuleDeclaration(); - while (token.type != T.EOF) + while (token.kind != T.EOF) parseDeclarationDefinition(Protection.None); return decls; } @@ -34,21 +34,21 @@ void parseDeclarationDefinitionsBlock(Protection prot) { skip(T.LBrace); - while (token.type != T.RBrace && token.type != T.EOF) + while (token.kind != T.RBrace && token.kind != T.EOF) parseDeclarationDefinition(prot); skip(T.RBrace); } void parseDeclarationsBlock(Protection prot) { - switch (token.type) + switch (token.kind) { case T.LBrace: parseDeclarationDefinitionsBlock(prot); break; case T.Colon: nT(); - while (token.type != T.RBrace && token.type != T.EOF) + while (token.kind != T.RBrace && token.kind != T.EOF) parseDeclarationDefinition(prot); break; default: @@ -63,11 +63,11 @@ while (1) { lexer.peek(next); - if (next.type == opening) + if (next.kind == opening) ++level; - else if (next.type == closing && --level == 0) + else if (next.kind == closing && --level == 0) return true; - else if (next.type == T.EOF) + else if (next.kind == T.EOF) break; } return false; @@ -87,13 +87,13 @@ void skip(TOK tok) { - token.type == tok && nT(); + token.kind == tok && nT(); } void parseProtectionAttribute() { Protection prot; - switch (token.type) + switch (token.kind) { case T.Private: prot = Protection.Private; break; @@ -114,11 +114,11 @@ void parseDeclarationDefinition(Protection prot) { - switch (token.type) + switch (token.kind) { case T.Align: nT(); - if (token.type == T.LParen) + if (token.kind == T.LParen) nT(), nT(), nT(); // ( Integer ) parseDeclarationsBlock(prot); break; @@ -137,7 +137,7 @@ // Storage classes case T.Extern: nT(); - token.type == T.LParen && skipToTokenAfterClosingParen(); + token.kind == T.LParen && skipToTokenAfterClosingParen(); parseDeclarationsBlock(prot); break; case T.Const: @@ -181,7 +181,7 @@ nT(), nT(); skipToTokenAfterClosingParen(); parseDeclarationsBlock(prot); - if (token.type == T.Else) + if (token.kind == T.Else) nT(), parseDeclarationsBlock(prot); break; case T.Assert: @@ -201,14 +201,14 @@ break; case T.Enum: nT(); - token.type == T.Identifier && nT(); - if (token.type == T.Colon) + token.kind == T.Identifier && nT(); + if (token.kind == T.Colon) { nT(); - while (token.type != T.LBrace && token.type != T.EOF) + while 
(token.kind != T.LBrace && token.kind != T.EOF) nT(); } - if (token.type == T.Semicolon) + if (token.kind == T.Semicolon) nT(); else skipToTokenAfterClosingBrace(); @@ -216,25 +216,25 @@ case T.Class: case T.Interface: nT(), skip(T.Identifier); // class Identifier - token.type == T.LParen && skipToTokenAfterClosingParen(); // Skip template params. - if (token.type == T.Colon) + token.kind == T.LParen && skipToTokenAfterClosingParen(); // Skip template params. + if (token.kind == T.Colon) { // BaseClasses nT(); - while (token.type != T.LBrace && token.type != T.EOF) - if (token.type == T.LParen) // Skip ( tokens... ) + while (token.kind != T.LBrace && token.kind != T.EOF) + if (token.kind == T.LParen) // Skip ( tokens... ) skipToTokenAfterClosingParen(); else nT(); } - if (token.type == T.Semicolon) + if (token.kind == T.Semicolon) nT(); else parseDeclarationDefinitionsBlock(Protection.None); break; case T.Struct, T.Union: nT(); skip(T.Identifier); - token.type == T.LParen && skipToTokenAfterClosingParen(); - if (token.type == T.Semicolon) + token.kind == T.LParen && skipToTokenAfterClosingParen(); + if (token.kind == T.Semicolon) nT(); else parseDeclarationDefinitionsBlock(Protection.None); @@ -257,7 +257,7 @@ else goto case_InvariantAttribute; } - token.type == T.LParen && skipToTokenAfterClosingParen(); + token.kind == T.LParen && skipToTokenAfterClosingParen(); parseFunctionBody(); break; case T.Unittest: @@ -266,27 +266,27 @@ break; case T.Debug: nT(); - if (token.type == T.Assign) + if (token.kind == T.Assign) { nT(), nT(), nT(); // = Condition ; break; } - if (token.type == T.LParen) + if (token.kind == T.LParen) nT(), nT(), nT(); // ( Condition ) parseDeclarationsBlock(prot); - if (token.type == T.Else) + if (token.kind == T.Else) nT(), parseDeclarationsBlock(prot); break; case T.Version: nT(); - if (token.type == T.Assign) + if (token.kind == T.Assign) { nT(), nT(), nT(); // = Condition ; break; } nT(), nT(), nT(); // ( Condition ) parseDeclarationsBlock(prot); - if (token.type == T.Else) + if (token.kind == T.Else) nT(), parseDeclarationsBlock(prot); break; case T.Template: @@ -306,8 +306,8 @@ parseFunctionBody(); break; case T.Mixin: - while (token.type != T.Semicolon && token.type != T.EOF) - if (token.type == T.LParen) + while (token.kind != T.Semicolon && token.kind != T.EOF) + if (token.kind == T.LParen) skipToTokenAfterClosingParen(); else nT(); @@ -319,10 +319,10 @@ // Declaration case T.Identifier, T.Dot, T.Typeof: case_Declaration: - while (token.type != T.Semicolon && token.type != T.EOF) - if (token.type == T.LParen) + while (token.kind != T.Semicolon && token.kind != T.EOF) + if (token.kind == T.LParen) skipToTokenAfterClosingParen(); - else if (token.type == T.LBrace) + else if (token.kind == T.LBrace) skipToTokenAfterClosingBrace(); else nT(); @@ -339,7 +339,7 @@ { while (1) { - switch (token.type) + switch (token.kind) { case T.LBrace: skipToTokenAfterClosingBrace(); @@ -353,7 +353,7 @@ continue; case T.Out: nT(); - if (token.type == T.LParen) + if (token.kind == T.LParen) nT(), nT(), nT(); // ( Identifier ) skipToTokenAfterClosingBrace(); continue; diff -r cedfc67faabf -r ff6971637f88 trunk/src/dil/parser/Parser.d --- a/trunk/src/dil/parser/Parser.d Fri Jan 18 23:11:44 2008 +0100 +++ b/trunk/src/dil/parser/Parser.d Fri Jan 18 23:40:12 2008 +0100 @@ -78,7 +78,7 @@ init(); auto begin = token; auto decls = new Declarations; - if (token.type == T.Module) + if (token.kind == T.Module) decls ~= parseModuleDeclaration(); 
decls.addOptChildren(parseDeclarationDefinitions()); set(decls, begin); @@ -148,7 +148,7 @@ do lexer.peek(next); while (next.isWhitespace) // Skip whitespace - return next.type; + return next.kind; } TOK peekAfter(ref Token* next) @@ -157,13 +157,13 @@ do lexer.peek(next); while (next.isWhitespace) // Skip whitespace - return next.type; + return next.kind; } /// Skips the current token if its type matches tok and returns true. bool skipped()(TOK tok) // Templatized, so it's inlined. { - return token.type == tok ? (nT(), true) : false; + return token.kind == tok ? (nT(), true) : false; } /++++++++++++++++++++++++++++++ @@ -172,14 +172,14 @@ Declaration parseModuleDeclaration() { - assert(token.type == T.Module); + assert(token.kind == T.Module); auto begin = token; ModuleFQN moduleFQN; do { nT(); moduleFQN ~= requireIdentifier(MSG.ExpectedModuleIdentifier); - } while (token.type == T.Dot) + } while (token.kind == T.Dot) require(T.Semicolon); return set(new ModuleDeclaration(moduleFQN), begin); } @@ -193,7 +193,7 @@ Declaration[] parseDeclarationDefinitions() { Declaration[] decls; - while (token.type != T.EOF) + while (token.kind != T.EOF) decls ~= parseDeclarationDefinition(); return decls; } @@ -219,7 +219,7 @@ auto begin = token; auto decls = new Declarations; require(T.LBrace); - while (token.type != T.RBrace && token.type != T.EOF) + while (token.kind != T.RBrace && token.kind != T.EOF) decls ~= parseDeclarationDefinition(); require(T.RBrace); set(decls, begin); @@ -239,7 +239,7 @@ { auto begin = token; Declaration decl; - switch (token.type) + switch (token.kind) { case T.Align, T.Pragma, @@ -366,7 +366,7 @@ default: if (token.isIntegralType) goto case_Declaration; - else if (token.type == T.Module) + else if (token.kind == T.Module) { decl = parseModuleDeclaration(); error(begin, MSG.ModuleDeclarationNotFirst); @@ -378,8 +378,8 @@ do nT(); while (!token.isDeclDefStart && - token.type != T.RBrace && - token.type != T.EOF) + token.kind != T.RBrace && + token.kind != T.EOF) auto text = Token.textSpan(begin, this.prevToken); error(begin, MSG.IllegalDeclaration, text); } @@ -400,13 +400,13 @@ Declaration parseDeclarationsBlock(bool noColon = false) { Declaration d; - switch (token.type) + switch (token.kind) { case T.LBrace: auto begin = token; nT(); auto decls = new Declarations; - while (token.type != T.RBrace && token.type != T.EOF) + while (token.kind != T.RBrace && token.kind != T.EOF) decls ~= parseDeclarationDefinition(); require(T.RBrace); d = set(decls, begin); @@ -417,7 +417,7 @@ nT(); auto begin = token; auto decls = new Declarations; - while (token.type != T.RBrace && token.type != T.EOF) + while (token.kind != T.RBrace && token.kind != T.EOF) decls ~= parseDeclarationDefinition(); d = set(decls, begin); break; @@ -454,7 +454,7 @@ // Check for AutoDeclaration: StorageClasses Identifier = if (testAutoDeclaration && - token.type == T.Identifier && + token.kind == T.Identifier && peekNext() == T.Assign) { ident = token.ident; @@ -463,7 +463,7 @@ else { type = parseType(); // VariableType or ReturnType - if (token.type == T.LParen) + if (token.kind == T.LParen) { // C-style function pointers make the grammar ambiguous. // We have to treat them specially at function scope. @@ -482,7 +482,7 @@ { // Type FunctionName ( ParameterList ) FunctionBody ident = requireIdentifier(MSG.ExpectedFunctionName); ident || nT(); // Skip non-identifier token. 
- assert(token.type == T.LParen); + assert(token.kind == T.LParen); // It's a function declaration TemplateParameters tparams; if (tokenAfterParenIs(T.LParen)) @@ -494,7 +494,7 @@ auto params = parseParameterList(); version(D2) { - switch (token.type) + switch (token.kind) { case T.Const: stc |= StorageClass.Const; @@ -527,7 +527,7 @@ Identifier*[] idents = [ident]; Expression[] values; goto LenterLoop; // We've already parsed an identifier. Jump to if statement and check for initializer. - while (token.type == T.Comma) + while (token.kind == T.Comma) { nT(); idents ~= requireIdentifier(MSG.ExpectedVariableName); @@ -547,7 +547,7 @@ Expression parseInitializer() { - if (token.type == T.Void) + if (token.kind == T.Void) { auto begin = token; auto next = peekNext(); @@ -564,7 +564,7 @@ { auto begin = token; Expression init; - switch (token.type) + switch (token.kind) { case T.LBracket: // ArrayInitializer: @@ -574,7 +574,7 @@ Expression[] values; nT(); - while (token.type != T.RBracket) + while (token.kind != T.RBracket) { auto e = parseNonVoidInitializer(); if (skipped(T.Colon)) @@ -588,7 +588,7 @@ values ~= e; } - if (token.type != T.Comma) + if (token.kind != T.Comma) break; nT(); } @@ -605,9 +605,9 @@ Expression[] values; nT(); - while (token.type != T.RBrace) + while (token.kind != T.RBrace) { - if (token.type == T.Identifier && + if (token.kind == T.Identifier && // Peek for colon to see if this is a member identifier. peekNext() == T.Colon) { @@ -620,7 +620,7 @@ // NonVoidInitializer values ~= parseNonVoidInitializer(); - if (token.type != T.Comma) + if (token.kind != T.Comma) break; nT(); } @@ -635,7 +635,7 @@ init = si; break; } - assert(token.type == T.LBrace); + assert(token.kind == T.LBrace); //goto default; default: init = parseAssignExpression(); @@ -650,7 +650,7 @@ auto func = new FunctionBody; while (1) { - switch (token.type) + switch (token.kind) { case T.LBrace: func.funcBody = parseStatements(); @@ -691,11 +691,11 @@ LinkageType parseLinkageType() { LinkageType linkageType; - if (token.type != T.LParen) + if (token.kind != T.LParen) return linkageType; nT(); // Skip ( - if (token.type == T.RParen) + if (token.kind == T.RParen) { nT(); error(MID.MissingLinkageType); @@ -754,7 +754,7 @@ { Declaration decl; auto begin = token; - switch (token.type) + switch (token.kind) { case T.Extern: if (peekNext() != T.LParen) @@ -850,12 +850,12 @@ uint parseAlignAttribute() { - assert(token.type == T.Align); + assert(token.kind == T.Align); nT(); // Skip align keyword. uint size = DEFAULT_ALIGN_SIZE; // Global default. if (skipped(T.LParen)) { - if (token.type == T.Int32) + if (token.kind == T.Int32) (size = token.int_), nT(); else expected(T.Int32); @@ -868,7 +868,7 @@ { Declaration decl; - switch (token.type) + switch (token.kind) { case T.Align: uint alignSize = parseAlignAttribute(); @@ -897,7 +897,7 @@ default: // Protection attributes Protection prot; - switch (token.type) + switch (token.kind) { case T.Private: prot = Protection.Private; break; @@ -923,9 +923,9 @@ Declaration parseImportDeclaration() { - assert(token.type == T.Import || token.type == T.Static); + assert(token.kind == T.Import || token.kind == T.Static); bool isStatic = skipped(T.Static); - assert(token.type == T.Import); + assert(token.kind == T.Import); nT(); // Skip import keyword. 
ModuleFQN[] moduleFQNs; @@ -949,7 +949,7 @@ while (1) { moduleFQN ~= requireIdentifier(MSG.ExpectedModuleIdentifier); - if (token.type != T.Dot) + if (token.kind != T.Dot) break; nT(); } @@ -958,12 +958,12 @@ moduleFQNs ~= moduleFQN; moduleAliases ~= moduleAlias; - if (token.type != T.Comma) + if (token.kind != T.Comma) break; nT(); } - if (token.type == T.Colon) + if (token.kind == T.Colon) { // BindAlias = BindName(, BindAlias = BindName)*; // BindName(, BindName)*; @@ -980,7 +980,7 @@ // Push identifiers. bindNames ~= requireIdentifier(MSG.ExpectedImportName); bindAliases ~= bindAlias; - } while (token.type == T.Comma) + } while (token.kind == T.Comma) } require(T.Semicolon); @@ -990,7 +990,7 @@ Declaration parseEnumDeclaration() { - assert(token.type == T.Enum); + assert(token.kind == T.Enum); nT(); // Skip enum keyword. Identifier* enumName; @@ -1008,7 +1008,7 @@ else if (skipped(T.LBrace)) { hasBody = true; - while (token.type != T.RBrace) + while (token.kind != T.RBrace) { auto begin = token; auto name = requireIdentifier(MSG.ExpectedEnumMember); @@ -1021,7 +1021,7 @@ members ~= set(new EnumMember(name, value), begin); - if (token.type != T.Comma) + if (token.kind != T.Comma) break; nT(); // Skip , } @@ -1035,7 +1035,7 @@ Declaration parseClassDeclaration() { - assert(token.type == T.Class); + assert(token.kind == T.Class); nT(); // Skip class keyword. Identifier* className; @@ -1045,15 +1045,15 @@ className = requireIdentifier(MSG.ExpectedClassName); - if (token.type == T.LParen) + if (token.kind == T.LParen) tparams = parseTemplateParameterList(); - if (token.type == T.Colon) + if (token.kind == T.Colon) bases = parseBaseClasses(); if (bases.length == 0 && skipped(T.Semicolon)) {} - else if (token.type == T.LBrace) + else if (token.kind == T.LBrace) decls = parseDeclarationDefinitionsBody(); else error(token, MSG.ExpectedClassBody, token.srcText); @@ -1065,7 +1065,7 @@ { if (colonLeadsOff) { - assert(token.type == T.Colon); + assert(token.kind == T.Colon); nT(); // Skip colon } @@ -1074,7 +1074,7 @@ do { Protection prot = Protection.Public; - switch (token.type) + switch (token.kind) { case T.Identifier, T.Dot, T.Typeof: goto LparseBasicType; case T.Private: prot = Protection.Private; break; @@ -1096,7 +1096,7 @@ Declaration parseInterfaceDeclaration() { - assert(token.type == T.Interface); + assert(token.kind == T.Interface); nT(); // Skip interface keyword. Identifier* name; @@ -1106,15 +1106,15 @@ name = requireIdentifier(MSG.ExpectedInterfaceName); - if (token.type == T.LParen) + if (token.kind == T.LParen) tparams = parseTemplateParameterList(); - if (token.type == T.Colon) + if (token.kind == T.Colon) bases = parseBaseClasses(); if (bases.length == 0 && skipped(T.Semicolon)) {} - else if (token.type == T.LBrace) + else if (token.kind == T.LBrace) decls = parseDeclarationDefinitionsBody(); else error(token, MSG.ExpectedInterfaceBody, token.srcText); @@ -1124,8 +1124,8 @@ Declaration parseStructOrUnionDeclaration() { - assert(token.type == T.Struct || token.type == T.Union); - TOK tok = token.type; + assert(token.kind == T.Struct || token.kind == T.Union); + TOK tok = token.kind; nT(); // Skip struct or union keyword. 
Identifier* name; @@ -1134,12 +1134,12 @@ name = optionalIdentifier(); - if (name && token.type == T.LParen) + if (name && token.kind == T.LParen) tparams = parseTemplateParameterList(); if (name && skipped(T.Semicolon)) {} - else if (token.type == T.LBrace) + else if (token.kind == T.LBrace) decls = parseDeclarationDefinitionsBody(); else error(token, tok == T.Struct ? @@ -1158,7 +1158,7 @@ Declaration parseConstructorDeclaration() { - assert(token.type == T.This); + assert(token.kind == T.This); nT(); // Skip 'this' keyword. auto parameters = parseParameterList(); auto funcBody = parseFunctionBody(); @@ -1167,7 +1167,7 @@ Declaration parseDestructorDeclaration() { - assert(token.type == T.Tilde); + assert(token.kind == T.Tilde); nT(); // Skip ~ require(T.This); require(T.LParen); @@ -1178,7 +1178,7 @@ Declaration parseStaticConstructorDeclaration() { - assert(token.type == T.Static); + assert(token.kind == T.Static); nT(); // Skip static keyword. nT(); // Skip 'this' keyword. require(T.LParen); @@ -1189,7 +1189,7 @@ Declaration parseStaticDestructorDeclaration() { - assert(token.type == T.Static); + assert(token.kind == T.Static); nT(); // Skip static keyword. nT(); // Skip ~ require(T.This); @@ -1201,10 +1201,10 @@ Declaration parseInvariantDeclaration() { - assert(token.type == T.Invariant); + assert(token.kind == T.Invariant); nT(); // Skip invariant keyword. // Optional () for getting ready porting to D 2.0 - if (token.type == T.LParen) + if (token.kind == T.LParen) requireNext(T.RParen); auto funcBody = parseFunctionBody(); return new InvariantDeclaration(funcBody); @@ -1212,7 +1212,7 @@ Declaration parseUnittestDeclaration() { - assert(token.type == T.Unittest); + assert(token.kind == T.Unittest); nT(); // Skip unittest keyword. auto funcBody = parseFunctionBody(); return new UnittestDeclaration(funcBody); @@ -1228,7 +1228,7 @@ Declaration parseDebugDeclaration() { - assert(token.type == T.Debug); + assert(token.kind == T.Debug); nT(); // Skip debug keyword. Token* spec; @@ -1261,7 +1261,7 @@ Declaration parseVersionDeclaration() { - assert(token.type == T.Version); + assert(token.kind == T.Version); nT(); // Skip version keyword. Token* spec; @@ -1291,7 +1291,7 @@ Declaration parseStaticIfDeclaration() { - assert(token.type == T.Static); + assert(token.kind == T.Static); nT(); // Skip static keyword. nT(); // Skip if keyword. @@ -1312,7 +1312,7 @@ Declaration parseStaticAssertDeclaration() { - assert(token.type == T.Static); + assert(token.kind == T.Static); nT(); // Skip static keyword. nT(); // Skip assert keyword. Expression condition, message; @@ -1327,7 +1327,7 @@ Declaration parseTemplateDeclaration() { - assert(token.type == T.Template); + assert(token.kind == T.Template); nT(); // Skip template keyword. auto templateName = requireIdentifier(MSG.ExpectedTemplateName); auto templateParams = parseTemplateParameterList(); @@ -1337,7 +1337,7 @@ Declaration parseNewDeclaration() { - assert(token.type == T.New); + assert(token.kind == T.New); nT(); // Skip new keyword. auto parameters = parseParameterList(); auto funcBody = parseFunctionBody(); @@ -1346,7 +1346,7 @@ Declaration parseDeleteDeclaration() { - assert(token.type == T.Delete); + assert(token.kind == T.Delete); nT(); // Skip delete keyword. 
auto parameters = parseParameterList(); auto funcBody = parseFunctionBody(); @@ -1355,11 +1355,11 @@ Type parseTypeofType() { - assert(token.type == T.Typeof); + assert(token.kind == T.Typeof); auto begin = token; Type type; requireNext(T.LParen); - switch (token.type) + switch (token.kind) { version(D2) { @@ -1386,7 +1386,7 @@ */ Class parseMixin(Class)() { - assert(token.type == T.Mixin); + assert(token.kind == T.Mixin); nT(); // Skip mixin keyword. static if (is(Class == MixinDeclaration)) @@ -1427,7 +1427,7 @@ auto begin = token; require(T.LBrace); auto statements = new Statements(); - while (token.type != T.RBrace && token.type != T.EOF) + while (token.kind != T.RBrace && token.kind != T.EOF) statements ~= parseStatement(); require(T.RBrace); return set(statements, begin); @@ -1445,13 +1445,13 @@ goto LreturnDeclarationStatement; } - switch (token.type) + switch (token.kind) { case T.Align: uint size = parseAlignAttribute(); // Restrict align attribute to structs in parsing phase. StructDeclaration structDecl; - if (token.type == T.Struct) + if (token.kind == T.Struct) { auto begin2 = token; structDecl = parseStructOrUnionDeclaration().to!(StructDeclaration); @@ -1646,7 +1646,7 @@ if (token.isSpecialToken) goto case_parseExpressionStatement; - if (token.type != T.Dollar) + if (token.kind != T.Dollar) // Assert that this isn't a valid expression. assert(delegate bool(){ bool success; @@ -1661,8 +1661,8 @@ do nT(); while (!token.isStatementStart && - token.type != T.RBrace && - token.type != T.EOF) + token.kind != T.RBrace && + token.kind != T.EOF) auto text = Token.textSpan(begin, this.prevToken); error(begin, MSG.IllegalStatement, text); } @@ -1695,12 +1695,12 @@ if (skipped(T.LBrace)) { auto ss = new Statements(); - while (token.type != T.RBrace && token.type != T.EOF) + while (token.kind != T.RBrace && token.kind != T.EOF) ss ~= parseStatement(); require(T.RBrace); s = set(ss, begin); } - else if (token.type == T.Semicolon) + else if (token.kind == T.Semicolon) { error(token, MSG.ExpectedNonEmptyStatement); nT(); @@ -1734,7 +1734,7 @@ { auto begin = token; Declaration d; - switch (token.type) + switch (token.kind) { case T.Extern: if (peekNext() != T.LParen) @@ -1800,7 +1800,7 @@ Statement parseIfStatement() { - assert(token.type == T.If); + assert(token.kind == T.If); nT(); Statement variable; @@ -1855,7 +1855,7 @@ Statement parseWhileStatement() { - assert(token.type == T.While); + assert(token.kind == T.While); nT(); require(T.LParen); auto condition = parseExpression(); @@ -1865,7 +1865,7 @@ Statement parseDoWhileStatement() { - assert(token.type == T.Do); + assert(token.kind == T.Do); nT(); auto doBody = parseScopeStatement(); require(T.While); @@ -1877,21 +1877,21 @@ Statement parseForStatement() { - assert(token.type == T.For); + assert(token.kind == T.For); nT(); require(T.LParen); Statement init, forBody; Expression condition, increment; - if (token.type != T.Semicolon) + if (token.kind != T.Semicolon) init = parseNoScopeStatement(); else nT(); // Skip ; - if (token.type != T.Semicolon) + if (token.kind != T.Semicolon) condition = parseExpression(); require(T.Semicolon); - if (token.type != T.RParen) + if (token.kind != T.RParen) increment = parseExpression(); require(T.RParen); forBody = parseScopeStatement(); @@ -1900,8 +1900,8 @@ Statement parseForeachStatement() { - assert(token.type == T.Foreach || token.type == T.Foreach_reverse); - TOK tok = token.type; + assert(token.kind == T.Foreach || token.kind == T.Foreach_reverse); + TOK tok = token.kind; nT(); auto params = 
new Parameters; @@ -1915,7 +1915,7 @@ Type type; Identifier* ident; - switch (token.type) + switch (token.kind) { case T.Ref, T.Inout: stc = StorageClass.Ref; @@ -1935,7 +1935,7 @@ params ~= set(new Parameter(stc, type, ident, null), paramBegin); - if (token.type != T.Comma) + if (token.kind != T.Comma) break; nT(); } @@ -1961,7 +1961,7 @@ Statement parseSwitchStatement() { - assert(token.type == T.Switch); + assert(token.kind == T.Switch); nT(); require(T.LParen); auto condition = parseExpression(); @@ -1979,17 +1979,17 @@ // This function is similar to parseNoScopeStatement() auto begin = token; auto s = new Statements(); - while (token.type != T.Case && - token.type != T.Default && - token.type != T.RBrace && - token.type != T.EOF) + while (token.kind != T.Case && + token.kind != T.Default && + token.kind != T.RBrace && + token.kind != T.EOF) s ~= parseStatement(); return set(new ScopeStatement(s), begin); } Statement parseCaseStatement() { - assert(token.type == T.Case); + assert(token.kind == T.Case); nT(); auto values = parseExpressionList(); require(T.Colon); @@ -1999,7 +1999,7 @@ Statement parseDefaultStatement() { - assert(token.type == T.Default); + assert(token.kind == T.Default); nT(); require(T.Colon); auto defaultBody = parseCaseOrDefaultBody(); @@ -2008,7 +2008,7 @@ Statement parseContinueStatement() { - assert(token.type == T.Continue); + assert(token.kind == T.Continue); nT(); auto ident = optionalIdentifier(); require(T.Semicolon); @@ -2017,7 +2017,7 @@ Statement parseBreakStatement() { - assert(token.type == T.Break); + assert(token.kind == T.Break); nT(); auto ident = optionalIdentifier(); require(T.Semicolon); @@ -2026,10 +2026,10 @@ Statement parseReturnStatement() { - assert(token.type == T.Return); + assert(token.kind == T.Return); nT(); Expression expr; - if (token.type != T.Semicolon) + if (token.kind != T.Semicolon) expr = parseExpression(); require(T.Semicolon); return new ReturnStatement(expr); @@ -2037,15 +2037,15 @@ Statement parseGotoStatement() { - assert(token.type == T.Goto); + assert(token.kind == T.Goto); nT(); Identifier* ident; Expression caseExpr; - switch (token.type) + switch (token.kind) { case T.Case: nT(); - if (token.type == T.Semicolon) + if (token.kind == T.Semicolon) break; caseExpr = parseExpression(); break; @@ -2061,7 +2061,7 @@ Statement parseWithStatement() { - assert(token.type == T.With); + assert(token.kind == T.With); nT(); require(T.LParen); auto expr = parseExpression(); @@ -2071,7 +2071,7 @@ Statement parseSynchronizedStatement() { - assert(token.type == T.Synchronized); + assert(token.kind == T.Synchronized); nT(); Expression expr; if (skipped(T.LParen)) @@ -2084,7 +2084,7 @@ Statement parseTryStatement() { - assert(token.type == T.Try); + assert(token.kind == T.Try); auto begin = token; nT(); @@ -2114,14 +2114,14 @@ finBody = set(new FinallyBody(parseNoScopeStatement()), prevToken); if (catchBodies.length == 0 && finBody is null) - assert(begin.type == T.Try), error(begin, MSG.MissingCatchOrFinally); + assert(begin.kind == T.Try), error(begin, MSG.MissingCatchOrFinally); return new TryStatement(tryBody, catchBodies, finBody); } Statement parseThrowStatement() { - assert(token.type == T.Throw); + assert(token.kind == T.Throw); nT(); auto expr = parseExpression(); require(T.Semicolon); @@ -2130,9 +2130,9 @@ Statement parseScopeGuardStatement() { - assert(token.type == T.Scope); + assert(token.kind == T.Scope); nT(); - assert(token.type == T.LParen); + assert(token.kind == T.LParen); nT(); auto condition = 
requireIdentifier(MSG.ExpectedScopeIdentifier); if (condition) @@ -2145,7 +2145,7 @@ } require(T.RParen); Statement scopeBody; - if (token.type == T.LBrace) + if (token.kind == T.LBrace) scopeBody = parseScopeStatement(); else scopeBody = parseNoScopeStatement(); @@ -2154,12 +2154,12 @@ Statement parseVolatileStatement() { - assert(token.type == T.Volatile); + assert(token.kind == T.Volatile); nT(); Statement volatileBody; - if (token.type == T.Semicolon) + if (token.kind == T.Semicolon) nT(); - else if (token.type == T.LBrace) + else if (token.kind == T.LBrace) volatileBody = parseScopeStatement(); else volatileBody = parseStatement(); @@ -2168,7 +2168,7 @@ Statement parsePragmaStatement() { - assert(token.type == T.Pragma); + assert(token.kind == T.Pragma); nT(); Identifier* ident; @@ -2189,9 +2189,9 @@ Statement parseStaticIfStatement() { - assert(token.type == T.Static); + assert(token.kind == T.Static); nT(); - assert(token.type == T.If); + assert(token.kind == T.If); nT(); Expression condition; Statement ifBody, elseBody; @@ -2207,9 +2207,9 @@ Statement parseStaticAssertStatement() { - assert(token.type == T.Static); + assert(token.kind == T.Static); nT(); - assert(token.type == T.Assert); + assert(token.kind == T.Assert); nT(); Expression condition, message; require(T.LParen); @@ -2223,7 +2223,7 @@ Statement parseDebugStatement() { - assert(token.type == T.Debug); + assert(token.kind == T.Debug); nT(); // Skip debug keyword. Token* cond; @@ -2247,7 +2247,7 @@ Statement parseVersionStatement() { - assert(token.type == T.Version); + assert(token.kind == T.Version); nT(); // Skip version keyword. Token* cond; @@ -2272,11 +2272,11 @@ Statement parseAsmStatement() { - assert(token.type == T.Asm); + assert(token.kind == T.Asm); nT(); // Skip asm keyword. require(T.LBrace); auto ss = new Statements; - while (token.type != T.RBrace && token.type != T.EOF) + while (token.kind != T.RBrace && token.kind != T.EOF) ss ~= parseAsmInstruction(); require(T.RBrace); return new AsmStatement(ss); @@ -2287,7 +2287,7 @@ auto begin = token; Statement s; Identifier* ident; - switch (token.type) + switch (token.kind) { // Keywords that are valid opcodes. 
case T.In, T.Int, T.Out: @@ -2310,7 +2310,7 @@ // Opcode // Identifier Expression[] es; - if (token.type != T.Semicolon) + if (token.kind != T.Semicolon) do es ~= parseAsmExpression(); while (skipped(T.Comma)) @@ -2321,7 +2321,7 @@ // align Integer; nT(); int number = -1; - if (token.type == T.Int32) + if (token.kind == T.Int32) (number = token.int_), nT(); else error(token, MSG.ExpectedIntegerAfterAlign, token.srcText); @@ -2338,8 +2338,8 @@ do nT(); while (!token.isAsmInstructionStart && - token.type != T.RBrace && - token.type != T.EOF) + token.kind != T.RBrace && + token.kind != T.EOF) auto text = Token.textSpan(begin, this.prevToken); error(begin, MSG.IllegalAsmInstruction, text); } @@ -2369,7 +2369,7 @@ alias parseAsmAndAndExpression parseNext; auto begin = token; auto e = parseNext(); - while (token.type == T.OrLogical) + while (token.kind == T.OrLogical) { auto tok = token; nT(); @@ -2384,7 +2384,7 @@ alias parseAsmOrExpression parseNext; auto begin = token; auto e = parseNext(); - while (token.type == T.AndLogical) + while (token.kind == T.AndLogical) { auto tok = token; nT(); @@ -2399,7 +2399,7 @@ alias parseAsmXorExpression parseNext; auto begin = token; auto e = parseNext(); - while (token.type == T.OrBinary) + while (token.kind == T.OrBinary) { auto tok = token; nT(); @@ -2414,7 +2414,7 @@ alias parseAsmAndExpression parseNext; auto begin = token; auto e = parseNext(); - while (token.type == T.Xor) + while (token.kind == T.Xor) { auto tok = token; nT(); @@ -2429,7 +2429,7 @@ alias parseAsmCmpExpression parseNext; auto begin = token; auto e = parseNext(); - while (token.type == T.AndBinary) + while (token.kind == T.AndBinary) { auto tok = token; nT(); @@ -2446,7 +2446,7 @@ auto e = parseNext(); auto operator = token; - switch (operator.type) + switch (operator.kind) { case T.Equal, T.NotEqual: nT(); @@ -2471,7 +2471,7 @@ while (1) { auto operator = token; - switch (operator.type) + switch (operator.kind) { case T.LShift: nT(); e = new LShiftExpression(e, parseNext(), operator); break; case T.RShift: nT(); e = new RShiftExpression(e, parseNext(), operator); break; @@ -2492,7 +2492,7 @@ while (1) { auto operator = token; - switch (operator.type) + switch (operator.kind) { case T.Plus: nT(); e = new PlusExpression(e, parseNext(), operator); break; case T.Minus: nT(); e = new MinusExpression(e, parseNext(), operator); break; @@ -2514,7 +2514,7 @@ while (1) { auto operator = token; - switch (operator.type) + switch (operator.kind) { case T.Mul: nT(); e = new MulExpression(e, parseNext(), operator); break; case T.Div: nT(); e = new DivExpression(e, parseNext(), operator); break; @@ -2544,7 +2544,7 @@ { auto begin = token; Expression e; - switch (token.type) + switch (token.kind) { case T.Byte, T.Short, T.Int, T.Float, T.Double, T.Real: @@ -2556,7 +2556,7 @@ ID.word, ID.dword, ID.qword/*, "float", "double", "real"*/: LAsmTypePrefix: nT(); - if (token.type == T.Identifier && token.ident is Ident.ptr) + if (token.kind == T.Identifier && token.ident is Ident.ptr) nT(); else error(MID.ExpectedButFound, "ptr", token.srcText); @@ -2609,7 +2609,7 @@ { auto begin = token; Expression e; - switch (token.type) + switch (token.kind) { case T.Int32, T.Int64, T.Uint32, T.Uint64: e = new IntExpression(token); @@ -2647,7 +2647,7 @@ int number = -1; if (skipped(T.LParen)) { - if (token.type == T.Int32) + if (token.kind == T.Int32) (number = token.int_), nT(); else expected(T.Int32); @@ -2662,7 +2662,7 @@ if (skipped(T.Colon)) { // :0, :4, :8 - if (token.type == T.Int32) + if (token.kind == T.Int32) 
(number = token.int_), nT(); if (number != 0 && number != 4 && number != 8) error(MID.ExpectedButFound, "0, 4 or 8", token.srcText); @@ -2717,7 +2717,7 @@ alias parseAssignExpression parseNext; auto begin = token; auto e = parseNext(); - while (token.type == T.Comma) + while (token.kind == T.Comma) { auto comma = token; nT(); @@ -2732,7 +2732,7 @@ alias parseAssignExpression parseNext; auto begin = token; auto e = parseCondExpression(); - switch (token.type) + switch (token.kind) { case T.Assign: nT(); e = new AssignExpression(e, parseNext()); break; @@ -2771,7 +2771,7 @@ { auto begin = token; auto e = parseOrOrExpression(); - if (token.type == T.Question) + if (token.kind == T.Question) { auto tok = token; nT(); @@ -2789,7 +2789,7 @@ alias parseAndAndExpression parseNext; auto begin = token; auto e = parseNext(); - while (token.type == T.OrLogical) + while (token.kind == T.OrLogical) { auto tok = token; nT(); @@ -2804,7 +2804,7 @@ alias parseOrExpression parseNext; auto begin = token; auto e = parseNext(); - while (token.type == T.AndLogical) + while (token.kind == T.AndLogical) { auto tok = token; nT(); @@ -2819,7 +2819,7 @@ alias parseXorExpression parseNext; auto begin = token; auto e = parseNext(); - while (token.type == T.OrBinary) + while (token.kind == T.OrBinary) { auto tok = token; nT(); @@ -2834,7 +2834,7 @@ alias parseAndExpression parseNext; auto begin = token; auto e = parseNext(); - while (token.type == T.Xor) + while (token.kind == T.Xor) { auto tok = token; nT(); @@ -2849,7 +2849,7 @@ alias parseCmpExpression parseNext; auto begin = token; auto e = parseNext(); - while (token.type == T.AndBinary) + while (token.kind == T.AndBinary) { auto tok = token; nT(); @@ -2866,7 +2866,7 @@ auto e = parseShiftExpression(); auto operator = token; - switch (operator.type) + switch (operator.kind) { case T.Equal, T.NotEqual: nT(); @@ -2906,7 +2906,7 @@ while (1) { auto operator = token; - switch (operator.type) + switch (operator.kind) { case T.LShift: nT(); e = new LShiftExpression(e, parseNext(), operator); break; case T.RShift: nT(); e = new RShiftExpression(e, parseNext(), operator); break; @@ -2927,7 +2927,7 @@ while (1) { auto operator = token; - switch (operator.type) + switch (operator.kind) { case T.Plus: nT(); e = new PlusExpression(e, parseNext(), operator); break; case T.Minus: nT(); e = new MinusExpression(e, parseNext(), operator); break; @@ -2948,7 +2948,7 @@ while (1) { auto operator = token; - switch (operator.type) + switch (operator.kind) { case T.Mul: nT(); e = new MulExpression(e, parseNext(), operator); break; case T.Div: nT(); e = new DivExpression(e, parseNext(), operator); break; @@ -2973,7 +2973,7 @@ set(e, begin); } - switch (token.type) + switch (token.kind) { case T.PlusPlus: e = new PostIncrExpression(e); @@ -2988,7 +2988,7 @@ // parse Slice- and IndexExpression nT(); // [] is a SliceExpression - if (token.type == T.RBracket) + if (token.kind == T.RBracket) { e = new SliceExpression(e, null, null); break; @@ -3025,7 +3025,7 @@ { auto begin = token; Expression e; - switch (token.type) + switch (token.kind) { case T.AndBinary: nT(); @@ -3066,7 +3066,7 @@ case T.Cast: requireNext(T.LParen); Type type; - switch (token.type) + switch (token.kind) { version(D2) { @@ -3132,7 +3132,7 @@ auto ident = requireIdentifier(MSG.ExpectedAnIdentifier); Expression e; // Peek for '(' to avoid matching: id !is id - if (token.type == T.Not && peekNext() == T.LParen) + if (token.kind == T.Not && peekNext() == T.LParen) { // Identifier !( TemplateArguments ) nT(); // Skip !. 
auto tparams = parseTemplateArguments(); @@ -3145,14 +3145,14 @@ Expression parseNewOrIdentifierExpression() { - return token.type == T.New ? parseNewExpression() : parseIdentifierExpression(); + return token.kind == T.New ? parseNewExpression() : parseIdentifierExpression(); } Expression parsePrimaryExpression() { auto begin = token; Expression e; - switch (token.type) + switch (token.kind) { case T.Identifier: e = parseIdentifierExpression(); @@ -3197,7 +3197,7 @@ char[] str = token.str; char postfix = token.pf; nT(); - while (token.type == T.String) + while (token.kind == T.String) { if (postfix == '\0') postfix = token.pf; @@ -3221,7 +3221,7 @@ if (!skipped(T.RBracket)) { e = parseAssignExpression(); - if (token.type == T.Colon) + if (token.kind == T.Colon) goto LparseAssocArray; if (skipped(T.Comma)) values = [e] ~ parseExpressionList(); @@ -3244,7 +3244,7 @@ require(T.Colon); LenterLoop: values ~= parseAssignExpression(); - if (token.type != T.Comma) + if (token.kind != T.Comma) break; nT(); } @@ -3261,9 +3261,9 @@ nT(); // Skip function|delegate token. Type returnType; Parameters parameters; - if (token.type != T.LBrace) + if (token.kind != T.LBrace) { - if (token.type != T.LParen) // Optional return type + if (token.kind != T.LParen) // Optional return type returnType = parseType(); parameters = parseParameterList(); } @@ -3306,12 +3306,12 @@ type = parseDeclarator(ident, true); - switch (token.type) + switch (token.kind) { case T.Colon, T.Equal: opTok = token; nT(); - switch (token.type) + switch (token.kind) { case T.Typedef, T.Struct, @@ -3342,7 +3342,7 @@ { // is ( Type Identifier : TypeSpecialization , TemplateParameterList ) // is ( Type Identifier == TypeSpecialization , TemplateParameterList ) - if (ident && specType && token.type == T.Comma) + if (ident && specType && token.kind == T.Comma) tparams = parseTemplateParameterList2(); } require(T.RParen); @@ -3370,7 +3370,7 @@ require(T.LParen); auto id = requireIdentifier(MSG.ExpectedAnIdentifier); TemplateArguments args; - if (token.type == T.Comma) + if (token.kind == T.Comma) args = parseTemplateArguments2(); else require(T.RParen); @@ -3380,7 +3380,7 @@ default: if (token.isIntegralType) { // IntegralType . Identifier - auto type = new IntegralType(token.type); + auto type = new IntegralType(token.kind); nT(); set(type, begin); require(T.Dot); @@ -3410,23 +3410,23 @@ Expression parseNewExpression(/*Expression e*/) { auto begin = token; - assert(token.type == T.New); + assert(token.kind == T.New); nT(); // Skip new keyword. Expression[] newArguments; Expression[] ctorArguments; - if (token.type == T.LParen) + if (token.kind == T.LParen) newArguments = parseArguments(); // NewAnonClassExpression: // new (ArgumentList)opt class (ArgumentList)opt SuperClassopt InterfaceClassesopt ClassBody if (skipped(T.Class)) { - if (token.type == T.LParen) + if (token.kind == T.LParen) ctorArguments = parseArguments(); - BaseClassType[] bases = token.type != T.LBrace ? parseBaseClasses(false) : null ; + BaseClassType[] bases = token.kind != T.LBrace ? 
parseBaseClasses(false) : null ; auto decls = parseDeclarationDefinitionsBody(); return set(new NewAnonClassExpression(/*e, */newArguments, bases, ctorArguments, decls), begin); @@ -3438,7 +3438,7 @@ // NewArguments Type auto type = parseType(); - if (token.type == T.LParen) + if (token.kind == T.LParen) ctorArguments = parseArguments(); return set(new NewExpression(/*e, */newArguments, type, ctorArguments), begin); @@ -3467,10 +3467,10 @@ Type type; if (skipped(T.Dot)) type = set(new ModuleScopeType(parseIdentifierType()), begin); - else if (token.type == T.Typeof) + else if (token.kind == T.Typeof) { type = parseTypeofType(); - if (token.type != T.Dot) + if (token.kind != T.Dot) return type; } else @@ -3488,11 +3488,11 @@ if (token.isIntegralType) { - t = new IntegralType(token.type); + t = new IntegralType(token.kind); nT(); } else - switch (token.type) + switch (token.kind) { case T.Identifier, T.Typeof, T.Dot: t = parseQualifiedType(); @@ -3530,7 +3530,7 @@ while (1) { begin = token; - switch (token.type) + switch (token.kind) { case T.Mul: t = new PointerType(t); @@ -3540,7 +3540,7 @@ t = parseArrayType(t); continue; case T.Function, T.Delegate: - TOK tok = token.type; + TOK tok = token.kind; nT(); auto parameters = parseParameterList(); if (tok == T.Function) @@ -3559,14 +3559,14 @@ bool tokenAfterParenIs(TOK tok) { // We count nested parentheses tokens because template types may appear inside parameter lists; e.g. (int x, Foo!(int) y). - assert(token.type == T.LParen); + assert(token.kind == T.LParen); Token* next = token; uint level = 1; Loop: while (1) { lexer.peek(next); - switch (next.type) + switch (next.kind) { case T.RParen: if (--level == 0) @@ -3585,7 +3585,7 @@ default: } } - return next.type == tok; + return next.kind == tok; } /// Parse the C-style array types after the declarator. @@ -3598,7 +3598,7 @@ // Resulting chain: [][32]*[3]int Type parseNext() // Nested function required to accomplish this. { - if (token.type != T.LBracket) + if (token.kind != T.LBracket) return lhsType; // Break recursion; return Type on the left hand side of the Identifier. auto begin = token; @@ -3635,7 +3635,7 @@ Type parseArrayType(Type t) { - assert(token.type == T.LBracket); + assert(token.kind == T.LBracket); auto begin = token; nT(); if (skipped(T.RBracket)) @@ -3667,17 +3667,17 @@ Type parseCFunctionPointerType(Type type, ref Identifier* ident, bool optionalParamList) { - assert(token.type == T.LParen); + assert(token.kind == T.LParen); assert(type !is null); auto begin = token; nT(); // Skip ( type = parseBasicType2(type); - if (token.type == T.LParen) + if (token.kind == T.LParen) { // Can be nested. type = parseCFunctionPointerType(type, ident, true); } - else if (token.type == T.Identifier) + else if (token.kind == T.Identifier) { // The identifier of the function pointer and the declaration. ident = token.ident; @@ -3688,7 +3688,7 @@ Parameters params; if (optionalParamList) - params = token.type == T.LParen ? parseParameterList() : null; + params = token.kind == T.LParen ? 
parseParameterList() : null; else params = parseParameterList(); @@ -3700,9 +3700,9 @@ { auto t = parseType(); - if (token.type == T.LParen) + if (token.kind == T.LParen) t = parseCFunctionPointerType(t, ident, true); - else if (token.type == T.Identifier) + else if (token.kind == T.Identifier) { ident = token.ident; nT(); @@ -3737,10 +3737,10 @@ +/ Expression[] parseArguments() { - assert(token.type == T.LParen); + assert(token.kind == T.LParen); nT(); Expression[] args; - if (token.type != TOK.RParen) + if (token.kind != TOK.RParen) args = parseExpressionList(); require(TOK.RParen); return args; @@ -3788,7 +3788,7 @@ } Lstc_loop: - switch (token.type) + switch (token.kind) { version(D2) { @@ -3850,7 +3850,7 @@ pushParameter(); - if (token.type != T.Comma) + if (token.kind != T.Comma) break Loop; nT(); } @@ -3863,7 +3863,7 @@ { TemplateArguments targs; require(T.LParen); - if (token.type != T.RParen) + if (token.kind != T.RParen) targs = parseTemplateArguments_(); require(T.RParen); return targs; @@ -3873,10 +3873,10 @@ { TemplateArguments parseTemplateArguments2() { - assert(token.type == T.Comma); + assert(token.kind == T.Comma); nT(); TemplateArguments targs; - if (token.type != T.RParen) + if (token.kind != T.RParen) targs = parseTemplateArguments_(); else error(token, MSG.ExpectedTypeOrExpression); @@ -3894,7 +3894,7 @@ Type parseType_() { auto type = parseType(); - if (token.type == T.Comma || token.type == T.RParen) + if (token.kind == T.Comma || token.kind == T.RParen) return type; ++errorCount; // Cause try_() to fail. return null; @@ -3910,7 +3910,7 @@ // TemplateArgument: // AssignExpression targs ~= parseAssignExpression(); - if (token.type != T.Comma) + if (token.kind != T.Comma) break; // Exit loop. nT(); } @@ -3922,7 +3922,7 @@ { TemplateParameters tparams; require(T.LParen); - if (token.type != T.RParen) + if (token.kind != T.RParen) tparams = parseTemplateParameterList_(); require(T.RParen); return tparams; @@ -3932,10 +3932,10 @@ { TemplateParameters parseTemplateParameterList2() { - assert(token.type == T.Comma); + assert(token.kind == T.Comma); nT(); TemplateParameters tparams; - if (token.type != T.RParen) + if (token.kind != T.RParen) tparams = parseTemplateParameterList_(); else error(token, MSG.ExpectedTemplateParameters); @@ -3965,7 +3965,7 @@ defType = parseType(); } - switch (token.type) + switch (token.kind) { case T.Alias: // TemplateAliasParameter: @@ -3984,7 +3984,7 @@ // Identifier ... nT(); // Skip Identifier. nT(); // Skip Ellipses. - if (token.type == T.Comma) + if (token.kind == T.Comma) error(MID.TemplateTupleParameter); tp = new TemplateTupleParameter(ident); break; @@ -4031,7 +4031,7 @@ // Push template parameter. 
tparams ~= set(tp, paramBegin); - if (token.type != T.Comma) + if (token.kind != T.Comma) break; nT(); } @@ -4041,13 +4041,13 @@ void expected(TOK tok) { - if (token.type != tok) + if (token.kind != tok) error(MID.ExpectedButFound, Token.toString(tok), token.srcText); } void require(TOK tok) { - if (token.type == tok) + if (token.kind == tok) nT(); else error(MID.ExpectedButFound, Token.toString(tok), token.srcText); @@ -4062,7 +4062,7 @@ Identifier* optionalIdentifier() { Identifier* id; - if (token.type == T.Identifier) + if (token.kind == T.Identifier) (id = token.ident), nT(); return id; } @@ -4070,7 +4070,7 @@ Identifier* requireIdentifier() { Identifier* id; - if (token.type == T.Identifier) + if (token.kind == T.Identifier) (id = token.ident), nT(); else error(MID.ExpectedButFound, "Identifier", token.srcText); @@ -4084,7 +4084,7 @@ Identifier* requireIdentifier(char[] errorMsg) { Identifier* id; - if (token.type == T.Identifier) + if (token.kind == T.Identifier) (id = token.ident), nT(); else error(token, errorMsg, token.srcText); @@ -4094,7 +4094,7 @@ Identifier* requireIdentifier(MID mid) { Identifier* id; - if (token.type == T.Identifier) + if (token.kind == T.Identifier) (id = token.ident), nT(); else error(mid, token.srcText); @@ -4103,7 +4103,7 @@ Token* requireId() { - if (token.type == T.Identifier) + if (token.kind == T.Identifier) { auto id = token; nT(); @@ -4117,7 +4117,7 @@ Token* requireIdToken(char[] errorMsg) { Token* idtok; - if (token.type == T.Identifier) + if (token.kind == T.Identifier) (idtok = token), nT(); else error(token, errorMsg, token.srcText); diff -r cedfc67faabf -r ff6971637f88 trunk/src/dil/semantic/Pass1.d --- a/trunk/src/dil/semantic/Pass1.d Fri Jan 18 23:11:44 2008 +0100 +++ b/trunk/src/dil/semantic/Pass1.d Fri Jan 18 23:40:12 2008 +0100 @@ -334,7 +334,7 @@ { if (e.type) return e.value; - switch (e.specialToken.type) + switch (e.specialToken.kind) { case TOK.LINE, TOK.VERSION: e.value = new IntExpression(e.specialToken.uint_, Types.Uint); @@ -370,9 +370,7 @@ { if (e.type) return e; - assert(e.begin !is null); - auto b = (e.begin.type == TOK.True) ? true : false; - e.value = new IntExpression(b, Types.Bool); + e.value = new IntExpression(e.toBool(), Types.Bool); e.type = Types.Bool; return e; } diff -r cedfc67faabf -r ff6971637f88 trunk/src/main.d --- a/trunk/src/main.d Fri Jan 18 23:11:44 2008 +0100 +++ b/trunk/src/main.d Fri Jan 18 23:40:12 2008 +0100 @@ -182,9 +182,9 @@ lx.scanAll(); auto token = lx.firstToken(); - for (; token.type != TOK.EOF; token = token.next) + for (; token.kind != TOK.EOF; token = token.next) { - if (token.type == TOK.Newline || ignoreWSToks && token.isWhitespace) + if (token.kind == TOK.Newline || ignoreWSToks && token.isWhitespace) continue; if (printWS && token.ws) Stdout(token.wsChars);
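The loop in main.d above shows the consumer-side effect of the rename: token-walking code now tests token.kind instead of token.type while following the next links. Below is a minimal sketch of that pattern, assuming a simplified stand-in Token class and TOK enum defined locally for illustration (dil's actual Token carries much more state, including a prev link):

enum TOK { HEAD, Newline, Identifier, EOF }

class Token
{
  TOK kind;    // renamed from 'type' by this changeset
  Token next;  // forward link through the token list
}

// Walk the list the way main.d does, stopping at EOF and counting newlines.
uint countNewlines(Token token)
{
  uint n;
  for (; token !is null && token.kind != TOK.EOF; token = token.next)
    if (token.kind == TOK.Newline)
      ++n;
  return n;
}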
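tokenAfterParenIs() in the Parser.d hunks above peeks past a parenthesized group while counting nesting, so template instantiations such as Foo!(int) inside a parameter list do not end the scan early. The following is a sketch of the same counting idea over a plain array, with a hypothetical TokenKind enum standing in for dil's TOK and no claim to match the parser's exact behavior:

enum TokenKind { LParen, RParen, Identifier, LBrace, EOF }

// Returns the kind of the token following the balanced group that starts
// at tokens[i] == LParen; nested parentheses bump and drop the level.
TokenKind kindAfterParen(TokenKind[] tokens, size_t i)
{
  assert(tokens[i] == TokenKind.LParen);
  uint level = 1;
  while (++i < tokens.length)
  {
    if (tokens[i] == TokenKind.LParen)
      ++level;                              // e.g. the '(' of Foo!(int)
    else if (tokens[i] == TokenKind.RParen && --level == 0)
      break;                                // matching ')' of the outer group
    else if (tokens[i] == TokenKind.EOF)
      break;
  }
  return i + 1 < tokens.length ? tokens[i + 1] : TokenKind.EOF;
}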
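The Pass1.d hunk above replaces an inline check of the expression's first token with a call to toBool(), keeping the literal's interpretation inside the node. A minimal sketch of that folding step, using hypothetical simplified expression classes rather than dil's AST types:

class Expr { }

class IntExpr : Expr
{
  long value;
  this(long v) { value = v; }
}

class BoolExpr : Expr
{
  bool literal;   // the value toBool() reports
  Expr value;     // cached folded constant, as in Pass1.d
  bool toBool() { return literal; }
}

// Mirror of the visit step: fold the boolean literal once into an integer constant.
Expr fold(BoolExpr e)
{
  if (e.value is null)
    e.value = new IntExpr(e.toBool() ? 1 : 0);
  return e;
}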