# HG changeset patch
# User Aziz Köksal
# Date 1203797756 -3600
# Node ID 57ef69eced96a0801806f2f1581b86ad278f6276
# Parent  939097e0990f4dd223cbe0ab16ac6c367349a040
Added functions isCodeSection() and skipCodeSection().
Added a lot of documentation comments and revised some.

diff -r 939097e0990f -r 57ef69eced96 trunk/src/cmd/DDoc.d
--- a/trunk/src/cmd/DDoc.d	Sat Feb 23 02:26:43 2008 +0100
+++ b/trunk/src/cmd/DDoc.d	Sat Feb 23 21:15:56 2008 +0100
@@ -608,9 +608,6 @@
   alias Declaration D;
 
 override:
-//   D visit(ModuleDeclaration d)
-//   { return d; }
-
   D visit(AliasDeclaration d)
   {
     if (!ddoc(d))
diff -r 939097e0990f -r 57ef69eced96 trunk/src/cmd/Generate.d
--- a/trunk/src/cmd/Generate.d	Sat Feb 23 02:26:43 2008 +0100
+++ b/trunk/src/cmd/Generate.d	Sat Feb 23 21:15:56 2008 +0100
@@ -33,7 +33,7 @@
   PrintLines = 1<<4
 }
 
-/// Executes the command.
+/// Executes the generate command.
 void execute(string filePath, GenOption options, InfoManager infoMan)
 {
   assert(options != GenOption.Empty);
@@ -70,6 +70,7 @@
   return text;
 }
 
+/// Maps tokens to (format) strings.
 class TagMap
 {
   string[string] table;
@@ -104,6 +105,7 @@
       tokenTable[i] = *pStr;
   }
 
+  /// Returns the value for str, or 'fallback' if str is not in the table.
   string opIndex(string str, string fallback = "")
   {
     auto p = str in table;
@@ -112,11 +114,13 @@
     return fallback;
   }
 
+  /// Returns the value for tok in O(1) time.
   string opIndex(TOK tok)
   {
     return tokenTable[tok];
  }
 
+  /// Shortcuts for quick access.
   string Identifier, String, Char, Number, Keyword, LineC, BlockC,
          NestedC, Shebang, HLine, Filespec, Illegal, Newline, SpecialToken,
          Declaration, Statement, Expression, Type, Other, EOF;
@@ -148,7 +152,8 @@
   return -1;
 }
 
-/// Returns: the short class name of an instance descending from Node.
+/// Returns the short class name of a class descending from Node.$(BR)
+/// E.g.: dil.ast.Declarations.ClassDeclaration -> Class
 char[] getShortClassName(Node node)
 {
   static char[][] name_table;
@@ -271,6 +276,7 @@
 // {
 // }
 
+/// Highlights the syntax in a source file.
 void highlightSyntax(string filePath, TagMap tags, Print!(char) print, GenOption options)
 {
   auto parser = new Parser(new SourceText(filePath, true));
@@ -328,7 +334,7 @@
   print(tags["DocEnd"]);
 }
 
-/// Prints all tokens of a source file using the buffer print.
+/// Highlights all tokens of a source file.
 void highlightTokens(string filePath, TagMap tags, Print!(char) print, GenOption options)
 {
   auto lx = new Lexer(new SourceText(filePath, true));
@@ -359,6 +365,7 @@
   print(tags["DocEnd"]);
 }
 
+/// A token highlighter designed for DDoc.
 class TokenHighlighter
 {
   TagMap tags;
@@ -396,7 +403,7 @@
   }
 }
 
-/// Prints a token with tags using the buffer print.
+/// Prints a token to the stream print.
 void printToken(Token* token, TagMap tags, Print!(char) print)
 {
   switch(token.kind)
diff -r 939097e0990f -r 57ef69eced96 trunk/src/cmd/ImportGraph.d
--- a/trunk/src/cmd/ImportGraph.d	Sat Feb 23 02:26:43 2008 +0100
+++ b/trunk/src/cmd/ImportGraph.d	Sat Feb 23 21:15:56 2008 +0100
@@ -20,6 +20,7 @@
 
 alias FileConst.PathSeparatorChar dirSep;
 
+/// Options for the importgraph command.
 enum IGraphOption
 {
   None,
@@ -34,10 +35,11 @@
   MarkCyclicModules = 1<<8,
 }
 
+/// Represents a module dependency graph.
 class Graph
 {
-  Vertex[] vertices;
-  Edge[] edges;
+  Vertex[] vertices; /// The vertices or modules.
+  Edge[] edges; /// The edges or import statements.
 
   void addVertex(Vertex vertex)
   {
@@ -123,6 +125,8 @@
   Status status; /// Used by the cycle detection algorithm.
 }
 
+/// Searches for a module in the file system looking in importPaths.
+/// Returns: the file path to the module, or null if it wasn't found.
 string findModuleFilePath(string moduleFQNPath, string[] importPaths)
 {
   auto filePath = new FilePath();
@@ -140,6 +144,7 @@
   return null;
 }
 
+/// Builds a module dependency graph.
 class GraphBuilder
 {
   Graph graph;
@@ -154,17 +159,18 @@
   }
 
   /// Start building the graph and return that.
+  /// Params:
+  ///   fileName = the file name of the root module.
   Graph start(string fileName)
   {
     loadModule(fileName);
     return graph;
   }
 
-  /++
-    Loads all modules recursively and builds the graph at the same time.
-    Params:
-      moduleFQNPath = e.g.: dil/ast/Node (module FQN = dil.ast.Node)
-  +/
+  /// Loads all modules recursively and builds the graph at the same time.
+  /// Params:
+  ///   moduleFQNPath = the path version of the module FQN.$(BR)
+  ///     E.g.: FQN = dil.ast.Node -> FQNPath = dil/ast/Node
   Vertex loadModule(string moduleFQNPath)
   {
     // Look up in table if the module is already loaded.
@@ -228,6 +234,7 @@
   }
 }
 
+/// Executes the importgraph command.
 void execute(string filePathString, CompilationContext context, string[] strRegexps,
              uint levels, string siStyle, string piStyle, IGraphOption options)
 {
@@ -269,6 +276,7 @@
   printDotDocument(graph, siStyle, piStyle, options);
 }
 
+/// Prints the file paths to the modules.
 void printModulePaths(Vertex[] vertices, uint level, char[] indent)
 {
   if (level == 0)
@@ -281,6 +289,7 @@
   }
 }
 
+/// Prints a list of module FQNs.
 void printModuleList(Vertex[] vertices, uint level, char[] indent)
 {
   if (level == 0)
@@ -293,6 +302,7 @@
   }
 }
 
+/// Prints the graph as a graphviz dot document.
 void printDotDocument(Graph graph, string siStyle, string piStyle,
                       IGraphOption options)
 {
diff -r 939097e0990f -r 57ef69eced96 trunk/src/dil/doc/Doc.d
--- a/trunk/src/dil/doc/Doc.d	Sat Feb 23 02:26:43 2008 +0100
+++ b/trunk/src/dil/doc/Doc.d	Sat Feb 23 21:15:56 2008 +0100
@@ -12,6 +12,7 @@
 
 import tango.text.Ascii : icompare;
 
+/// Represents a sanitized and parsed DDoc comment.
 class DDocComment
 {
   Section[] sections; /// The sections of this comment.
@@ -37,7 +38,7 @@
     return null;
   }
 
-  /// Returns: true if "ditto" is the only text in this comment.
+  /// Returns true if "ditto" is the only text in this comment.
   bool isDitto()
   {
     if (summary && sections.length == 1 &&
@@ -45,15 +46,6 @@
       return true;
     return false;
   }
-
-//   MacrosSection[] getMacros()
-//   {
-//     MacrosSection[] macros;
-//     foreach (section; sections)
-//       if (section.Is("macros"))
-//         macros ~= new MacrosSection(section.name, section.text);
-//     return macros;
-//   }
 }
 
 /// Returns a node's DDocComment.
@@ -90,8 +82,8 @@
 /// Parses a DDoc comment string.
 struct DDocParser
 {
-  char* p;
-  char* textEnd;
+  char* p; /// Current character pointer.
+  char* textEnd; /// Points one character past the end of the text.
   Section[] sections; /// Parsed sections.
   Section summary; /// Optional summary section.
   Section description; /// Optional description section.
@@ -151,6 +143,8 @@
     return makeString(begin, end);
   }
 
+  /// Separates the text between p and end
+  /// into a summary and description section.
   void scanSummaryAndDescription(char* p, char* end)
   {
     assert(p <= end);
@@ -159,18 +153,8 @@
     end--; // Decrement end, so we can look ahead one character.
     while (p < end && !(*p == '\n' && p[1] == '\n'))
     {
-      // Skip over code sections. This is unlike how dmd behaves.
-      if (p+2 < end && *p == '-' && p[1] == '-' && p[2] == '-')
-      {
-        while (p < end && *p == '-')
-          p++;
-        p--;
-        while (++p < end)
-          if (p+2 < end && *p == '-' && p[1] == '-' && p[2] == '-')
-            break;
-        if (p >= end)
-          break;
-      }
+      if (isCodeSection(p, end))
+        skipCodeSection(p, end);
       p++;
     }
     end++;
@@ -193,6 +177,34 @@
     }
   }
 
+  /// Returns true if p points to "$(DDD)".
+  bool isCodeSection(char* p, char* end)
+  {
+    return p+2 < end && *p == '-' && p[1] == '-' && p[2] == '-';
+  }
+
+  /// Skips over a code section.
+  ///
+  /// Note that dmd apparently doesn't skip over code sections when
+  /// parsing DDoc sections. However, from experience it seems
+  /// to be a good idea to do that.
+  void skipCodeSection(ref char* p, char* end)
+  out { assert(p+1 == end || *p == '-'); }
+  body
+  {
+    assert(isCodeSection(p, end));
+
+    while (p < end && *p == '-')
+      p++;
+    p--;
+    while (++p < end)
+      if (p+2 < end && *p == '-' && p[1] == '-' && p[2] == '-')
+        break;
+    while (p < end && *p == '-')
+      p++;
+    p--;
+  }
+
   void skipWhitespace(ref char* p)
   {
     while (p < textEnd && (isspace(*p) || *p == '\n'))
@@ -201,7 +213,7 @@
 
   /// Find next "Identifier:".
   /// Params:
-  ///   ident = set to the Identifier
+  ///   ident = set to the Identifier.
   ///   bodyBegin = set to the beginning of the text body (whitespace skipped.)
   /// Returns: true if found.
   bool findNextIdColon(ref char[] ident, ref char* bodyBegin)
@@ -211,6 +223,12 @@
       skipWhitespace(p);
       if (p >= textEnd)
         break;
+      if (isCodeSection(p, textEnd))
+      {
+        skipCodeSection(p, textEnd);
+        p++;
+        continue;
+      }
       assert(isascii(*p) || isLeadByte(*p));
       auto idBegin = p;
       if (isidbeg(*p) || isUnicodeAlpha(p, textEnd)) // IdStart
@@ -236,6 +254,7 @@
   }
 }
 
+/// Represents a DDoc section.
 class Section
 {
   string name;
@@ -246,6 +265,7 @@
     this.text = text;
   }
 
+  /// Case-insensitively compares the section's name with name2.
   bool Is(char[] name2)
   {
     return icompare(name, name2) == 0;
@@ -290,21 +310,24 @@
   }
 }
 
+/// Returns true if token is a Doxygen comment.
 bool isDoxygenComment(Token* token)
 { // Doxygen: '/+!' '/*!' '//!'
   return token.kind == TOK.Comment && token.start[2] == '!';
 }
 
+/// Returns true if token is a DDoc comment.
 bool isDDocComment(Token* token)
 { // DDOC: '/++' '/**' '///'
   return token.kind == TOK.Comment && token.start[1] == token.start[2];
 }
 
-/++
-  Returns the surrounding documentation comment tokens.
-  Note: this function works correctly only if
-  the source text is syntactically correct.
-+/
+/// Returns the surrounding documentation comment tokens.
+/// Params:
+///   node = the node to find doc comments for.
+///   isDocComment = a function predicate that checks for doc comment tokens.
+/// Note: this function works correctly only if
+///       the source text is syntactically correct.
 Token*[] getDocTokens(Node node, bool function(Token*) isDocComment = &isDDocComment)
 {
   Token*[] comments;
@@ -313,13 +336,12 @@
   auto token = node.begin;
   // Scan backwards until we hit another declaration.
 Loop:
-  while (1)
+  for (; token; token = token.prev)
   {
-    token = token.prev;
     if (token.kind == TOK.LBrace ||
         token.kind == TOK.RBrace ||
         token.kind == TOK.Semicolon ||
-        token.kind == TOK.HEAD ||
+        /+token.kind == TOK.HEAD ||+/
         (isEnumMember && token.kind == TOK.Comma))
       break;
 
@@ -379,6 +401,7 @@
 }
 
 /// Sanitizes a DDoc comment string.
+///
 /// Leading "commentChar"s are removed from the lines.
 /// The various newline types are converted to '\n'.
 /// Params:
diff -r 939097e0990f -r 57ef69eced96 trunk/src/macros_dil.ddoc
--- a/trunk/src/macros_dil.ddoc	Sat Feb 23 02:26:43 2008 +0100
+++ b/trunk/src/macros_dil.ddoc	Sat Feb 23 21:15:56 2008 +0100
@@ -17,3 +17,4 @@
 
 SYMBOL_ = $1
 SYMBOL  = $1
+DDD = ---
\ No newline at end of file
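
The sketch below illustrates the code-section skipping that this changeset adds to trunk/src/dil/doc/Doc.d. It is not part of the patch: the two helpers are reproduced as free functions over plain pointers (in the changeset they are members of DDocParser), the loop in main() merely stands in for scanSummaryAndDescription() and findNextIdColon(), and it uses present-day D and Phobos (std.stdio) instead of the project's Tango-based code. The "---" delimiters it skips are the ones the new DDD macro (DDD = ---) lets doc comments write as $(DDD).

// Standalone sketch only; names and setup are illustrative, not dil's API.
import std.stdio;

/// Returns true if p points to a "---" code-section delimiter before end.
bool isCodeSection(const(char)* p, const(char)* end)
{
  return p+2 < end && *p == '-' && p[1] == '-' && p[2] == '-';
}

/// Moves p past a "---"-delimited code section: over the opening delimiter,
/// the code lines and the closing delimiter, or to the last character if
/// the section is unterminated. Mirrors the logic added by the changeset.
void skipCodeSection(ref const(char)* p, const(char)* end)
{
  assert(isCodeSection(p, end));
  while (p < end && *p == '-') // Skip the opening delimiter.
    p++;
  p--;
  while (++p < end)            // Scan for the closing delimiter.
    if (isCodeSection(p, end))
      break;
  while (p < end && *p == '-') // Skip the closing delimiter.
    p++;
  p--;
  assert(p+1 == end || *p == '-'); // Same condition as the out-contract.
}

void main()
{
  // The "Params:" line inside the code section must not be mistaken
  // for the start of a new DDoc section.
  string text = "Summary.\n---\nParams: not a real section\n---\nParams:\n  x = a real section\n";
  const(char)* p = text.ptr;
  const(char)* end = text.ptr + text.length;
  while (p < end)
  {
    if (isCodeSection(p, end))
    {
      skipCodeSection(p, end);
      writeln("Skipped a code section.");
    }
    p++;
  }
}

As the diff to findNextIdColon() shows, the parser now calls these helpers while scanning for "Identifier:" lines, so identifier-colon pairs that happen to appear inside a "---" code block no longer open a new DDoc section.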