/system/tools/hidl/docs/src/lexer/

DocLexer.kt
    36: .map { it.replace(Regex("$\\s*^"), TokenGrammar.EMPTY_LINE.value) }
    44: … TokenGrammar.EMPTY_LINE.value -> tokens.add(TokenGrammar.newToken("", TokenGrammar.EMPTY_LINE))
    47: TokenGrammar.AT.value -> {
    48: tokens.add(TokenGrammar.newToken(token)) //'@'
    50: for (annotation in TokenGrammar.docAnnotations()) {
    52: tokens.add(TokenGrammar.newToken(scanner.next()))
    59: val id = TokenGrammar.getFromValueOrDefault(token)
    60: … val category = if (id == TokenGrammar.WORD) TokenCategory.DocWord else id.category
    61: tokens.add(TokenGrammar.newToken(token, category))

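Taken together, these matches show the doc lexer's flow: blank lines are rewritten into an EMPTY_LINE sentinel so paragraph breaks survive whitespace tokenization, '@' is emitted ahead of a possible doc annotation tag, and anything unrecognized falls back to a DocWord. A minimal runnable sketch of that flow, with hypothetical stand-ins (DocToken, EMPTY_LINE_SENTINEL) for the real Token and TokenGrammar types:

    import java.util.Scanner

    // Hypothetical stand-ins for TokenGrammar.EMPTY_LINE.value and Token.
    const val EMPTY_LINE_SENTINEL = "@@EMPTY_LINE@@"
    data class DocToken(val value: String, val kind: String)

    fun tokenizeDoc(comment: String): List<DocToken> {
        // A blank line would vanish under whitespace tokenization, so it is
        // first rewritten into a sentinel word that survives as its own token.
        val text = comment.lines()
            .joinToString("\n") { if (it.isBlank()) EMPTY_LINE_SENTINEL else it }

        val tokens = mutableListOf<DocToken>()
        val scanner = Scanner(text)
        while (scanner.hasNext()) {
            val word = scanner.next()
            tokens += when (word) {
                EMPTY_LINE_SENTINEL -> DocToken("", "EMPTY_LINE") // paragraph break
                "@" -> DocToken(word, "AT") // the next word may be a doc annotation tag
                else -> DocToken(word, "DocWord") // unknown words default to DocWord
            }
        }
        return tokens
    }
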
HidlLexer.kt
    42: TokenGrammar.COMMENT_LINE.value -> scanner.nextLine()
    45: TokenGrammar.COMMENT_START.value -> {
    46: … if (scanner.findWithinHorizon(Regex.escape(TokenGrammar.DOC_END.value), 0) == null) {
    54: TokenGrammar.DOC_START.value -> {
    55: tokens.add(TokenGrammar.newToken(token)) //doc_start
    75: tokens.add(TokenGrammar.newToken(TokenGrammar.DOC_END.value)) //doc_end
    78: TokenGrammar.AT.value -> {
    79: tokens.add(TokenGrammar.newToken(token)) //'@'
    81: for (annotation in TokenGrammar.annotations()) {
    86: if (scanner.hasNext(Regex.escape(TokenGrammar.PAREN_OPEN.value))) {
    [all …]

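HidlLexer treats the three comment forms differently: '//' discards the rest of the line, a plain '/*' block is skipped by scanning ahead to its terminator, and '/**' is kept as a doc_start token so the doc parser sees the comment body. A sketch of that dispatch, assuming delimiters are already space-separated (see ILexer below) and tokens are plain strings:

    import java.util.Scanner
    import java.util.regex.Pattern

    fun lexHidl(source: String): List<String> {
        val tokens = mutableListOf<String>()
        val scanner = Scanner(source)
        while (scanner.hasNext()) {
            when (val token = scanner.next()) {
                "//" -> if (scanner.hasNextLine()) scanner.nextLine() // line comment: drop the rest
                "/*" -> {
                    // Plain block comment: consume everything up to the closing "*/".
                    scanner.findWithinHorizon(Pattern.quote("*/"), 0)
                        ?: error("unterminated block comment")
                }
                "/**" -> tokens.add(token) // doc_start is kept for the doc parser
                "*/" -> tokens.add(token)  // doc_end
                else -> tokens.add(token)
            }
        }
        return tokens
    }
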
ILexer.kt
    32: val delimiters = TokenGrammar.values()
    34: .filter { it != TokenGrammar.COMMENT_START } //don't convert '/**' to '/* *'
    44: … newLine = newLine.replace(":\\s+:".toRegex(), TokenGrammar.PKG_SCOPE.value) //': :' => '::'
    46: newLine = newLine.replace("<\\s+<".toRegex(), TokenGrammar.LSHIFT.value)
    47: newLine = newLine.replace(">\\s+>".toRegex(), TokenGrammar.RSHIFT.value)
    48: newLine = newLine.replace("<\\s+=".toRegex(), TokenGrammar.LEQ.value)
    49: newLine = newLine.replace(">\\s+=".toRegex(), TokenGrammar.GEQ.value)
    77: val delimiters = TokenGrammar.values()

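The idiom here: pad every single-character delimiter with spaces so a whitespace-driven scanner emits it as its own token, then stitch back the two-character operators the padding tore apart. A self-contained sketch, with an assumed subset of the real delimiter list (the real code substitutes TokenGrammar values such as PKG_SCOPE and LSHIFT):

    // Assumed subset of the delimiters that TokenGrammar.values() contributes.
    private val DELIMITERS = listOf("(", ")", "{", "}", ";", ",", ":", "<", ">", "=", "@")

    fun padDelimiters(line: String): String {
        var newLine = line
        for (d in DELIMITERS) {
            newLine = newLine.replace(d, " $d ")
        }
        // Repair the multi-character operators that single-character padding split.
        newLine = newLine.replace(":\\s+:".toRegex(), "::")  // ': :' => '::'
        newLine = newLine.replace("<\\s+<".toRegex(), "<<")
        newLine = newLine.replace(">\\s+>".toRegex(), ">>")
        newLine = newLine.replace("<\\s+=".toRegex(), "<=")
        newLine = newLine.replace(">\\s+=".toRegex(), ">=")
        return newLine
    }

For example, padDelimiters("foo(a,b);") yields "foo ( a , b ) ; " up to spacing, which a Scanner then splits into clean tokens.
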
Token.kt
    19: data class Token(val identifier: TokenGrammar, val value: String, val category: TokenCategory)
    25: enum class TokenGrammar(val value: String, val category: TokenCategory) {
    140: private val map = TokenGrammar.values().associateBy(TokenGrammar::value)
    144: fun getFromValue(value: String): TokenGrammar? {
    148: fun getFromValueOrDefault(value: String): TokenGrammar {
    166: fun newToken(value: String, identifier: TokenGrammar): Token {
    170: fun annotations(): List<TokenGrammar> {
    171: return TokenGrammar.values().filter { it.category == TokenCategory.Annotation }
    174: fun docAnnotations(): List<TokenGrammar> {
    175: return TokenGrammar.values().filter { it.category == TokenCategory.DocAnnotation }

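Token.kt is the data model the rest of the tool keys off: an immutable Token triple, a TokenGrammar enum pairing each literal spelling with a category, and a reverse map built with associateBy so lookups fall back to WORD for unknown spellings. A condensed runnable sketch (the real enum has many more constants, and newToken's overloads are collapsed into a default parameter here):

    enum class TokenCategory { Annotation, DocAnnotation, Keyword, Punctuation, Word, DocWord }

    enum class TokenGrammar(val value: String, val category: TokenCategory) {
        PACKAGE("package", TokenCategory.Keyword),
        INTERFACE("interface", TokenCategory.Keyword),
        AT("@", TokenCategory.Punctuation),
        SEMICOLON(";", TokenCategory.Punctuation),
        PARAM("param", TokenCategory.DocAnnotation),
        RETURN("return", TokenCategory.DocAnnotation),
        WORD("", TokenCategory.Word);

        companion object {
            // Reverse lookup from literal spelling to grammar constant.
            private val map = TokenGrammar.values().associateBy(TokenGrammar::value)

            fun getFromValue(value: String): TokenGrammar? = map[value]

            // Unknown spellings (identifiers, literals) default to WORD.
            fun getFromValueOrDefault(value: String): TokenGrammar = getFromValue(value) ?: WORD

            fun newToken(value: String, identifier: TokenGrammar = getFromValueOrDefault(value)): Token =
                    Token(identifier, value, identifier.category)

            fun annotations(): List<TokenGrammar> =
                    TokenGrammar.values().filter { it.category == TokenCategory.Annotation }

            fun docAnnotations(): List<TokenGrammar> =
                    TokenGrammar.values().filter { it.category == TokenCategory.DocAnnotation }
        }
    }

    data class Token(val identifier: TokenGrammar, val value: String, val category: TokenCategory)
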
/system/tools/hidl/docs/src/parser/files/

AbstractFileParser.kt
    21: import lexer.TokenGrammar
    52: … val structs: List<EntryParser> by lazy { getEntriesByCompoundDeclarationParser(TokenGrammar.STRUCT…
    53: … val unions: List<EntryParser> by lazy { getEntriesByCompoundDeclarationParser(TokenGrammar.UNION) }
    59: private fun getEntriesByCompoundDeclarationParser(identifier: TokenGrammar): List<EntryParser> {
    65: TokenGrammar.INTERFACE,
    66: TokenGrammar.ENUM,
    67: TokenGrammar.STRUCT,
    68: TokenGrammar.UNION,
    69: TokenGrammar.TYPEDEF)
    84: if (token.identifier == TokenGrammar.DOC_START) {
    [all …]

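The by-lazy properties give each declaration kind a cached view over the parsed entries. A small sketch of the pattern, with DeclKind and Entry as local stand-ins for TokenGrammar and EntryParser:

    enum class DeclKind { INTERFACE, ENUM, STRUCT, UNION, TYPEDEF }
    data class Entry(val kind: DeclKind, val name: String)

    abstract class FileParserSketch {
        protected abstract val entries: List<Entry>

        // Computed once on first access, mirroring the `by lazy` properties above.
        val structs: List<Entry> by lazy { entriesOf(DeclKind.STRUCT) }
        val unions: List<Entry> by lazy { entriesOf(DeclKind.UNION) }
        val enums: List<Entry> by lazy { entriesOf(DeclKind.ENUM) }

        private fun entriesOf(kind: DeclKind): List<Entry> = entries.filter { it.kind == kind }
    }
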
package.kt
    21: import lexer.TokenGrammar
    37: if (token.identifier == TokenGrammar.PACKAGE) {
    42: … if (token.identifier != TokenGrammar.AT && token.identifier != TokenGrammar.SEMICOLON) {
    51: if (token.identifier != TokenGrammar.AT) break
    56: if (token.identifier != TokenGrammar.SEMICOLON) break

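parsePackageInfo scans for the `package` keyword, then collects tokens up to the terminating ';', with '@' splitting the dotted package name from its version. With string tokens, the walk might look like this (the Pair return shape is an assumption):

    // Parse e.g. ["package", "android.hardware.tests", "@", "1.0", ";"]
    // into "android.hardware.tests" to "1.0".
    fun parsePackageInfo(tokens: List<String>): Pair<String, String>? {
        val iter = tokens.iterator()
        while (iter.hasNext()) {
            if (iter.next() != "package") continue
            val parts = mutableListOf<String>()
            while (iter.hasNext()) {
                val tok = iter.next()
                if (tok == ";") {
                    // '@' splits the dotted package name from the version.
                    val at = parts.indexOf("@")
                    return if (at >= 0)
                        parts.take(at).joinToString("") to parts.drop(at + 1).joinToString("")
                    else
                        parts.joinToString("") to ""
                }
                parts += tok
            }
        }
        return null
    }
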
InterfaceFileParser.kt
    21: import lexer.TokenGrammar
    64: if (token.identifier == TokenGrammar.DOC_START) {
    67: } else if (token.identifier == TokenGrammar.DOC_END) {
    71: } else if (!inDoc && token.identifier == TokenGrammar.INTERFACE) {

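isInterface must ignore the word `interface` when it occurs inside a doc comment, so the scan toggles an inDoc flag on the comment delimiters. The same logic over string tokens:

    fun isInterface(tokens: List<String>): Boolean {
        var inDoc = false
        for (token in tokens) {
            when {
                token == "/**" -> inDoc = true              // doc_start
                token == "*/" -> inDoc = false              // doc_end
                !inDoc && token == "interface" -> return true
            }
        }
        return false
    }
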
/system/tools/hidl/docs/src/parser/elements/

AbstractParser.kt
    20: import lexer.TokenGrammar
    34: while (iter.hasNext() && peekToken(iter)?.identifier == TokenGrammar.EMPTY_LINE) {
    56: while (peekPreviousToken(iter)?.identifier == TokenGrammar.EMPTY_LINE) iter.previous()
    59: if (peekPreviousToken(iter)?.identifier == TokenGrammar.DOC_START) iter.previous()
    61: if (peekToken(iter)!!.identifier != TokenGrammar.DOC_START)
    69: if (token.identifier == TokenGrammar.DOC_END) {
    71: } else if (token.identifier == TokenGrammar.DOC_START) {
    76: if (peekPreviousToken(iter)?.identifier != TokenGrammar.DOC_END) {
    95: if (token.identifier == TokenGrammar.DOC_START) {
    97: } else if (token.identifier == TokenGrammar.DOC_END) {
    [all …]

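peekToken and peekPreviousToken, used throughout these matches, are the standard ListIterator peek: step, read, step back. A sketch of the helpers plus the blank-line skip from line 34, with string tokens:

    fun <T> peekToken(iter: ListIterator<T>): T? =
            if (iter.hasNext()) iter.next().also { iter.previous() } else null

    fun <T> peekPreviousToken(iter: ListIterator<T>): T? =
            if (iter.hasPrevious()) iter.previous().also { iter.next() } else null

    // Mirrors line 34: consume leading blank-line tokens without overshooting.
    fun skipEmptyLines(iter: ListIterator<String>) {
        while (iter.hasNext() && peekToken(iter) == "EMPTY_LINE") iter.next()
    }
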
EntryParser.kt
    21: import lexer.TokenGrammar
    76: while (peekPreviousToken(iter)?.identifier == TokenGrammar.EMPTY_LINE) iter.previous()
    78: if (peekPreviousToken(iter)?.identifier == TokenGrammar.DOC_START) iter.previous()
    95: assert(peekToken(iter)!!.identifier == TokenGrammar.DOC_START)
    97: assert(peekPreviousToken(iter)!!.identifier == TokenGrammar.DOC_END)
    100: if (peekToken(iter)?.identifier != TokenGrammar.DOC_START) {
    115: while (iter.hasNext() && peekToken(iter)!!.identifier == TokenGrammar.AT) {
    130: assert(token.identifier != TokenGrammar.AT && token.category != TokenCategory.Annotation)
    136: if (token.identifier == TokenGrammar.DOC_START) {
    138: } else if (token.identifier == TokenGrammar.PACKAGE) {
    [all …]

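Lines 76 and 78 rewind the iterator so an entry is parsed together with the doc comment that precedes it: back over blank-line tokens, then one more step if that lands just past a doc_start. A string-token sketch:

    fun <T> peekPrevious(iter: ListIterator<T>): T? =
            if (iter.hasPrevious()) iter.previous().also { iter.next() } else null

    // Back up over blank lines, then include the doc block if one directly precedes.
    fun rewindToEntryStart(iter: ListIterator<String>) {
        while (peekPrevious(iter) == "EMPTY_LINE") iter.previous()
        if (peekPrevious(iter) == "/**") iter.previous()
    }
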
DocAnnotationParser.kt
    22: import lexer.TokenGrammar
    30: lateinit var tag: TokenGrammar
    45: TokenGrammar.EMPTY_LINE -> "\n\n"
    62: if (peekToken(iter)?.identifier == TokenGrammar.AT) iter.next()
    73: token.identifier == TokenGrammar.EMPTY_LINE -> break@loop
    76: token.identifier == TokenGrammar.DOC_END ||
    77: … token.identifier == TokenGrammar.AT && peekToken(iter)?.category == TokenCategory.DocAnnotation -> …
    101: if (token.identifier == TokenGrammar.PARAM || token.identifier == TokenGrammar.RETURN) {
    114: descTokens.map { if (it.identifier == TokenGrammar.EMPTY_LINE) "\n\n" else it.value }

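A doc annotation's description runs until the first blank line, the end of the doc block, or the '@' that opens the next annotation; the last two are pushed back for the caller to handle. A sketch, with a small Tok stand-in carrying the one category bit the check needs:

    data class Tok(val value: String, val isDocAnnotationTag: Boolean = false)

    fun peek(iter: ListIterator<Tok>): Tok? =
            if (iter.hasNext()) iter.next().also { iter.previous() } else null

    fun collectAnnotationDesc(iter: ListIterator<Tok>): List<String> {
        val desc = mutableListOf<String>()
        loop@ while (iter.hasNext()) {
            val token = iter.next()
            when {
                token.value == "EMPTY_LINE" -> break@loop           // blank line ends it
                token.value == "*/" ||
                        (token.value == "@" && peek(iter)?.isDocAnnotationTag == true) -> {
                    iter.previous()                                  // leave it for the caller
                    break@loop
                }
                else -> desc += token.value
            }
        }
        return desc
    }
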
DocParser.kt
    22: import lexer.TokenGrammar
    39: .filterNot { it.identifier == TokenGrammar.DOC_START }
    40: .filterNot { it.identifier == TokenGrammar.DOC_END }
    43: TokenGrammar.EMPTY_LINE -> "\n\n"
    60: assert(token.identifier == TokenGrammar.DOC_START)
    61: assert(tokens.last().identifier == TokenGrammar.DOC_END)
    67: … token.identifier == TokenGrammar.AT && peekToken(iter)?.category == TokenCategory.DocAnnotation -> …
    70: token.identifier == TokenGrammar.DOC_END -> break@loop

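formatDescription strips the comment delimiters and joins the doc words, turning blank-line tokens back into paragraph breaks. A string-token sketch:

    fun formatDescription(tokens: List<String>): String =
            tokens.asSequence()
                    .filterNot { it == "/**" || it == "*/" }      // drop doc delimiters
                    .joinToString(" ") { if (it == "EMPTY_LINE") "\n\n" else it }
                    .trim()
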
AnnotationParser.kt
    21: import lexer.TokenGrammar
    28: lateinit var name: TokenGrammar
    39: if (peekToken(iter)?.identifier == TokenGrammar.AT) iter.next()

EntryCollectionParser.kt
    20: import lexer.TokenGrammar
    58: if (token.identifier == TokenGrammar.DOC_START) {

/system/tools/hidl/docs/src/parser/elements/declarations/

CompoundDeclarationParser.kt
    21: import lexer.TokenGrammar
    30: lateinit var type: TokenGrammar
    42: assert(token.identifier == TokenGrammar.STRUCT || token.identifier == TokenGrammar.UNION)
    43: assert(tokens.last().identifier == TokenGrammar.SEMICOLON)
    56: delimiter = TokenGrammar.SEMICOLON,
    57: openDelimiter = TokenGrammar.BRACE_OPEN,
    58: closeDelimiter = TokenGrammar.BRACE_CLOSE)
    66: if (statementTokens.first().identifier == TokenGrammar.DOC_START) {
    67: … val idx = statementTokens.indexOfFirst { it.identifier == TokenGrammar.DOC_END }
    78: TokenGrammar.STRUCT, TokenGrammar.UNION -> {
    [all …]

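The compound body is split into member statements by scanDelimitedList (';' within '{' ... '}'), and a statement that opens with a doc comment is divided at the matching doc_end so documentation and declaration are parsed separately (lines 66 and 67). That division as a string-token sketch:

    // Divide one member statement into (doc tokens, declaration tokens).
    fun splitMemberDoc(statement: List<String>): Pair<List<String>, List<String>> {
        if (statement.firstOrNull() != "/**") return emptyList<String>() to statement
        val idx = statement.indexOfFirst { it == "*/" } // matching doc_end
        return statement.take(idx + 1) to statement.drop(idx + 1)
    }
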
AbstractDeclarationParser.kt
    21: import lexer.TokenGrammar
    36: if (token.category != TokenCategory.Annotation && token.identifier != TokenGrammar.DOC_END)
    48: delimiter: TokenGrammar = TokenGrammar.COMMA,
    49: openDelimiter: TokenGrammar = TokenGrammar.PAREN_OPEN,
    50: … closeDelimiter: TokenGrammar = TokenGrammar.PAREN_CLOSE): List<List<Token>> {
    80: if (token.identifier == TokenGrammar.DOC_START) {
    82: } else if (token.identifier == TokenGrammar.DOC_END) {

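scanDelimitedList is the workhorse shared by the declaration parsers: consume the opening delimiter, split at the separator, and track nesting depth so inner pairs don't end the list early. A string-token sketch of what the signature at lines 48 to 50 implies:

    fun scanDelimitedList(
            iter: Iterator<String>,
            delimiter: String = ",",
            openDelimiter: String = "(",
            closeDelimiter: String = ")"): List<List<String>> {
        val lists = mutableListOf<List<String>>()
        var current = mutableListOf<String>()
        var depth = 0

        check(iter.next() == openDelimiter) // caller positions us at the open
        depth++

        while (iter.hasNext() && depth > 0) {
            val token = iter.next()
            when (token) {
                openDelimiter -> { depth++; current += token }
                closeDelimiter -> { depth--; if (depth > 0) current += token }
                delimiter ->
                    if (depth == 1) { lists += current; current = mutableListOf() }
                    else current += token
                else -> current += token
            }
        }
        if (current.isNotEmpty()) lists += current
        return lists
    }

For example, the tokens of "( a , ( b , c ) , d )" come back as three sub-lists, with the inner parenthesized run kept whole because only the configured pair adjusts the depth.
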
EnumDeclarationParser.kt
    21: import lexer.TokenGrammar
    41: assert(token.identifier == TokenGrammar.ENUM)
    42: assert(tokens.last().identifier == TokenGrammar.SEMICOLON)
    51: if (token.identifier != TokenGrammar.COLON)
    57: while (iter.hasNext() && peekToken(iter)!!.identifier != TokenGrammar.BRACE_OPEN) {
    64: … scanDelimitedList(iter, openDelimiter = TokenGrammar.BRACE_OPEN, closeDelimiter = TokenGrammar.BRA…
    71: if (statementTokens.first().identifier == TokenGrammar.DOC_START) {
    72: … val idx = statementTokens.indexOfFirst { it.identifier == TokenGrammar.DOC_END }
    99: if (tokens.any { it.identifier == TokenGrammar.EQUAL }) {
    100: this.value = tokens.takeLastWhile { it.identifier != TokenGrammar.EQUAL }

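Lines 99 and 100 recover a member's value: if the statement contains '=', everything after it (possibly several tokens, such as a shifted constant) is the value. As a small sketch:

    data class EnumMember(val name: String, val value: String?)

    fun parseEnumMember(tokens: List<String>): EnumMember =
            if (tokens.any { it == "=" })
                EnumMember(
                        name = tokens.takeWhile { it != "=" }.joinToString(" "),
                        value = tokens.takeLastWhile { it != "=" }.joinToString(" "))
            else
                EnumMember(name = tokens.joinToString(" "), value = null)

For instance, parseEnumMember(listOf("STATUS_OK", "=", "1", "<<", "3")) yields the name "STATUS_OK" with the value "1 << 3".
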
InterfaceDeclarationParser.kt
    21: import lexer.TokenGrammar
    37: assert(tokens.first().identifier == TokenGrammar.INTERFACE)
    38: assert(tokens.last().identifier == TokenGrammar.SEMICOLON)
    41: val sigToks = tokens.takeWhile { it.identifier != TokenGrammar.BRACE_OPEN }
    48: if (sigToks.any { it.identifier == TokenGrammar.EXTENDS }) {

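The signature tokens are everything before the opening brace; an `extends` token there names the super-interface (line 48). A string-token sketch of that split:

    // e.g. ["interface", "IFoo", "extends", "IBase", "{", ..., "}", ";"]
    fun parseInterfaceSig(tokens: List<String>): Pair<String, String?> {
        val sigToks = tokens.takeWhile { it != "{" }
        val name = sigToks[1] // token after the `interface` keyword
        val superIface = sigToks.indexOf("extends")
                .takeIf { it >= 0 }
                ?.let { sigToks[it + 1] }
        return name to superIface
    }
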
MethodDeclarationParser.kt
    21: import lexer.TokenGrammar
    41: assert(tokens.last().identifier == TokenGrammar.SEMICOLON)
    62: if (token.identifier == TokenGrammar.PAREN_OPEN) {
    69: if (token.identifier == TokenGrammar.GENERATES) {
    75: assert(token.identifier == TokenGrammar.SEMICOLON)

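The method walk implied by these matches: the name comes first, '(' opens the parameter list, an optional `generates` keyword introduces the results, and ';' terminates the declaration. A string-token sketch:

    // Walk e.g. ["read", "(", "int32_t", "len", ")", "generates", "(", "vec<uint8_t>", "data", ")", ";"].
    fun parseMethod(iter: Iterator<String>): Triple<String, List<String>, List<String>> {
        val name = iter.next()
        val params = mutableListOf<String>()
        val results = mutableListOf<String>()
        var target = params
        while (iter.hasNext()) {
            val token = iter.next()
            when (token) {
                "(", ")" -> {}                   // list delimiters, not content
                "generates" -> target = results  // switch to the generates clause
                ";" -> return Triple(name, params, results)
                else -> target += token
            }
        }
        error("method declaration missing ';'")
    }
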
TypedefDeclarationParser.kt
    20: import lexer.TokenGrammar
    33: assert(tokens.first().identifier == TokenGrammar.TYPEDEF)
    34: assert(tokens.last().identifier == TokenGrammar.SEMICOLON)

/system/tools/hidl/docs/src/writer/elements/

MethodElement.kt
    19: import lexer.TokenGrammar
    61: private fun getDocAnnotationDesc(tag: TokenGrammar, arg: ArgEntry): String {
