/external/python/cpython3/Parser/pgen/ |
D | metaparser.py |
    4  import tokenize # from stdlib
   13  tokenize.NAME: "NAME",
   14  tokenize.STRING: "STRING",
   15  tokenize.NEWLINE: "NEWLINE",
   16  tokenize.NL: "NL",
   17  tokenize.OP: "OP",
   18  tokenize.ENDMARKER: "ENDMARKER",
   19  tokenize.COMMENT: "COMMENT",
   25  self.generator = tokenize.generate_tokens(grammar_adaptor.readline)
   32  while self.type != tokenize.ENDMARKER:
  [all …]
|
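Aside: the metaparser.py hits above follow the stdlib pattern of feeding a readline callable to tokenize.generate_tokens() and advancing until ENDMARKER. A minimal standalone sketch of that loop (the grammar text is made up; pgen's GrammarParser and grammar_adaptor are not reproduced here):

    import io
    import tokenize

    source = "expr: term ('+' term)*\n"  # hypothetical grammar line, not from pgen
    gen = tokenize.generate_tokens(io.StringIO(source).readline)
    tok = next(gen)
    while tok.type != tokenize.ENDMARKER:
        # tok_name maps numeric token types back to readable names
        print(tokenize.tok_name[tok.type], repr(tok.string))
        tok = next(gen)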
/external/minijail/ |
D | util_unittest.cc |
   40  TEST(tokenize, null_stringp) { in TEST() argument
   41  ASSERT_EQ(nullptr, tokenize(nullptr, nullptr)); in TEST()
   42  ASSERT_EQ(nullptr, tokenize(nullptr, "")); in TEST()
   43  ASSERT_EQ(nullptr, tokenize(nullptr, ",")); in TEST()
   46  ASSERT_EQ(nullptr, tokenize(&p, nullptr)); in TEST()
   50  TEST(tokenize, null_delim) { in TEST() argument
   53  ASSERT_EQ(str, tokenize(&p, nullptr)); in TEST()
   58  ASSERT_EQ(str, tokenize(&p, "")); in TEST()
   64  TEST(tokenize, basic) { in TEST() argument
   67  ASSERT_EQ("a", std::string(tokenize(&p, ","))); in TEST()
  [all …]
|
/external/python/cpython3/Tools/peg_generator/pegen/ |
D | tokenizer.py |
    2  import tokenize
   10  def shorttok(tok: tokenize.TokenInfo) -> str:
   20  _tokens: List[tokenize.TokenInfo]
   22  def __init__(self, tokengen: Iterator[tokenize.TokenInfo], *, verbose: bool = False):
   30  def getnext(self) -> tokenize.TokenInfo:
   35  if tok.type in (tokenize.NL, tokenize.COMMENT):
   47  def peek(self) -> tokenize.TokenInfo:
   51  if tok.type in (tokenize.NL, tokenize.COMMENT):
   58  def diagnose(self) -> tokenize.TokenInfo:
|
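The tokenizer.py hits above wrap a token generator with caching getnext()/peek() methods that skip NL and COMMENT tokens. A much-reduced sketch of that idea (this is not the pegen Tokenizer, just an illustration of the pattern):

    import io
    import tokenize
    from typing import Iterator, Optional

    class PeekableTokens:
        # Minimal sketch: peek() caches one token, getnext() consumes it.
        def __init__(self, tokengen: Iterator[tokenize.TokenInfo]):
            self._gen = tokengen
            self._peeked: Optional[tokenize.TokenInfo] = None

        def peek(self) -> tokenize.TokenInfo:
            if self._peeked is None:
                tok = next(self._gen)
                while tok.type in (tokenize.NL, tokenize.COMMENT):
                    tok = next(self._gen)
                self._peeked = tok
            return self._peeked

        def getnext(self) -> tokenize.TokenInfo:
            tok = self.peek()
            self._peeked = None
            return tok

    stream = PeekableTokens(tokenize.generate_tokens(io.StringIO("x = 1\n").readline))
    print(stream.peek().string, stream.getnext().string)  # both print 'x'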
/external/googletest/googlemock/scripts/generator/cpp/ |
D | ast.py |
   44  from cpp import tokenize
  549  if parts[-1].token_type == tokenize.NAME:
  579  if (type_name and type_name[-1].token_type == tokenize.NAME and
  580  p.token_type == tokenize.NAME):
  581  type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
  749  if token.token_type == tokenize.NAME:
  768  if next.token_type == tokenize.SYNTAX and next.name == '(':
  773  syntax = tokenize.SYNTAX
  783  new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
  786  last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)
  [all …]
|
/external/python/cpython3/Doc/library/ |
D | tokenize.rst |
    1  :mod:`tokenize` --- Tokenizer for Python source
    4  .. module:: tokenize
   10  **Source code:** :source:`Lib/tokenize.py`
   14  The :mod:`tokenize` module provides a lexical scanner for Python source code,
   23  :term:`named tuple` returned from :func:`tokenize.tokenize`.
   30  .. function:: tokenize(readline)
   32  The :func:`.tokenize` generator requires one argument, *readline*, which
   57  :func:`.tokenize` determines the source encoding of the file by looking for a
   64  Like :func:`.tokenize`, the *readline* argument is a callable returning
   69  :func:`.tokenize`. It does not yield an :data:`~token.ENCODING` token.
  [all …]
|
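For orientation, a minimal use of the API this .rst documents (example mine, not taken from the docs): tokenize.tokenize() takes a readline that returns bytes and yields an ENCODING token first; tokenize.generate_tokens() is the str-based variant that skips it.

    import io
    import tokenize

    src = b"x = 1  # demo\n"
    for tok in tokenize.tokenize(io.BytesIO(src).readline):
        print(tokenize.tok_name[tok.type], repr(tok.string))
    # Prints ENCODING ('utf-8') first, then NAME, OP, NUMBER, COMMENT, NEWLINE, ENDMARKER.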
/external/python/cpython2/Tools/scripts/ |
D | checkappend.py |
   39  import tokenize
  106  tokenize.tokenize(self.file.readline, self.tokeneater)
  107  except tokenize.TokenError, msg:
  113  NEWLINE=tokenize.NEWLINE,
  114  JUNK=(tokenize.COMMENT, tokenize.NL),
  115  OP=tokenize.OP,
  116  NAME=tokenize.NAME):
|
D | cleanfuture.py |
   42  import tokenize
  157  STRING = tokenize.STRING
  158  NL = tokenize.NL
  159  NEWLINE = tokenize.NEWLINE
  160  COMMENT = tokenize.COMMENT
  161  NAME = tokenize.NAME
  162  OP = tokenize.OP
  165  get = tokenize.generate_tokens(self.getline).next
|
D | reindent.py |
   44  import tokenize
  206  tokenize.tokenize(self.getline, self.tokeneater)
  288  INDENT=tokenize.INDENT,
  289  DEDENT=tokenize.DEDENT,
  290  NEWLINE=tokenize.NEWLINE,
  291  COMMENT=tokenize.COMMENT,
  292  NL=tokenize.NL):
|
/external/python/cpython3/Tools/i18n/ |
D | pygettext.py |
  166  import tokenize
  334  if ttype == tokenize.STRING and is_literal_string(tstring):
  337  elif ttype not in (tokenize.COMMENT, tokenize.NL):
  341  if ttype == tokenize.NAME and tstring in ('class', 'def'):
  344  if ttype == tokenize.NAME and tstring in opts.keywords:
  349  if ttype == tokenize.OP:
  360  if ttype == tokenize.STRING and is_literal_string(tstring):
  363  elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
  364  tokenize.COMMENT):
  369  if ttype == tokenize.OP and tstring == '(':
  [all …]
|
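The pygettext.py hits above walk the token stream looking for a keyword name followed by '(' and then STRING tokens. A loose sketch of that extraction idea (greatly simplified; is_literal_string, opts.keywords and pygettext's real state machine are not reproduced):

    import io
    import tokenize

    KEYWORDS = {"_"}  # stand-in for opts.keywords

    def extract_strings(code):
        found = []
        after_keyword = in_call = False
        for tok in tokenize.generate_tokens(io.StringIO(code).readline):
            if tok.type == tokenize.NAME and tok.string in KEYWORDS:
                after_keyword = True
            elif after_keyword and tok.type == tokenize.OP and tok.string == "(":
                after_keyword, in_call = False, True
            elif in_call and tok.type == tokenize.STRING:
                found.append(tok.string)
            elif in_call and tok.type == tokenize.OP and tok.string == ")":
                in_call = False
            else:
                after_keyword = False
            # (real pygettext also tracks COMMENT/NL, nesting and plural forms)
        return found

    print(extract_strings('x = _("hello") + _("world")\n'))  # ['"hello"', '"world"']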
/external/python/cpython2/Tools/i18n/ |
D | pygettext.py |
  165  import tokenize
  369  if ttype == tokenize.STRING:
  372  elif ttype not in (tokenize.COMMENT, tokenize.NL):
  376  if ttype == tokenize.NAME and tstring in ('class', 'def'):
  379  if ttype == tokenize.NAME and tstring in opts.keywords:
  384  if ttype == tokenize.OP and tstring == ':':
  389  if ttype == tokenize.STRING:
  392  elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
  393  tokenize.COMMENT):
  398  if ttype == tokenize.OP and tstring == '(':
  [all …]
|
/external/llvm-project/clang-tools-extra/unittests/clang-include-fixer/ |
D | FuzzySymbolIndexTests.cpp |
   22  EXPECT_THAT(FuzzySymbolIndex::tokenize("URLHandlerCallback"), in TEST()
   24  EXPECT_THAT(FuzzySymbolIndex::tokenize("snake_case11"), in TEST()
   26  EXPECT_THAT(FuzzySymbolIndex::tokenize("__$42!!BOB\nbob"), in TEST()
   37  auto Tokens = FuzzySymbolIndex::tokenize(Identifier);
   45  return FuzzySymbolIndex::queryRegexp(FuzzySymbolIndex::tokenize(query)); in TEST()
|
/external/webrtc/rtc_base/ |
D | string_encode_unittest.cc |
  141  EXPECT_EQ(5ul, tokenize("one two three four five", ' ', &fields)); in TEST()
  143  EXPECT_EQ(1ul, tokenize("one", ' ', &fields)); in TEST()
  147  EXPECT_EQ(5ul, tokenize(" one two three four five ", ' ', &fields)); in TEST()
  149  EXPECT_EQ(1ul, tokenize(" one ", ' ', &fields)); in TEST()
  151  EXPECT_EQ(0ul, tokenize(" ", ' ', &fields)); in TEST()
  158  tokenize("find middle one", ' ', &fields); in TEST()
  164  tokenize(" find middle one ", ' ', &fields); in TEST()
  168  tokenize(" ", ' ', &fields); in TEST()
  188  ASSERT_EQ(0ul, tokenize("D \"A B", ' ', '(', ')', nullptr)); in TEST()
  191  tokenize("A B C", ' ', '"', '"', &fields); in TEST()
  [all …]
|
/external/deqp/framework/randomshaders/ |
D | rsgStatement.hpp |
   44  virtual void tokenize (GeneratorState& state, TokenStream& str) const = DE_NULL;
   62  void tokenize (GeneratorState& state, TokenStream& str) const;
   78  void tokenize (GeneratorState& state, TokenStream& str) const;
   98  void tokenize (GeneratorState& state, TokenStream& str) const;
  119  void tokenize (GeneratorState& state, TokenStream& str) const;
  145  void tokenize (GeneratorState& state, TokenStream& str) const;
|
D | rsgExpression.hpp |
   57  virtual void tokenize (GeneratorState& state, TokenStream& str) const = DE_NULL;
   74  …void tokenize (GeneratorState& state, TokenStream& str) const { DE_UNREF(state); str << Tok… in tokenize() function in rsg::VariableAccess
  114  void tokenize (GeneratorState& state, TokenStream& str) const;
  132  void tokenize (GeneratorState& state, TokenStream& str) const;
  151  void tokenize (GeneratorState& state, TokenStream& str) const;
  169  void tokenize (GeneratorState& state, TokenStream& str) const;
  191  void tokenize (GeneratorState& state, TokenStream& str) const;
  216  void tokenize (GeneratorState& state, TokenStream& str) const;
  236  void tokenize (GeneratorState& state, TokenStream& str) const;
  258  void tokenize (GeneratorState& state, TokenStream& str) const;
|
D | rsgStatement.cpp |
  203  void BlockStatement::tokenize (GeneratorState& state, TokenStream& str) const in tokenize() function in rsg::BlockStatement
  208  (*i)->tokenize(state, str); in tokenize()
  219  void ExpressionStatement::tokenize (GeneratorState& state, TokenStream& str) const in tokenize() function in rsg::ExpressionStatement
  222  m_expression->tokenize(state, str); in tokenize()
  333  void DeclarationStatement::tokenize (GeneratorState& state, TokenStream& str) const in tokenize() function in rsg::DeclarationStatement
  340  m_expression->tokenize(state, str); in tokenize()
  456  void ConditionalStatement::tokenize (GeneratorState& state, TokenStream& str) const in tokenize() function in rsg::ConditionalStatement
  462  m_condition->tokenize(state, str); in tokenize()
  469  m_trueStatement->tokenize(state, str); in tokenize()
  473  m_trueStatement->tokenize(state, str); in tokenize()
  [all …]
|
D | rsgShader.cpp |
   93  void Shader::tokenize (GeneratorState& state, TokenStream& str) const in tokenize() function in rsg::Shader
  101  m_globalStatements[ndx]->tokenize(state, str); in tokenize()
  107  m_functions[ndx]->tokenize(state, str); in tokenize()
  112  m_mainFunction.tokenize(state, str); in tokenize()
  125  void Function::tokenize (GeneratorState& state, TokenStream& str) const in tokenize() function in rsg::Function
  147  m_functionBlock.tokenize(state, str); in tokenize()
|
/external/python/cpython3/Lib/ |
D | tabnanny.py |
   25  import tokenize
   26  if not hasattr(tokenize, 'NL'):
   98  f = tokenize.open(file)
  107  process_tokens(tokenize.generate_tokens(f.readline))
  109  except tokenize.TokenError as msg:
  278  INDENT = tokenize.INDENT
  279  DEDENT = tokenize.DEDENT
  280  NEWLINE = tokenize.NEWLINE
  281  JUNK = tokenize.COMMENT, tokenize.NL
|
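tabnanny combines tokenize.open(), which honours the file's declared source encoding, with generate_tokens() and guards against TokenError. A standalone sketch of that combination (the file name is hypothetical):

    import tokenize

    def count_indents(path):
        # Count INDENT tokens in a source file, tolerating tokenizer errors.
        with tokenize.open(path) as f:  # opened using the detected source encoding
            try:
                return sum(1 for tok in tokenize.generate_tokens(f.readline)
                           if tok.type == tokenize.INDENT)
            except tokenize.TokenError:
                return -1

    # count_indents("example.py")  # hypothetical file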
/external/python/cpython2/Doc/library/ |
D | tokenize.rst |
    1  :mod:`tokenize` --- Tokenizer for Python source
    4  .. module:: tokenize
    9  **Source code:** :source:`Lib/tokenize.py`
   13  The :mod:`tokenize` module provides a lexical scanner for Python source code,
   22  :func:`tokenize.generate_tokens` for the character sequence that identifies a
   48  .. function:: tokenize(readline[, tokeneater])
   50  The :func:`.tokenize` function accepts two parameters: one representing the input
   51  stream, and one providing an output mechanism for :func:`.tokenize`.
   67  :mod:`tokenize`, as are two additional token type values that might be passed to
   68  the *tokeneater* function by :func:`.tokenize`:
  [all …]
|
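Unlike the Python 3 generator API, the Python 2 tokenize() documented above drives a tokeneater callback. A minimal sketch in Python 2 syntax (example mine, not from the docs):

    # Python 2: tokenize() pushes each token into the tokeneater callback.
    import StringIO
    import tokenize

    def tokeneater(ttype, tstring, start, end, line):
        # tok_name maps numeric token ids to readable names
        print tokenize.tok_name[ttype], repr(tstring)

    tokenize.tokenize(StringIO.StringIO("x = 1\n").readline, tokeneater)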
/external/python/cpython2/Lib/ |
D | tabnanny.py |
   26  import tokenize
   27  if not hasattr(tokenize, 'NL'):
  106  process_tokens(tokenize.generate_tokens(f.readline))
  108  except tokenize.TokenError, msg:
  274  INDENT = tokenize.INDENT
  275  DEDENT = tokenize.DEDENT
  276  NEWLINE = tokenize.NEWLINE
  277  JUNK = tokenize.COMMENT, tokenize.NL
|
/external/tensorflow/tensorflow/python/autograph/pyct/ |
D | parser.py |
   29  import tokenize
   70  token_gen = tokenize.generate_tokens(six.StringIO(code_string).readline)
   77  except tokenize.TokenError:
   85  if tok_type == tokenize.INDENT:
   90  tokenize.NL, tokenize.NEWLINE, tokenize.STRING, tokenize.COMMENT):
  101  if tok_type == tokenize.INDENT:
  113  new_code = tokenize.untokenize(tokens)
|
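The parser.py hits above tokenize a code string, inspect INDENT tokens, and rebuild source with tokenize.untokenize(). The round trip itself can be shown in isolation (autograph's actual dedent logic, which rewrites INDENT token strings before untokenizing, is not reproduced here):

    import io
    import tokenize

    code = "if x:\n    y = 1\n"
    tokens = list(tokenize.generate_tokens(io.StringIO(code).readline))
    rebuilt = tokenize.untokenize(tokens)
    print(rebuilt == code)  # True for this simple input: full 5-tuples round-trip the source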
/external/autotest/utils/ |
D | reindent.py |
   44  import tokenize
  176  tokenize.tokenize(self.getline, self.tokeneater)
  258  INDENT=tokenize.INDENT,
  259  DEDENT=tokenize.DEDENT,
  260  NEWLINE=tokenize.NEWLINE,
  261  COMMENT=tokenize.COMMENT,
  262  NL=tokenize.NL):
|
/external/python/cpython3/Tools/scripts/ |
D | highlight.py |
   11  import tokenize
   35  tok_type = tokenize.COMMENT
   37  for tok in tokenize.generate_tokens(readline):
   41  if tok_type == tokenize.COMMENT:
   43  elif tok_type == tokenize.OP and tok_str[:1] not in '{}[](),.:;@':
   45  elif tok_type == tokenize.STRING:
   47  if prev_tok_type == tokenize.INDENT or scol==0:
   49  elif tok_type == tokenize.NAME:
|
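The highlight.py hits above classify each token by type to choose a colour. A cut-down sketch of that classification loop (the class names and keyword handling here are illustrative, not highlight.py's own):

    import io
    import keyword
    import tokenize

    def classify(code):
        for tok in tokenize.generate_tokens(io.StringIO(code).readline):
            if tok.type == tokenize.COMMENT:
                kind = "comment"
            elif tok.type == tokenize.STRING:
                kind = "string"
            elif tok.type == tokenize.NAME and keyword.iskeyword(tok.string):
                kind = "keyword"
            else:
                kind = "plain"
            yield kind, tok.string

    for kind, text in classify("def f():  # demo\n    return 'hi'\n"):
        print(kind, repr(text))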
D | cleanfuture.py |
   42  import tokenize
  156  STRING = tokenize.STRING
  157  NL = tokenize.NL
  158  NEWLINE = tokenize.NEWLINE
  159  COMMENT = tokenize.COMMENT
  160  NAME = tokenize.NAME
  161  OP = tokenize.OP
  164  get = tokenize.generate_tokens(self.getline).__next__
|
D | reindent.py |
   46  import tokenize
  122  encoding, _ = tokenize.detect_encoding(f.readline)
  202  tokens = tokenize.generate_tokens(self.getline)
  286  INDENT=tokenize.INDENT,
  287  DEDENT=tokenize.DEDENT,
  288  NEWLINE=tokenize.NEWLINE,
  289  COMMENT=tokenize.COMMENT,
  290  NL=tokenize.NL):
|
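reindent.py's line 122 above uses tokenize.detect_encoding(), which reads at most two lines looking for a PEP 263 coding cookie or a UTF-8 BOM. A standalone sketch:

    import io
    import tokenize

    raw = b"# -*- coding: latin-1 -*-\nname = 'caf\xe9'\n"
    encoding, sniffed = tokenize.detect_encoding(io.BytesIO(raw).readline)
    print(encoding)  # 'iso-8859-1' (the cookie's name, normalized)
    print(sniffed)   # raw lines consumed while sniffing (here, just the cookie line)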
/external/python/pyfakefs/pyfakefs/ |
D | pytest_plugin.py |
   13  import tokenize
   29  Patcher.SKIPMODULES.add(tokenize)
   41  tokenize._builtin_open = patcher.original_open
|