Searched refs:tokenize (Results 1 – 25 of 270) sorted by relevance

/external/minijail/
util_unittest.cc
41 TEST(tokenize, null_stringp) { in TEST() argument
42 ASSERT_EQ(nullptr, tokenize(nullptr, nullptr)); in TEST()
43 ASSERT_EQ(nullptr, tokenize(nullptr, "")); in TEST()
44 ASSERT_EQ(nullptr, tokenize(nullptr, ",")); in TEST()
47 ASSERT_EQ(nullptr, tokenize(&p, nullptr)); in TEST()
51 TEST(tokenize, null_delim) { in TEST() argument
54 ASSERT_EQ(str, tokenize(&p, nullptr)); in TEST()
59 ASSERT_EQ(str, tokenize(&p, "")); in TEST()
65 TEST(tokenize, basic) { in TEST() argument
68 ASSERT_EQ("a", std::string(tokenize(&p, ","))); in TEST()
[all …]
/external/python/cpython3/Tools/peg_generator/pegen/
tokenizer.py
2 import tokenize
10 def shorttok(tok: tokenize.TokenInfo) -> str:
20 _tokens: List[tokenize.TokenInfo]
22 def __init__(self, tokengen: Iterator[tokenize.TokenInfo], *, verbose: bool = False):
30 def getnext(self) -> tokenize.TokenInfo:
35 if tok.type in (tokenize.NL, tokenize.COMMENT):
47 def peek(self) -> tokenize.TokenInfo:
51 if tok.type in (tokenize.NL, tokenize.COMMENT):
58 def diagnose(self) -> tokenize.TokenInfo:
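
For reference, the pegen Tokenizer hits above wrap the standard tokenize module and skip NL and COMMENT tokens while handing out the rest; a minimal sketch of that pattern (the sample source string is made up for illustration):

    import io
    import tokenize

    # Pull tokens from the standard-library tokenizer and drop the
    # non-significant NL/COMMENT tokens, as the pegen Tokenizer does above.
    source = "x = 1  # trailing comment\n"
    gen = tokenize.generate_tokens(io.StringIO(source).readline)
    significant = [tok for tok in gen
                   if tok.type not in (tokenize.NL, tokenize.COMMENT)]
    for tok in significant:
        print(tok.type, repr(tok.string))
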
/external/googletest/googlemock/scripts/generator/cpp/
ast.py
44 from cpp import tokenize
549 if parts[-1].token_type == tokenize.NAME:
579 if (type_name and type_name[-1].token_type == tokenize.NAME and
580 p.token_type == tokenize.NAME):
581 type_name.append(tokenize.Token(tokenize.SYNTAX, ' ', 0, 0))
749 if token.token_type == tokenize.NAME:
768 if next.token_type == tokenize.SYNTAX and next.name == '(':
773 syntax = tokenize.SYNTAX
783 new_temp = self._GetTokensUpTo(tokenize.SYNTAX, ';')
786 last_token = tokenize.Token(tokenize.SYNTAX, ';', 0, 0)
[all …]
/external/python/cpython3/Doc/library/
tokenize.rst
1 :mod:`tokenize` --- Tokenizer for Python source
4 .. module:: tokenize
10 **Source code:** :source:`Lib/tokenize.py`
14 The :mod:`tokenize` module provides a lexical scanner for Python source code,
23 :term:`named tuple` returned from :func:`tokenize.tokenize`.
30 .. function:: tokenize(readline)
32 The :func:`.tokenize` generator requires one argument, *readline*, which
57 :func:`.tokenize` determines the source encoding of the file by looking for a
64 Like :func:`.tokenize`, the *readline* argument is a callable returning
69 :func:`.tokenize`. It does not yield an :data:`~token.ENCODING` token.
[all …]
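
The documented entry point in these hits is tokenize.tokenize(readline), which takes the readline method of a binary-mode file and yields TokenInfo named tuples, starting with an ENCODING token. A minimal sketch ("example.py" is a placeholder path, not a file from these results):

    import tokenize

    # tokenize.tokenize() requires a readline callable from a binary stream;
    # the first token it yields reports the detected source encoding.
    with open("example.py", "rb") as f:
        for tok in tokenize.tokenize(f.readline):
            print(tok.type, repr(tok.string))
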
/external/python/cpython2/Tools/i18n/
pygettext.py
165 import tokenize
369 if ttype == tokenize.STRING:
372 elif ttype not in (tokenize.COMMENT, tokenize.NL):
376 if ttype == tokenize.NAME and tstring in ('class', 'def'):
379 if ttype == tokenize.NAME and tstring in opts.keywords:
384 if ttype == tokenize.OP and tstring == ':':
389 if ttype == tokenize.STRING:
392 elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
393 tokenize.COMMENT):
398 if ttype == tokenize.OP and tstring == '(':
[all …]
/external/python/cpython2/Tools/scripts/
checkappend.py
39 import tokenize
106 tokenize.tokenize(self.file.readline, self.tokeneater)
107 except tokenize.TokenError, msg:
113 NEWLINE=tokenize.NEWLINE,
114 JUNK=(tokenize.COMMENT, tokenize.NL),
115 OP=tokenize.OP,
116 NAME=tokenize.NAME):
cleanfuture.py
42 import tokenize
157 STRING = tokenize.STRING
158 NL = tokenize.NL
159 NEWLINE = tokenize.NEWLINE
160 COMMENT = tokenize.COMMENT
161 NAME = tokenize.NAME
162 OP = tokenize.OP
165 get = tokenize.generate_tokens(self.getline).next
reindent.py
44 import tokenize
206 tokenize.tokenize(self.getline, self.tokeneater)
288 INDENT=tokenize.INDENT,
289 DEDENT=tokenize.DEDENT,
290 NEWLINE=tokenize.NEWLINE,
291 COMMENT=tokenize.COMMENT,
292 NL=tokenize.NL):
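
cleanfuture.py and reindent.py above both bind the token-type constants to locals and then scan the stream from generate_tokens(); a rough sketch of that scanning loop (the sample source is made up):

    import io
    import tokenize

    # Bind the token-type constants once, then scan the token stream,
    # roughly as cleanfuture.py/reindent.py do in the hits above.
    STRING, NL, NEWLINE = tokenize.STRING, tokenize.NL, tokenize.NEWLINE
    COMMENT, NAME, OP = tokenize.COMMENT, tokenize.NAME, tokenize.OP

    source = "from __future__ import division\n"
    for ttype, tstring, start, end, line in tokenize.generate_tokens(
            io.StringIO(source).readline):
        if ttype == NAME and tstring == "__future__":
            print("__future__ import on line", start[0])
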
/external/python/cpython3/Tools/i18n/
pygettext.py
167 import tokenize
335 if ttype == tokenize.STRING and is_literal_string(tstring):
338 elif ttype not in (tokenize.COMMENT, tokenize.NL):
342 if ttype == tokenize.NAME and tstring in ('class', 'def'):
345 if ttype == tokenize.NAME and tstring in opts.keywords:
348 if ttype == tokenize.STRING:
402 if ttype == tokenize.OP:
413 if ttype == tokenize.STRING and is_literal_string(tstring):
416 elif ttype not in (tokenize.NEWLINE, tokenize.INDENT,
417 tokenize.COMMENT):
[all …]
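
The pygettext.py hits implement a small state machine over the token stream: a keyword NAME, then an opening '(', then a STRING literal marks a translatable string. A loose sketch of that idea (the keyword set and sample source are made up):

    import io
    import tokenize

    # Watch for <keyword> '(' <string literal>, roughly as pygettext.py does.
    source = 'greeting = _("hello world")\n'
    keywords = {"_"}          # hypothetical keyword set for illustration
    state = 0
    for tok in tokenize.generate_tokens(io.StringIO(source).readline):
        if state == 0 and tok.type == tokenize.NAME and tok.string in keywords:
            state = 1
        elif state == 1 and tok.type == tokenize.OP and tok.string == "(":
            state = 2
        elif state == 2 and tok.type == tokenize.STRING:
            print("translatable string:", tok.string)
            state = 0
        else:
            state = 0
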
/external/webrtc/rtc_base/
string_encode_unittest.cc
141 EXPECT_EQ(5ul, tokenize("one two three four five", ' ', &fields)); in TEST()
143 EXPECT_EQ(1ul, tokenize("one", ' ', &fields)); in TEST()
147 EXPECT_EQ(5ul, tokenize(" one two three four five ", ' ', &fields)); in TEST()
149 EXPECT_EQ(1ul, tokenize(" one ", ' ', &fields)); in TEST()
151 EXPECT_EQ(0ul, tokenize(" ", ' ', &fields)); in TEST()
158 tokenize("find middle one", ' ', &fields); in TEST()
164 tokenize(" find middle one ", ' ', &fields); in TEST()
168 tokenize(" ", ' ', &fields); in TEST()
188 ASSERT_EQ(0ul, tokenize("D \"A B", ' ', '(', ')', nullptr)); in TEST()
191 tokenize("A B C", ' ', '"', '"', &fields); in TEST()
[all …]
/external/python/cpython3/Lib/
tabnanny.py
25 import tokenize
26 if not hasattr(tokenize, 'NL'):
98 f = tokenize.open(file)
107 process_tokens(tokenize.generate_tokens(f.readline))
109 except tokenize.TokenError as msg:
278 INDENT = tokenize.INDENT
279 DEDENT = tokenize.DEDENT
280 NEWLINE = tokenize.NEWLINE
281 JUNK = tokenize.COMMENT, tokenize.NL
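
tabnanny.py above opens the file with tokenize.open(), which applies encoding detection, and feeds its readline to generate_tokens(); a minimal sketch, with "example.py" as a placeholder path:

    import tokenize

    # tokenize.open() detects the source encoding before opening the file;
    # generate_tokens() then walks its tokens, as tabnanny.py does above.
    with tokenize.open("example.py") as f:
        for tok in tokenize.generate_tokens(f.readline):
            if tok.type in (tokenize.INDENT, tokenize.DEDENT):
                print(tok.type, tok.start)
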
/external/deqp/framework/randomshaders/
rsgExpression.hpp
57 virtual void tokenize (GeneratorState& state, TokenStream& str) const = DE_NULL;
74 …void tokenize (GeneratorState& state, TokenStream& str) const { DE_UNREF(state); str << Tok… in tokenize() function in rsg::VariableAccess
114 void tokenize (GeneratorState& state, TokenStream& str) const;
132 void tokenize (GeneratorState& state, TokenStream& str) const;
151 void tokenize (GeneratorState& state, TokenStream& str) const;
169 void tokenize (GeneratorState& state, TokenStream& str) const;
191 void tokenize (GeneratorState& state, TokenStream& str) const;
216 void tokenize (GeneratorState& state, TokenStream& str) const;
236 void tokenize (GeneratorState& state, TokenStream& str) const;
258 void tokenize (GeneratorState& state, TokenStream& str) const;
rsgStatement.cpp
203 void BlockStatement::tokenize (GeneratorState& state, TokenStream& str) const in tokenize() function in rsg::BlockStatement
208 (*i)->tokenize(state, str); in tokenize()
219 void ExpressionStatement::tokenize (GeneratorState& state, TokenStream& str) const in tokenize() function in rsg::ExpressionStatement
222 m_expression->tokenize(state, str); in tokenize()
332 void DeclarationStatement::tokenize (GeneratorState& state, TokenStream& str) const in tokenize() function in rsg::DeclarationStatement
339 m_expression->tokenize(state, str); in tokenize()
455 void ConditionalStatement::tokenize (GeneratorState& state, TokenStream& str) const in tokenize() function in rsg::ConditionalStatement
461 m_condition->tokenize(state, str); in tokenize()
468 m_trueStatement->tokenize(state, str); in tokenize()
472 m_trueStatement->tokenize(state, str); in tokenize()
[all …]
rsgStatement.hpp
44 virtual void tokenize (GeneratorState& state, TokenStream& str) const = DE_NULL;
62 void tokenize (GeneratorState& state, TokenStream& str) const;
78 void tokenize (GeneratorState& state, TokenStream& str) const;
98 void tokenize (GeneratorState& state, TokenStream& str) const;
119 void tokenize (GeneratorState& state, TokenStream& str) const;
145 void tokenize (GeneratorState& state, TokenStream& str) const;
rsgShader.cpp
93 void Shader::tokenize (GeneratorState& state, TokenStream& str) const in tokenize() function in rsg::Shader
101 m_globalStatements[ndx]->tokenize(state, str); in tokenize()
107 m_functions[ndx]->tokenize(state, str); in tokenize()
112 m_mainFunction.tokenize(state, str); in tokenize()
125 void Function::tokenize (GeneratorState& state, TokenStream& str) const in tokenize() function in rsg::Function
147 m_functionBlock.tokenize(state, str); in tokenize()
/external/python/cpython2/Doc/library/
tokenize.rst
1 :mod:`tokenize` --- Tokenizer for Python source
4 .. module:: tokenize
9 **Source code:** :source:`Lib/tokenize.py`
13 The :mod:`tokenize` module provides a lexical scanner for Python source code,
22 :func:`tokenize.generate_tokens` for the character sequence that identifies a
48 .. function:: tokenize(readline[, tokeneater])
50 The :func:`.tokenize` function accepts two parameters: one representing the input
51 stream, and one providing an output mechanism for :func:`.tokenize`.
67 :mod:`tokenize`, as are two additional token type values that might be passed to
68 the *tokeneater* function by :func:`.tokenize`:
[all …]
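
Unlike the Python 3 module documented earlier, the Python 2 entry point shown here takes a second tokeneater callback that receives each token's type, string, start, end, and logical line. A Python 2 sketch of that callback style (the sample source is made up):

    # Python 2 only: tokenize.tokenize(readline, tokeneater) drives a callback.
    import StringIO
    import tokenize

    def tokeneater(ttype, tstring, start, end, line):
        print ttype, repr(tstring)

    tokenize.tokenize(StringIO.StringIO("x = 1\n").readline, tokeneater)
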
/external/python/cpython2/Lib/
tabnanny.py
26 import tokenize
27 if not hasattr(tokenize, 'NL'):
106 process_tokens(tokenize.generate_tokens(f.readline))
108 except tokenize.TokenError, msg:
274 INDENT = tokenize.INDENT
275 DEDENT = tokenize.DEDENT
276 NEWLINE = tokenize.NEWLINE
277 JUNK = tokenize.COMMENT, tokenize.NL
/external/tensorflow/tensorflow/python/autograph/pyct/
parser.py
30 import tokenize
74 token_gen = tokenize.generate_tokens(six.StringIO(code_string).readline)
81 except tokenize.TokenError:
89 if tok_type == tokenize.INDENT:
94 tokenize.NL, tokenize.NEWLINE, tokenize.STRING, tokenize.COMMENT):
105 if tok_type == tokenize.INDENT:
117 new_code = tokenize.untokenize(tokens)
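
The autograph parser hits rely on a round trip through the token stream: generate_tokens() to read the tokens, then untokenize() to rebuild the source. A small sketch of that round trip on made-up input:

    import io
    import tokenize

    # Read the full token stream and rebuild the source text from it,
    # the same generate_tokens()/untokenize() pairing used above.
    source = "def f(x):\n    return x + 1\n"
    tokens = list(tokenize.generate_tokens(io.StringIO(source).readline))
    rebuilt = tokenize.untokenize(tokens)
    print(rebuilt == source)  # True for simple, well-formed input like this
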
/external/autotest/utils/
reindent.py
44 import tokenize
176 tokenize.tokenize(self.getline, self.tokeneater)
258 INDENT=tokenize.INDENT,
259 DEDENT=tokenize.DEDENT,
260 NEWLINE=tokenize.NEWLINE,
261 COMMENT=tokenize.COMMENT,
262 NL=tokenize.NL):
/external/python/cpython3/Tools/scripts/
cleanfuture.py
42 import tokenize
156 STRING = tokenize.STRING
157 NL = tokenize.NL
158 NEWLINE = tokenize.NEWLINE
159 COMMENT = tokenize.COMMENT
160 NAME = tokenize.NAME
161 OP = tokenize.OP
164 get = tokenize.generate_tokens(self.getline).__next__
highlight.py
11 import tokenize
35 tok_type = tokenize.COMMENT
37 for tok in tokenize.generate_tokens(readline):
41 if tok_type == tokenize.COMMENT:
43 elif tok_type == tokenize.OP and tok_str[:1] not in '{}[](),.:;@':
45 elif tok_type == tokenize.STRING:
47 if prev_tok_type == tokenize.INDENT or scol==0:
49 elif tok_type == tokenize.NAME:
reindent.py
46 import tokenize
122 encoding, _ = tokenize.detect_encoding(f.readline)
202 tokens = tokenize.generate_tokens(self.getline)
286 INDENT=tokenize.INDENT,
287 DEDENT=tokenize.DEDENT,
288 NEWLINE=tokenize.NEWLINE,
289 COMMENT=tokenize.COMMENT,
290 NL=tokenize.NL):
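
This version of reindent.py also calls tokenize.detect_encoding(), which reads up to two lines from a binary readline and returns the declared or default source encoding. A minimal sketch ("example.py" is again a placeholder path):

    import tokenize

    # detect_encoding() inspects the BOM / coding cookie and returns the
    # encoding name plus any lines it had to read to find it.
    with open("example.py", "rb") as f:
        encoding, consumed_lines = tokenize.detect_encoding(f.readline)
    print(encoding)
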
/external/tflite-support/tensorflow_lite_support/custom_ops/python/
sentencepiece_tokenizer_test.py
68 tftext_tokenized = tftext_sp.tokenize(input_text)
69 opt_tokenized = opt_sp.tokenize(input_text)
82 tftext_tokenized = tftext_sp.tokenize(input_text)
100 tftext_tokenized = tftext_sp.tokenize(input_text)
101 opt_tokenized = opt_sp.tokenize(input_text)
115 return self.sp.tokenize(input_tensor).flat_values
240 _ = opt_sp.tokenize(test_text)
245 _ = tftext_sp.tokenize(test_text)
/external/chromium-trace/catapult/common/py_utils/py_utils/refactor/
offset_token.py
12 import tokenize
71 tokenize_tokens = tokenize.generate_tokens(f.readline)
95 while offset_tokens[0].type == tokenize.NL:
120 return tokenize.untokenize(tokenize_tokens).replace('\\\n', ' \\\n')
/external/python/cpython3/Lib/lib2to3/pgen2/
driver.py
26 from . import grammar, parse, token, tokenize, pgen
59 if type in (tokenize.COMMENT, tokenize.NL):
88 tokens = tokenize.generate_tokens(stream.readline)
102 tokens = tokenize.generate_tokens(io.StringIO(text).readline)
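
The lib2to3 driver above tokenizes text with generate_tokens() and feeds the parser, skipping COMMENT and NL tokens along the way. A hedged sketch of driving it through its public parse_string() API (lib2to3 is deprecated in recent Python releases):

    from lib2to3 import pygram, pytree
    from lib2to3.pgen2 import driver

    # Build a driver for the Python grammar; parse_string() tokenizes the
    # text internally (dropping COMMENT/NL) and returns a pytree node.
    d = driver.Driver(pygram.python_grammar, convert=pytree.convert)
    tree = d.parse_string("x = 1\n")
    print(tree)
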
