Home
last modified time | relevance | path

Searched refs: DEDENT (Results 1 – 25 of 26) sorted by relevance

12

/third_party/python/Lib/test/
Dtest_tokenize.py646 DEDENT '' (4, 0) (4, 0)
647 DEDENT '' (4, 0) (4, 0)
825 DEDENT '' (4, 2) (4, 2)
833 DEDENT '' (6, 0) (6, 0)
834 DEDENT '' (6, 0) (6, 0)
859 DEDENT '' (3, 0) (3, 0)
907 DEDENT '' (7, 0) (7, 0)
945 DEDENT '' (7, 0) (7, 0)
/third_party/python/Include/
Dtoken.h19 #define DEDENT 6 macro
85 (x) == DEDENT)
/third_party/python/Lib/
Dtabnanny.py279 DEDENT = tokenize.DEDENT
303 elif type == DEDENT:
Dtokenize.py202 elif tok_type == DEDENT:
248 elif toknum == DEDENT:
519 yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
610 yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
Dtoken.py12 DEDENT = 6 variable
Dinspect.py1093 elif type == tokenize.DEDENT:
/third_party/python/Tools/scripts/
Dreindent.py287 DEDENT=tokenize.DEDENT, argument
302 elif type == DEDENT:
/third_party/python/Tools/peg_generator/pegen/
Dmetagrammar.gram53 | rulename memoflag? ":" alts NEWLINE INDENT more_alts DEDENT {
55 | rulename memoflag? ":" NEWLINE INDENT more_alts DEDENT {
/third_party/python/Lib/lib2to3/fixes/
Dfix_metaclass.py137 if isinstance(node, Leaf) and node.type != token.DEDENT:
224 suite.children[-1].type == token.DEDENT):
/third_party/cef/tools/yapf/yapf/yapflib/
Dcomment_splicer.py82 elif child.type == token.DEDENT:
88 if ancestor_at_indent.type == token.DEDENT:
Dpytree_unwrapper.py58 grammar_token.NEWLINE, grammar_token.DEDENT, grammar_token.INDENT,
/third_party/python/Lib/lib2to3/pgen2/
Dtokenize.py217 elif toknum == DEDENT:
434 yield (DEDENT, '', (lnum, pos), (lnum, pos), line)
558 yield (DEDENT, '', (lnum, 0), (lnum, 0), '')
Dtoken.py15 DEDENT = 6 variable
/third_party/python/Grammar/
DTokens7 DEDENT
Dpython.gram224 … | "match" subject=subject_expr ':' NEWLINE INDENT cases[asdl_match_case_seq*]=case_block+ DEDENT {
496 | NEWLINE INDENT a=statements DEDENT { a }
/third_party/python/Doc/library/
Dtoken-list.inc14 .. data:: DEDENT
Dtokenize.rst249 4,0-4,0: DEDENT ''
275 4,0-4,0: DEDENT ''
/third_party/python/Lib/lib2to3/
Dpatcomp.py30 skip = {token.NEWLINE, token.INDENT, token.DEDENT}
DGrammar.txt123 suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
/third_party/python/Parser/
Dpegen_errors.c411 if (last_token->type == INDENT || last_token->type == DEDENT) { in _Pypegen_set_syntax_error()
Dpegen.c967 if (token->type != ENDMARKER && (token->type < NEWLINE || token->type > DEDENT)) { in _PyPegen_get_last_nonnwhitespace_token()
1379 else if (p->tokens[p->fill-1]->type == DEDENT) { in _PyPegen_run_parser()
Dtokenizer.c1450 return DEDENT; in tok_get()
/third_party/python/Doc/reference/
Dlexical_analysis.rst204 .. index:: INDENT token, DEDENT token
207 DEDENT tokens, using a stack, as follows.
216 popped off, and for each number popped off a DEDENT token is generated. At the
217 end of the file, a DEDENT token is generated for each number remaining on the
267 Besides NEWLINE, INDENT and DEDENT, the following categories of tokens exist:
Dcompound_stmts.rst60 suite: `stmt_list` NEWLINE | NEWLINE INDENT `statement`+ DEDENT
66 single: DEDENT token
70 ``DEDENT``. Also note that optional continuation clauses always begin with a
512 match_stmt: 'match' `subject_expr` ":" NEWLINE INDENT `case_block`+ DEDENT
/third_party/python/Tools/i18n/
Dpygettext.py440 elif ttype not in [tokenize.COMMENT, token.INDENT, token.DEDENT,

12