/third_party/lwip/src/apps/http/makefsdata/ |
D | makefsdata.c |
    81   #define NEWLINE "\r\n"   macro
   158   …<filename>] [-m] [-svr:<name>] [-x:<ext_list>] [-xc:<ext_list>" USAGE_ARG_DEFLATE NEWLINE NEWLINE);   in print_usage()
   159   printf(" targetdir: relative or absolute path to files to convert" NEWLINE);   in print_usage()
   160   printf(" switch -s: toggle processing of subdirectories (default is on)" NEWLINE);   in print_usage()
   161   … switch -e: exclude HTTP header from file (header is created at runtime, default is off)" NEWLINE);   in print_usage()
   162   printf(" switch -11: include HTTP 1.1 header (1.0 is default)" NEWLINE);   in print_usage()
   163   printf(" switch -nossi: no support for SSI (cannot calculate Content-Length for SSI)" NEWLINE);   in print_usage()
   164   …tf(" switch -ssi: ssi filename (ssi support controlled by file list, not by extension)" NEWLINE);   in print_usage()
   165   printf(" switch -c: precalculate checksums for all pages (default is off)" NEWLINE);   in print_usage()
   166   printf(" switch -f: target filename (default is \"fsdata.c\")" NEWLINE);   in print_usage()
   [all …]
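
The hard-coded CRLF here is deliberate: makefsdata emits ready-to-send HTTP responses into the generated fsdata.c, and HTTP/1.x terminates header lines with CRLF regardless of the host platform. A minimal Python sketch of the idea (hypothetical code, not part of lwIP):

```python
# Minimal sketch (not lwIP code): HTTP/1.x header lines end in CRLF, which
# is why makefsdata hard-codes NEWLINE as "\r\n" in the data it generates.
CRLF = "\r\n"

def make_http_header(content_type, length, http11=False):
    status = "HTTP/1.1 200 OK" if http11 else "HTTP/1.0 200 OK"
    lines = [
        status,
        "Content-Type: " + content_type,
        "Content-Length: " + str(length),
        "",  # blank line terminates the header block
    ]
    return (CRLF.join(lines) + CRLF).encode("ascii")

print(make_http_header("text/html", 1234).decode())
```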
|
/third_party/gn/src/gn/ |
D | rust_project_writer.cc |
    24   #define NEWLINE "\r\n"   macro
    26   #define NEWLINE "\n"   macro
   362   rust_project << "{" NEWLINE;   in WriteCrates()
   373   rust_project << NEWLINE << " {" NEWLINE   in WriteCrates()
   374   << " \"crate_id\": " << crate.index() << "," NEWLINE   in WriteCrates()
   375   << " \"root_module\": \"" << crate_module << "\"," NEWLINE   in WriteCrates()
   376   << " \"label\": \"" << crate.label() << "\"," NEWLINE   in WriteCrates()
   377   << " \"source\": {" NEWLINE   in WriteCrates()
   378   << " \"include_dirs\": [" NEWLINE   in WriteCrates()
   387   rust_project << "," NEWLINE << " \"" << gen_dir_path   in WriteCrates()
   [all …]
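
gn defines NEWLINE twice at lines 24 and 26, presumably under a platform conditional, so rust-project.json is written with the host's line-ending convention. A minimal Python sketch of the same pattern (the platform test, names, and file layout are illustrative assumptions, not gn's code):

```python
# Minimal sketch (not gn's C++): pick the newline once per platform, as
# rust_project_writer.cc does with its two NEWLINE #defines. newline=""
# disables Python's own translation so NEWLINE is written verbatim.
import sys

NEWLINE = "\r\n" if sys.platform == "win32" else "\n"

def write_crate(out, index, root_module, label):
    out.write("  {" + NEWLINE)
    out.write('    "crate_id": %d,%s' % (index, NEWLINE))
    out.write('    "root_module": "%s",%s' % (root_module, NEWLINE))
    out.write('    "label": "%s"%s' % (label, NEWLINE))
    out.write("  }" + NEWLINE)

with open("rust-project.json", "w", newline="") as f:
    f.write("{" + NEWLINE)
    write_crate(f, 0, "src/lib.rs", "//foo:bar")
    f.write("}" + NEWLINE)
```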
|
/third_party/python/Lib/test/ |
D | test_tokenize.py |
     6   NEWLINE)
    29   if missing_trailing_nl and type == NEWLINE and end[0] == num_lines:
    58   self.assertEqual(tokens[-2].type, NEWLINE)
   597   NEWLINE '\\n' (1, 16) (1, 17)
   617   NEWLINE '\\n' (1, 13) (1, 14)
   639   NEWLINE '\\n' (1, 8) (1, 9)
   643   NEWLINE '\\n' (2, 5) (2, 6)
   656   NEWLINE '\\n' (1, 16) (1, 17)
   668   NEWLINE '\\n' (1, 17) (1, 18)
   759   NEWLINE '\\n' (1, 5) (1, 6)
   [all …]
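
These tests exercise the distinction between NEWLINE (end of a logical line) and NL (a non-logical line break, e.g. inside brackets). A small sketch showing both token types:

```python
# Python's tokenizer distinguishes NEWLINE (ends a logical line) from NL
# (a line break that does not end a statement, e.g. inside parentheses).
import io
from tokenize import generate_tokens, NEWLINE, NL

src = "x = (1,\n     2)\n"
for tok in generate_tokens(io.StringIO(src).readline):
    if tok.type in (NEWLINE, NL):
        print("NEWLINE" if tok.type == NEWLINE else "NL", tok.start, tok.end)
```

The line break after the comma is reported as NL; only the break after the closing parenthesis is a NEWLINE.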
|
D | test_univnewlines.py |
    64   self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
    70   self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
    80   self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
    94   NEWLINE = '\r'   variable in TestCRNewlines
   100   NEWLINE = '\n'   variable in TestLFNewlines
   106   NEWLINE = '\r\n'   variable in TestCRLFNewlines
   114   self.assertEqual(repr(fp.newlines), repr(self.NEWLINE))
   119   NEWLINE = ('\r', '\n')   variable in TestMixedNewlines
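
Each test class pins self.NEWLINE to one convention and checks that fp.newlines reports what was actually seen. A condensed sketch of the behavior under test:

```python
# With newline=None (universal newlines, the default in text mode) every
# convention is translated to "\n" on read, and .newlines records what
# was encountered: a string for one style, a tuple when styles are mixed.
import os
import tempfile

data = b"one\rtwo\nthree\r\n"
with tempfile.NamedTemporaryFile(delete=False) as f:
    f.write(data)
    path = f.name

with open(path, newline=None) as fp:
    print(fp.read().split("\n"))  # ['one', 'two', 'three', '']
    print(fp.newlines)            # e.g. ('\r', '\n', '\r\n')
os.remove(path)
```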
|
/third_party/mesa3d/src/compiler/glsl/glcpp/ |
D | glcpp-lex.l |
   130   if (token != NEWLINE && token != SPACE && token != HASH_TOKEN &&   in glcpp_lex_update_state_per_token()
   137   if (token == NEWLINE) {   in glcpp_lex_update_state_per_token()
   145   if (token == NEWLINE) {   in glcpp_lex_update_state_per_token()
   187   NEWLINE (\r\n|\n\r|\r|\n)
   225   RETURN_TOKEN_NEVER_SKIP (NEWLINE);
   273   <COMMENT>[^*\r\n]*{NEWLINE} { yylineno++; yycolumn = 0; parser->commented_newlines++; }
   275   <COMMENT>"*"+[^*/\r\n]*{NEWLINE} { yylineno++; yycolumn = 0; parser->commented_newlines++; }
   335   <HASH>{NEWLINE} {
   340   RETURN_TOKEN_NEVER_SKIP (NEWLINE);
   573   <*>{NEWLINE} {
   [all …]
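
The NEWLINE pattern on line 187 accepts all four line-ending conventions; the two-character sequences are listed first so they match before the bare \r and \n. The same alternation works in Python's re (a sketch, not Mesa code):

```python
# The same four-way newline alternation as glcpp's lexer macro. Order
# matters: "\r\n" and "\n\r" must come before "\r" and "\n" so a two-byte
# ending is consumed as one newline, not two.
import re

NEWLINE = re.compile(r"\r\n|\n\r|\r|\n")

def count_lines(text):
    return len(NEWLINE.findall(text)) + 1

print(count_lines("a\r\nb\rc\nd"))  # 4
```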
|
D | glcpp-parse.y |
   196   …OKEN GARBAGE IDENTIFIER IF_EXPANDED INTEGER INTEGER_STRING LINE_EXPANDED NEWLINE OTHER PLACEHOLDER…
   236   IF_EXPANDED expression NEWLINE {
   241   | ELIF_EXPANDED expression NEWLINE {
   246   | LINE_EXPANDED integer_constant NEWLINE {
   251   | LINE_EXPANDED integer_constant integer_constant NEWLINE {
   260   | LINE_EXPANDED integer_constant PATH NEWLINE {
   270   OBJ_IDENTIFIER replacement_list NEWLINE {
   273   | FUNC_IDENTIFIER '(' ')' replacement_list NEWLINE {
   276   | FUNC_IDENTIFIER '(' identifier_list ')' replacement_list NEWLINE {
   286   | HASH_TOKEN LINE pp_tokens NEWLINE {
   [all …]
|
/third_party/python/Tools/peg_generator/pegen/ |
D | metagrammar.gram |
   44   | "@" NAME NEWLINE { (name.string, None) }
   45   | "@" a=NAME b=NAME NEWLINE { (a.string, b.string) }
   46   | "@" NAME STRING NEWLINE { (name.string, literal_eval(string.string)) }
   53   | rulename memoflag? ":" alts NEWLINE INDENT more_alts DEDENT {
   55   | rulename memoflag? ":" NEWLINE INDENT more_alts DEDENT {
   57   | rulename memoflag? ":" alts NEWLINE { Rule(rulename[0], rulename[1], alts, memo=opt) }
   73   | "|" alts NEWLINE more_alts { Rhs(alts.alts + more_alts.alts) }
   74   | "|" alts NEWLINE { Rhs(alts.alts) }
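
pegen tokenizes grammar files with Python's own tokenizer, so the NEWLINE INDENT … DEDENT sequence in the rule productions corresponds directly to what tokenize emits for an indented block of alternatives. A quick demonstration (the grammar text is made up):

```python
# An indented block arrives from Python's tokenizer exactly as the
# metagrammar rule expects: NEWLINE, INDENT, the alternatives, DEDENT.
import io
import tokenize

src = "start:\n    | rule_a\n    | rule_b\n"
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))
```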
|
/third_party/python/Lib/test/test_peg_generator/ |
D | test_pegen.py |
     7   from tokenize import TokenInfo, NAME, NEWLINE, NUMBER, OP
    92   TokenInfo(NEWLINE, string="\n", start=(1, 2), end=(1, 3), line="42\n"),
   100   TokenInfo(NEWLINE, string="\n", start=(1, 4), end=(1, 5), line="1, 2\n"),
   113   TokenInfo(NEWLINE, string="\n", start=(1, 2), end=(1, 3), line="42\n"),
   132   TokenInfo(NEWLINE, string="\n", start=(1, 3), end=(1, 4), line="1+2\n"),
   137   TokenInfo(NEWLINE, string="\n", start=(1, 1), end=(1, 2), line="1\n"),
   153   TokenInfo(NEWLINE, string="\n", start=(1, 2), end=(1, 3), line="1+\n"),
   158   TokenInfo(NEWLINE, string="\n", start=(1, 1), end=(1, 2), line="1\n"),
   177   TokenInfo(NEWLINE, string="\n", start=(1, 5), end=(1, 6), line="1 + 2\n"),
   182   TokenInfo(NEWLINE, string="\n", start=(1, 1), end=(1, 2), line="1\n"),
   [all …]
|
/third_party/python/Grammar/ |
D | python.gram |
    33   eval[mod_ty]: a=expressions NEWLINE* ENDMARKER { _PyAST_Expression(a, p->arena) }
    34   func_type[mod_ty]: '(' a=[type_expressions] ')' '->' b=expression NEWLINE* ENDMARKER { _PyAST_Funct…
    58   | a=compound_stmt NEWLINE { (asdl_stmt_seq*)_PyPegen_singleton_seq(p, a) }
    60   | NEWLINE { (asdl_stmt_seq*)_PyPegen_singleton_seq(p, CHECK(stmt_ty, _PyAST_Pass(EXTRA))) }
    63   …| a=simple_stmt !';' NEWLINE { (asdl_stmt_seq*)_PyPegen_singleton_seq(p, a) } # Not needed, there …
    64   | a[asdl_stmt_seq*]=';'.simple_stmt+ [';'] NEWLINE { a }
   134   | 'del' a=del_targets &(';' | NEWLINE) { _PyAST_Delete(a, EXTRA) }
   224   … | "match" subject=subject_expr ':' NEWLINE INDENT cases[asdl_match_case_seq*]=case_block+ DEDENT {
   418   | NEWLINE t=TYPE_COMMENT &(NEWLINE INDENT) { t } # Must be followed by indented block
   482   decorators[asdl_expr_seq*]: a[asdl_expr_seq*]=('@' f=named_expression NEWLINE { f })+ { a }
   [all …]
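
The NEWLINE* in the eval rule (line 33) is what makes trailing blank lines harmless in eval mode:

```python
# Trailing newlines after the expression are absorbed by NEWLINE* before
# ENDMARKER, so this compiles cleanly in "eval" mode.
code = compile("40 + 2\n\n\n", "<example>", "eval")
print(eval(code))  # 42
```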
|
/third_party/python/Lib/lib2to3/ |
D | Grammar.txt |
    10   # NB: compound_stmt in single_input is followed by extra NEWLINE!
    11   file_input: (NEWLINE | stmt)* ENDMARKER
    12   single_input: NEWLINE | simple_stmt | compound_stmt NEWLINE
    13   eval_input: testlist NEWLINE* ENDMARKER
    15   decorator: '@' dotted_name [ '(' [arglist] ')' ] NEWLINE
    75   simple_stmt: small_stmt (';' small_stmt)* [';'] NEWLINE
   123   suite: simple_stmt | NEWLINE INDENT stmt+ DEDENT
|
/third_party/vk-gl-cts/framework/randomshaders/ |
D | rsgShader.cpp |
    97   … Token::PRECISION << Token::MEDIUM_PRECISION << Token::FLOAT << Token::SEMICOLON << Token::NEWLINE;   in tokenize()
   106   str << Token::NEWLINE;   in tokenize()
   111   str << Token::NEWLINE;   in tokenize()
   144   str << Token::RIGHT_PAREN << Token::NEWLINE;   in tokenize()
|
D | rsgStatement.cpp |
   205   str << Token::LEFT_BRACE << Token::NEWLINE << Token::INDENT_INC;   in tokenize()
   210   str << Token::INDENT_DEC << Token::RIGHT_BRACE << Token::NEWLINE;   in tokenize()
   223   str << Token::SEMICOLON << Token::NEWLINE;   in tokenize()
   342   str << Token::SEMICOLON << Token::NEWLINE;   in tokenize()
   462   str << Token::RIGHT_PAREN << Token::NEWLINE;   in tokenize()
   484   str << Token::NEWLINE;   in tokenize()
   489   str << Token::NEWLINE << Token::INDENT_INC;   in tokenize()
   559   str << Token::SEMICOLON << Token::NEWLINE;   in tokenize()
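
Here NEWLINE, INDENT_INC and INDENT_DEC are layout tokens in a stream that a later pass renders to text. A minimal Python sketch of such a renderer (hypothetical, not the CTS implementation):

```python
# Minimal sketch (not the CTS C++): layout driven by NEWLINE, INDENT_INC
# and INDENT_DEC tokens, in the spirit of rsg's tokenize() methods.
from enum import Enum, auto

class Token(Enum):
    TEXT = auto()
    NEWLINE = auto()
    INDENT_INC = auto()
    INDENT_DEC = auto()

def render(tokens):
    out, indent, line = [], 0, []
    for kind, text in tokens:
        if kind is Token.NEWLINE:
            out.append("\t" * indent + " ".join(line))
            line = []
        elif kind is Token.INDENT_INC:
            indent += 1
        elif kind is Token.INDENT_DEC:
            indent -= 1
        else:
            line.append(text)
    return "\n".join(out)

print(render([
    (Token.TEXT, "{"), (Token.NEWLINE, None), (Token.INDENT_INC, None),
    (Token.TEXT, "x = 1 ;"), (Token.NEWLINE, None),
    (Token.INDENT_DEC, None), (Token.TEXT, "}"), (Token.NEWLINE, None),
]))
```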
|
/third_party/python/Tools/scripts/ |
D | cleanfuture.py |
   158   NEWLINE = tokenize.NEWLINE
   168   while type in (COMMENT, NL, NEWLINE):
   178   while type in (COMMENT, NL, NEWLINE):
   210   if type is not NEWLINE:
|
/third_party/icu/ohos_icu4j/src/main/java/ohos/global/icu/util/ |
D | VTimeZone.java |
    224   bw.write(NEWLINE);   in write()
    231   bw.write(NEWLINE);   in write()
    235   bw.write(NEWLINE);   in write()
    390   private static final String NEWLINE = "\r\n"; // CRLF   field in VTimeZone
   1224   w.write(NEWLINE);   in writeZone()
   1504   writer.write(NEWLINE);   in writeZonePropsByTime()
   1524   writer.write(NEWLINE);   in writeZonePropsByDOM()
   1545   writer.write(NEWLINE);   in writeZonePropsByDOW()
   1630   writer.write(NEWLINE);   in writeZonePropsByDOW_GEQ_DOM_sub()
   1782   writer.write(NEWLINE);   in beginZoneProps()
   [all …]
|
/third_party/icu/icu4j/main/classes/core/src/com/ibm/icu/util/ |
D | VTimeZone.java |
    243   bw.write(NEWLINE);   in write()
    250   bw.write(NEWLINE);   in write()
    254   bw.write(NEWLINE);   in write()
    419   private static final String NEWLINE = "\r\n"; // CRLF   field in VTimeZone
   1253   w.write(NEWLINE);   in writeZone()
   1533   writer.write(NEWLINE);   in writeZonePropsByTime()
   1553   writer.write(NEWLINE);   in writeZonePropsByDOM()
   1574   writer.write(NEWLINE);   in writeZonePropsByDOW()
   1659   writer.write(NEWLINE);   in writeZonePropsByDOW_GEQ_DOM_sub()
   1811   writer.write(NEWLINE);   in beginZoneProps()
   [all …]
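
Both ICU copies (the OpenHarmony mirror above and upstream ICU4J here) pin NEWLINE to CRLF because RFC 5545 requires iCalendar content lines to be delimited by CRLF on every platform. A minimal sketch of the output convention (not ICU code):

```python
# Minimal sketch (not ICU code): RFC 5545 delimits iCalendar content lines
# with CRLF, which is why VTimeZone fixes NEWLINE to "\r\n" unconditionally.
import io

NEWLINE = "\r\n"

def write_vtimezone(writer, tzid):
    for line in ("BEGIN:VTIMEZONE", "TZID:" + tzid, "END:VTIMEZONE"):
        writer.write(line)
        writer.write(NEWLINE)

buf = io.StringIO()
write_vtimezone(buf, "America/Los_Angeles")
print(repr(buf.getvalue()))
```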
|
/third_party/ninja/src/ |
D | manifest_parser.cc |
    85   case Lexer::NEWLINE:   in Parse()
   101   if (!ExpectToken(Lexer::NEWLINE, err))   in ParsePool()
   138   if (!ExpectToken(Lexer::NEWLINE, err))   in ParseRule()
   206   return ExpectToken(Lexer::NEWLINE, err);   in ParseDefault()
   303   if (!ExpectToken(Lexer::NEWLINE, err))   in ParseEdge()
   440   if (!ExpectToken(Lexer::NEWLINE, err))   in ParseFileInclude()
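
Ninja's parser requires a NEWLINE after each declaration via ExpectToken, turning a missing line break into a diagnosable error rather than a silent misparse. A Python sketch of the same helper pattern (hypothetical names, not ninja's code):

```python
# Minimal sketch (not ninja's C++): an ExpectToken-style helper, mirroring
# how manifest_parser.cc requires NEWLINE after each declaration.
NEWLINE, IDENT, TEOF = "newline", "ident", "eof"

class ParseError(Exception):
    pass

class Parser:
    def __init__(self, tokens):
        self._tokens = iter(tokens)
        self._current = next(self._tokens, (TEOF, ""))

    def expect_token(self, expected):
        kind, text = self._current
        if kind != expected:
            raise ParseError(f"expected {expected}, got {kind} {text!r}")
        self._current = next(self._tokens, (TEOF, ""))
        return text

    def parse_pool(self):
        name = self.expect_token(IDENT)
        self.expect_token(NEWLINE)  # the declaration must end its line
        return name

print(Parser([(IDENT, "link_pool"), (NEWLINE, "\n")]).parse_pool())
```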
|
D | lexer.in.cc |
    84   case NEWLINE: return "newline";   in TokenName()
   159   if (token != NEWLINE && token != TEOF)   in ReadToken()
|
/third_party/python/Doc/reference/ |
D | toplevel_components.rst |
    70   file_input: (NEWLINE | `statement`)*
    89   interactive_input: [`stmt_list`] NEWLINE | `compound_stmt` NEWLINE
   107   eval_input: `expression_list` NEWLINE*
|
/third_party/rust/crates/regex/regex-syntax/src/unicode_tables/ |
D | word_break.rs |
     22   ("Newline", NEWLINE),
   1036   pub const NEWLINE: &'static [(char, char)] =   constant
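
regex-syntax stores each Unicode property as a sorted slice of inclusive (char, char) ranges; membership is a binary search over the table. A Python sketch of the lookup (the range values below follow Word_Break=Newline, i.e. VT, FF, NEL, LS, PS, and are an assumption here, not copied from word_break.rs):

```python
# Minimal sketch: membership in a sorted table of inclusive (lo, hi)
# character ranges, queried with bisect. The NEWLINE contents are assumed
# from the Unicode Word_Break=Newline class, not read from word_break.rs.
import bisect

NEWLINE = [("\x0b", "\x0c"), ("\x85", "\x85"), ("\u2028", "\u2029")]

def in_ranges(ch, ranges):
    i = bisect.bisect_right(ranges, (ch, "\U0010FFFF")) - 1
    return i >= 0 and ranges[i][0] <= ch <= ranges[i][1]

print(in_ranges("\x0c", NEWLINE), in_ranges("a", NEWLINE))  # True False
```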
|
/third_party/python/Include/ |
D | token.h |
   17   #define NEWLINE 4   macro
   83   (x) == NEWLINE || \
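
Lib/token.py mirrors this header, so the numeric token value is the same on both sides of the C/Python boundary:

```python
# The Python-level token module carries the same numbering as token.h.
import token

print(token.NEWLINE)                    # 4
print(token.tok_name[token.NEWLINE])    # 'NEWLINE'
print(token.ISTERMINAL(token.NEWLINE))  # True
```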
|
/third_party/python/Lib/ |
D | tokenize.py |
   206   elif tok_type in (NEWLINE, NL):
   217   if tok_type in (NEWLINE, NL):
   225   startline = token[0] in (NEWLINE, NL)
   251   elif toknum in (NEWLINE, NL):
   542   yield TokenInfo(NEWLINE, token, spos, epos, line)
   608   yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
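
The NEWLINE/NL bookkeeping in Untokenizer is what lets untokenize() reproduce the original line structure from a token stream:

```python
# Round-trip: the NEWLINE and NL tokens recorded by tokenize() carry
# enough position information for untokenize() to rebuild the source.
import io
from tokenize import generate_tokens, untokenize

src = "a = 1\nb = (2,\n     3)\n"
tokens = list(generate_tokens(io.StringIO(src).readline))
print(untokenize(tokens) == src)  # True
```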
|
D | tabnanny.py |
   280   NEWLINE = tokenize.NEWLINE
   286   if type == NEWLINE:
|
/third_party/python/Lib/lib2to3/fixes/ |
D | fix_metaclass.py |
    96   if node.children and node.children[-1].type == token.NEWLINE:
   220   node.append_child(Leaf(token.NEWLINE, '\n'))
   228   suite.insert_child(-1, Leaf(token.NEWLINE, '\n'))
|
/third_party/python/Doc/library/ |
D | tokenize.rst |
   241   1,16-1,17: NEWLINE '\n'
   247   2,26-2,27: NEWLINE '\n'
   253   4,11-4,12: NEWLINE '\n'
   267   1,16-1,17: NEWLINE '\n'
   273   2,26-2,27: NEWLINE '\n'
   279   4,11-4,12: NEWLINE '\n'
|
/third_party/node/deps/npm/node_modules/json-parse-even-better-errors/lib/ |
D | index.js |
     4   const NEWLINE = Symbol.for('newline')   constant
   105   result[NEWLINE] = match[1] ?? DEFAULT_NEWLINE
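
The package stashes the newline style it detected under a Symbol so re-serialization can reproduce the source's line endings. A Python stand-in for the same idea (a hedged sketch; the real detection logic in index.js differs):

```python
# Minimal sketch (Python stand-in for the JS package's Symbol trick):
# remember which newline the source used and reapply it on output.
import json
import re

def parse_with_newline(text):
    m = re.search(r"\r?\n", text)
    return json.loads(text), (m.group(0) if m else "\n")

def stringify(obj, newline, indent=2):
    return json.dumps(obj, indent=indent).replace("\n", newline) + newline

obj, nl = parse_with_newline('{\r\n  "a": 1\r\n}\r\n')
print(repr(stringify(obj, nl)))  # CRLF line endings preserved
```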
|