| /third_party/python/Lib/distutils/command/ |
| D | install_lib.py | 23 # 1) no compilation at all (--no-compile --no-optimize) 24 # 2) compile .pyc only (--compile --no-optimize; default) 25 # 3) compile .pyc and "opt-1" .pyc (--compile --optimize) 26 # 4) compile "opt-1" .pyc only (--no-compile --optimize) 27 # 5) compile .pyc and "opt-2" .pyc (--compile --optimize-more) 28 # 6) compile "opt-2" .pyc only (--no-compile --optimize-more) 30 # The UI for this is two options, 'compile' and 'optimize'. 32 # generate .pyc files. 'optimize' is three-way (0, 1, or 2), and 42 ('optimize=', 'O', 57 self.optimize = None [all …]
|
| D | build_py.py | 23 ('optimize=', 'O', 39 self.optimize = 0 60 if not isinstance(self.optimize, int): 62 self.optimize = int(self.optimize) 63 assert 0 <= self.optimize <= 2 65 raise DistutilsOptionError("optimize must be 0, 1, or 2") 318 if self.optimize > 0: 320 filename, optimization=self.optimize)) 388 byte_compile(files, optimize=0, 390 if self.optimize > 0: [all …]
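The two distutils hits above describe how the `compile` and `optimize` options map onto byte-compilation: `compile` controls whether `.pyc` files are written at all, and `optimize` (0, 1, or 2) selects plain, `-O`, or `-OO` bytecode, with anything else rejected by `build_py`. A minimal sketch of that mapping using `distutils.util.byte_compile`, the helper both commands call internally; the input path is hypothetical, not taken from the snippets:

```python
# Sketch of the compile/optimize levels described above.
# "build/lib/pkg/module.py" is a hypothetical file used for illustration.
from distutils.util import byte_compile

files = ["build/lib/pkg/module.py"]

# --compile --no-optimize (the default): plain .pyc
byte_compile(files, optimize=0)

# --compile --optimize: install_lib runs an extra "opt-1" pass
# (equivalent to python -O); each call writes one flavour of .pyc
byte_compile(files, optimize=1)

# --no-compile --optimize-more: only "opt-2" .pyc
# (equivalent to python -OO, which also strips docstrings)
byte_compile(files, optimize=2)
```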
|
| /third_party/rust/rust/tests/ui/feature-gates/ |
| D | feature-gate-optimize_attribute.rs | 2 #![optimize(speed)] //~ ERROR the `#[optimize]` attribute is an experimental feature 4 #[optimize(size)] //~ ERROR the `#[optimize]` attribute is an experimental feature 7 #[optimize(size)] //~ ERROR the `#[optimize]` attribute is an experimental feature 10 #[optimize(speed)] //~ ERROR the `#[optimize]` attribute is an experimental feature 13 #[optimize(banana)] 14 //~^ ERROR the `#[optimize]` attribute is an experimental feature
|
| D | feature-gate-optimize_attribute.stderr | 1 error[E0658]: the `#[optimize]` attribute is an experimental feature 4 LL | #[optimize(size)] 10 error[E0658]: the `#[optimize]` attribute is an experimental feature 13 LL | #[optimize(speed)] 19 error[E0658]: the `#[optimize]` attribute is an experimental feature 22 LL | #[optimize(banana)] 28 error[E0658]: the `#[optimize]` attribute is an experimental feature 31 LL | #[optimize(size)] 37 error[E0658]: the `#[optimize]` attribute is an experimental feature 40 LL | #![optimize(speed)] [all …]
|
| /third_party/python/Lib/distutils/tests/ |
| D | test_install_lib.py | 25 self.assertEqual(cmd.optimize, 0) 27 # optimize must be 0, 1, or 2 28 cmd.optimize = 'foo' 30 cmd.optimize = '4' 33 cmd.optimize = '2' 35 self.assertEqual(cmd.optimize, 2) 43 cmd.compile = cmd.optimize = 1 50 optimization=cmd.optimize) 61 cmd.compile = cmd.optimize = 1 81 cmd.compile = cmd.optimize = 1 [all …]
|
| /third_party/rust/rust/compiler/rustc_error_codes/src/error_codes/ |
| D | E0722.md | 1 The `optimize` attribute was malformed. 8 #[optimize(something)] // error: invalid argument 12 The `#[optimize]` attribute should be used as follows: 14 - `#[optimize(size)]` -- instructs the optimization pipeline to generate code 17 - `#[optimize(speed)]` -- instructs the optimization pipeline to generate code 25 #[optimize(size)] 31 [RFC 2412]: https://rust-lang.github.io/rfcs/2412-optimize-attr.html
|
| /third_party/mindspore/mindspore-src/source/tests/st/scipy_st/ |
| D | test_optimize.py | 15 """st for scipy.optimize.""" 21 from scipy.optimize.linesearch import line_search_wolfe2 as osp_line_search 28 from mindspore.scipy.optimize.line_search import line_search as msp_line_search 29 from mindspore.scipy.optimize import linear_sum_assignment 80 ms_res = msp.optimize.minimize(func(mnp), x0_tensor, method='BFGS', 82 scipy_res = osp.optimize.minimize(func(onp), x0, method='BFGS') 101 ms_res = msp.optimize.minimize(func(mnp), x0_tensor, method='BFGS', 103 scipy_res = osp.optimize.minimize(func(onp), x0, method='BFGS') 122 ms_res = msp.optimize.minimize(func(mnp), x0_tensor, method='BFGS', 124 scipy_res = osp.optimize.minimize(func(onp), x0, method='BFGS') [all …]
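The test above compares `mindspore.scipy.optimize.minimize` against `scipy.optimize.minimize` with the BFGS method, building the objective from a numpy-like module. A rough sketch of the same pattern on a simple quadratic; the objective and starting point are illustrative, and the result is assumed to expose the solution as `.x`, mirroring scipy:

```python
# Sketch of the BFGS comparison exercised by the test above.
# The objective and x0 here are made up; only the call pattern follows the snippet.
import numpy as onp
from scipy import optimize as osp_optimize
import mindspore.numpy as mnp
from mindspore.scipy import optimize as msp_optimize
from mindspore import Tensor

def make_func(np_module):
    # Same pattern as the test: parametrize the objective by the numpy-like module.
    def func(x):
        return np_module.sum((x - 1.0) ** 2)
    return func

x0 = onp.zeros(5, dtype=onp.float32)

scipy_res = osp_optimize.minimize(make_func(onp), x0, method='BFGS')
ms_res = msp_optimize.minimize(make_func(mnp), Tensor(x0), method='BFGS')

print(scipy_res.x)
print(ms_res.x)  # assumed to mirror scipy's OptimizeResult layout
```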
|
| /third_party/python/Lib/ |
| D | compileall.py | 49 rx=None, quiet=0, legacy=False, optimize=-1, workers=1, argument 64 optimize: int or list of optimization levels or -1 for level of 107 optimize=optimize, 118 legacy, optimize, invalidation_mode, 126 legacy=False, optimize=-1, argument 140 optimize: int or list of optimization levels or -1 for level of 183 if isinstance(optimize, int): 184 optimize = [optimize] 188 optimize = sorted(set(optimize)) 190 if hardlink_dupes and len(optimize) < 2: [all …]
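As the `compileall.py` hits above note, `compile_dir` takes either a single optimization level, a list of levels (Python 3.9+), or -1 to reuse the interpreter's own level, sorting and deduplicating list input. A short sketch, assuming a hypothetical package directory `./pkg`:

```python
# Sketch of the optimize parameter described above; "./pkg" is hypothetical.
import compileall

# Single level: write opt-2 .pyc files (docstrings stripped, like -OO).
compileall.compile_dir("./pkg", optimize=2, quiet=1)

# List of levels (Python 3.9+): write .pyc files for levels 0, 1 and 2
# side by side; this is the form the snippet sorts and deduplicates.
compileall.compile_dir("./pkg", optimize=[0, 1, 2], quiet=1)
```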
|
| /third_party/mindspore/mindspore-src/source/docs/api/api_python_en/ |
| D | mindspore.scipy.rst | 29 mindspore.scipy.optimize 32 .. automodule:: mindspore.scipy.optimize 39 mindspore.scipy.optimize.line_search 40 mindspore.scipy.optimize.linear_sum_assignment 41 mindspore.scipy.optimize.minimize
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/ascend/hal/hardware/ |
| D | ge_graph_optimization.cc | 82 MS_LOG(DEBUG) << "Status record: start optimize ge graph. graph id: " << graph->graph_id(); in OptimizeGEGraph() 88 MS_LOG(DEBUG) << "Status record: end optimize ge graph. graph id: " << graph->graph_id(); in OptimizeGEGraph() 104 MS_LOG(DEBUG) << "Status record: end optimize ge graph. graph id: " << graph->graph_id(); in OptimizeGEGraph() 114 MS_LOG(DEBUG) << "Status record: start optimize acl graph. graph id: " << graph->graph_id(); in OptimizeACLGraph() 120 MS_LOG(DEBUG) << "Status record: end optimize acl graph. graph id: " << graph->graph_id(); in OptimizeACLGraph() 128 MS_LOG(DEBUG) << "Status record: end optimize acl graph. graph id: " << graph->graph_id(); in OptimizeACLGraph() 139 …MS_LOG(DEBUG) << "Status record: start optimize acl graph after kernel select. graph id: " << grap… in OptimizeACLGraphAfterKernelSelect() 145 …MS_LOG(DEBUG) << "Status record: end optimize acl graph after kernel select. graph id: " << graph-… in OptimizeACLGraphAfterKernelSelect() 159 …MS_LOG(DEBUG) << "Status record: end optimize acl graph after kernel select. graph id: " << graph-… in OptimizeACLGraphAfterKernelSelect() 164 …MS_LOG(DEBUG) << "Status record: start optimize acl graph after inline. graph id: " << graph->grap… in OptimizeACLGraphAfterInline() [all …]
|
| /third_party/skia/third_party/externals/swiftshader/src/Vulkan/ |
| D | VkPipelineCache.cpp | 23 bool optimize) in SpirvBinaryKey() argument 26 , optimize(optimize) in SpirvBinaryKey() 43 if(optimize != other.optimize) in operator <() 45 return !optimize && other.optimize; in operator <()
|
| /third_party/python/Mac/PythonLauncher/ |
| D | FileSettings.m | 76 optimize = source->optimize; 186 optimize = [source optimize]; 202 [NSNumber numberWithBool: optimize], @"optimize", 228 value = [dict objectForKey: @"optimize"]; 229 if (value) optimize = [value boolValue]; 283 optimize?" -O":"", 300 - (BOOL) optimize { return optimize;}; method
|
| /third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/WebAssembly/ |
| D | WebAssemblyOptimizeReturned.cpp | 1 //===-- WebAssemblyOptimizeReturned.cpp - Optimize "returned" attributes --===// 10 /// Optimize calls with "returned" attributes for WebAssembly. 21 #define DEBUG_TYPE "wasm-optimize-returned" 27 return "WebAssembly Optimize Returned"; in getPassName() 51 "Optimize calls with \"returned\" attributes for WebAssembly", 73 LLVM_DEBUG(dbgs() << "********** Optimize returned Attributes **********\n" in runOnFunction()
|
| /third_party/musl/ |
| D | configure | 31 --enable-optimize=... optimize listed components for speed over size [auto] 137 optimize=auto 161 --enable-optimize) optimize=yes ;; 162 --enable-optimize=*) optimize=${arg#*=} ;; 163 --disable-optimize) optimize=no ;; 432 # Possibly add a -O option to CFLAGS and select modules to optimize with 433 # -O3 based on the status of --enable-optimize and provided CFLAGS. 436 case "x$optimize" in 439 printf "using provided CFLAGS\n" ;optimize=no 441 printf "using defaults\n" ; optimize=yes [all …]
|
| /third_party/mindspore/mindspore-src/source/docs/api/api_python/ |
| D | mindspore.scipy.rst | 29 mindspore.scipy.optimize 39 mindspore.scipy.optimize.line_search 40 mindspore.scipy.optimize.linear_sum_assignment 41 mindspore.scipy.optimize.minimize
|
| /third_party/python/Lib/test/ |
| D | test_property.py | 103 @unittest.skipIf(sys.flags.optimize >= 2, 109 @unittest.skipIf(sys.flags.optimize >= 2, 121 @unittest.skipIf(sys.flags.optimize >= 2, 154 @unittest.skipIf(sys.flags.optimize >= 2, 162 @unittest.skipIf(sys.flags.optimize >= 2, 186 @unittest.skipIf(sys.flags.optimize >= 2, 196 @unittest.skipIf(sys.flags.optimize >= 2, 257 @unittest.skipIf(sys.flags.optimize >= 2, 269 @unittest.skipIf(sys.flags.optimize >= 2, 302 @unittest.skipIf(sys.flags.optimize >= 2,
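The repeated `skipIf` guards above exist because `python -OO` (i.e. `sys.flags.optimize >= 2`) strips docstrings from compiled code, so docstring-dependent assertions cannot pass there. A minimal standalone version of the same pattern:

```python
# Standalone version of the skip pattern used throughout test_property.py.
import sys
import unittest

class PropertyDocTest(unittest.TestCase):
    @unittest.skipIf(sys.flags.optimize >= 2,
                     "Docstrings are omitted with -OO and above")
    def test_property_docstring(self):
        class C:
            @property
            def spam(self):
                """Spam spam spam."""
                return 1
        # Under -OO the getter's docstring would be None, hence the skip.
        self.assertEqual(C.spam.__doc__, "Spam spam spam.")

if __name__ == "__main__":
    unittest.main()
```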
|
| /third_party/mindspore/mindspore-src/source/tests/ut/cpp/pre_activate/ascend/ir_fission/ |
| D | concat_fission_test.cc | 54 FuncGraphPtr new_graph = optimizer->Optimize(kg); in TEST_F() 63 auto kg_after2 = optimizer2->Optimize(kg_after); in TEST_F() 86 FuncGraphPtr new_graph = optimizer->Optimize(kg); in TEST_F() 95 auto kg_after2 = optimizer2->Optimize(kg_after); in TEST_F() 118 FuncGraphPtr new_graph = optimizer->Optimize(kg); in TEST_F() 127 auto kg_after2 = optimizer2->Optimize(kg_after); in TEST_F() 150 FuncGraphPtr new_graph = optimizer->Optimize(kg); in TEST_F() 159 auto kg_after2 = optimizer2->Optimize(kg_after); in TEST_F() 181 FuncGraphPtr new_graph = optimizer->Optimize(kg); in TEST_F()
|
| D | addn_fission_test.cc | 54 FuncGraphPtr new_graph = optimizer->Optimize(kg); in TEST_F() 63 auto kg_after2 = optimizer2->Optimize(kg_after); in TEST_F() 86 FuncGraphPtr new_graph = optimizer->Optimize(kg); in TEST_F() 95 auto kg_after2 = optimizer2->Optimize(kg_after); in TEST_F() 118 FuncGraphPtr new_graph = optimizer->Optimize(kg); in TEST_F() 127 auto kg_after2 = optimizer2->Optimize(kg_after); in TEST_F() 150 FuncGraphPtr new_graph = optimizer->Optimize(kg); in TEST_F() 159 auto kg_after2 = optimizer2->Optimize(kg_after); in TEST_F() 181 FuncGraphPtr new_graph = optimizer->Optimize(kg); in TEST_F()
|
| /third_party/mindspore/mindspore-src/source/tests/ut/cpp/pre_activate/ascend/ir_fusion/ |
| D | adam_apply_one_with_decay_rule_test.cc | 48 FuncGraphPtr new_graph = optimizer->Optimize(fg); in TEST_F() 69 FuncGraphPtr new_graph = optimizer->Optimize(fg); in TEST_F() 90 FuncGraphPtr new_graph = optimizer->Optimize(fg); in TEST_F() 111 FuncGraphPtr new_graph = optimizer->Optimize(fg); in TEST_F() 132 FuncGraphPtr new_graph = optimizer->Optimize(fg); in TEST_F() 153 FuncGraphPtr new_graph = optimizer->Optimize(fg); in TEST_F() 174 FuncGraphPtr new_graph = optimizer->Optimize(fg); in TEST_F() 195 FuncGraphPtr new_graph = optimizer->Optimize(fg); in TEST_F() 216 FuncGraphPtr new_graph = optimizer->Optimize(fg); in TEST_F() 237 FuncGraphPtr new_graph = optimizer->Optimize(fg); in TEST_F()
|
| /third_party/mindspore/mindspore-src/source/tests/ut/cpp/pre_activate/pass/ |
| D | common_subexpression_elimination_test.cc | 71 optimizer->Optimize(func_graph); in TEST_F() 80 optimizer->Optimize(func_graph); in TEST_F() 111 optimizer->Optimize(func_graph); in TEST_F() 119 optimizer->Optimize(func_graph); in TEST_F() 123 optimizer->Optimize(func_graph); in TEST_F() 161 optimizer->Optimize(func_graph); in TEST_F() 172 optimizer->Optimize(func_graph); in TEST_F() 183 optimizer->Optimize(func_graph); in TEST_F() 194 optimizer->Optimize(func_graph); in TEST_F()
|
| /third_party/toybox/ |
| D | configure | 17 # CFLAGS and OPTIMIZE are different so you can add extra CFLAGS without 22 [ -z "$OPTIMIZE" ] && OPTIMIZE="-Os -ffunction-sections -fdata-sections -fno-asynchronous-unwind-ta… 24 [ -z "$ASAN" ] || { CFLAGS="$CFLAGS -O1 -g -fno-omit-frame-pointer -fno-optimize-sibling-calls -fsa…
|
| /third_party/rust/rust/compiler/rustc_mir_transform/src/ |
| D | match_branches.rs | 75 // Only optimize switch int statements in run_pass() 79 // Check that destinations are identical, and if not, then don't optimize this block in run_pass() 85 // and match up 1-1, if not don't optimize this block. in run_pass() 93 // If two statements are exactly the same, we can optimize. in run_pass() 96 … // If two statements are const bool assignments to the same place, we can optimize. in run_pass() 106 // Otherwise we cannot optimize. Try another block. in run_pass() 110 // Take ownership of items now that we know we can optimize. in run_pass()
|
| /third_party/mesa3d/src/compiler/nir/ |
| D | nir_lower_scratch_to_var.c | 9 * It is challenging to optimize the complex deref chains resulting from 21 * original derefs (that we failed to optimize), but instead just to model array 22 * access that other NIR passes can optimize. The resulting array accesses will 23 * generally optimize out if there are no indirects, or can be lowered to bcsel 31 * challenging to optimize the resulting pack/unpack on some drivers. Larger
|
| /third_party/skia/third_party/externals/angle2/src/compiler/translator/ |
| D | Pragma.h | 20 TPragma() : optimize(true), debug(false) {} in TPragma() 21 TPragma(bool o, bool d) : optimize(o), debug(d) {} in TPragma() 23 bool optimize; member
|
| /third_party/mesa3d/src/gallium/drivers/r600/sfn/tests/ |
| D | sfn_optimizer_test.cpp | 81 optimize(*sh); in TEST_F() 123 optimize(*sh); in TEST_F() 159 optimize(*sh); in TEST_F() 198 optimize(*sh); in TEST_F() 239 optimize(*sh); in TEST_F() 336 optimize(*sh); in TEST_F() 378 optimize(*sh); in TEST_F() 427 optimize(*sh); in TEST_F() 446 optimize(*sh); in TEST_F() 454 optimize(*sh); in TEST_F() [all …]
|