/external/tensorflow/tensorflow/python/distribute/ |
D | strategy_gather_test.py |
    15  """Tests for common methods in strategy classes."""
    43  strategy=[
    60  strategy=[
    71  strategy):  argument
    72  distributed_values = strategy.experimental_distribute_values_from_function(
    76  return strategy.gather(distributed_values, axis=axis)
    82  value_on_replica for _ in range(strategy.num_replicas_in_sync)
    87  def testGatherPerReplicaDense1D0Axis(self, strategy, pure_eager):  argument
    91  self._gather_same_shape_and_verify(single_value, axis, pure_eager, strategy)
    93  def testGatherPerReplicaDense2D0Axis(self, strategy, pure_eager):  argument
    [all …]
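These tests exercise `Strategy.gather`, which concatenates per-replica values along an axis in cross-replica context. A minimal sketch of the pattern, assuming a single-device `MirroredStrategy` stands in for the multi-replica combinations the tests use:

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy(devices=["CPU:0"])

def value_fn(ctx):
  # One tensor per replica; gather stacks them along the requested axis.
  return tf.constant([[ctx.replica_id_in_sync_group]], dtype=tf.float32)

distributed_values = strategy.experimental_distribute_values_from_function(value_fn)
gathered = strategy.gather(distributed_values, axis=0)
print(gathered)  # shape: (num_replicas_in_sync, 1)
```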
|
D | strategy_common_test.py |
    15  """Tests for common methods in strategy classes."""
    42  strategy=[
    49  def testCaptureReplicaId(self, strategy):  argument
    61  return strategy.run(f)
    65  def testMergeCallInitScope(self, strategy):  argument
    66  with strategy.scope():
    83  return strategy.run(replica_fn)
    85  result = strategy.experimental_local_results(fn())
    86  self.assertAllClose(result, [12] * _get_num_replicas_per_client(strategy))
    217  strategy=[
    [all …]
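The merge-call test above relies on `ReplicaContext.merge_call`, which hops from per-replica code into cross-replica context. A rough sketch of that shape (the `4.0 * 3.0` values are chosen only to reproduce the `[12] * replicas` assertion; a single CPU device stands in for the test's strategy combinations):

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy(devices=["CPU:0"])

@tf.function
def fn():
  def replica_fn():
    ctx = tf.distribute.get_replica_context()
    # merge_fn runs once in cross-replica context and receives the strategy.
    return ctx.merge_call(lambda strat, v: v * 3.0, args=(tf.constant(4.0),))
  return strategy.run(replica_fn)

result = strategy.experimental_local_results(fn())
print(result)  # one 12.0 per local replica
```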
|
D | strategy_combinations_test.py |
    40  strategy=strategy_combinations.two_replica_strategies,
    42  def testTwoReplicaStrategy(self, strategy):  argument
    43  with strategy.scope():
    49  one_per_replica = strategy.run(one)
    50  num_replicas = strategy.reduce(
    56  strategy=strategy_combinations.four_replica_strategies,
    58  def testFourReplicaStrategy(self, strategy):  argument
    59  with strategy.scope():
    65  one_per_replica = strategy.run(one)
    66  num_replicas = strategy.reduce(
    [all …]
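The replica-count check in these tests is a handy idiom in its own right: run a function that returns 1 on every replica, then SUM-reduce. A sketch with a single-device strategy:

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy(devices=["CPU:0"])

with strategy.scope():
  @tf.function
  def one():
    return tf.constant(1.0)

  one_per_replica = strategy.run(one)
  num_replicas = strategy.reduce(
      tf.distribute.ReduceOp.SUM, one_per_replica, axis=None)
  print(num_replicas)  # equals strategy.num_replicas_in_sync
```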
|
D | tpu_strategy_test.py |
    85  strategy = tpu_lib.TPUStrategyV2(resolver)
    86  strategy._enable_packed_variable_in_eager_mode = enable_packed_var
    87  return strategy
    259  strategy = get_tpu_strategy(enable_packed_var)
    260  with strategy.scope():
    273  strategy = get_tpu_strategy(enable_packed_var)
    275  with strategy.scope():
    300  strategy.experimental_distribute_datasets_from_function(dataset_fn))
    310  strategy.run(step_fn, args=(next(iterator),))
    322  strategy = get_tpu_strategy(enable_packed_var)
    [all …]
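`get_tpu_strategy` builds a `tpu_lib.TPUStrategyV2` (the internal name behind `tf.distribute.TPUStrategy`) from a cluster resolver. The public-API equivalent of that setup, as a sketch that needs an actual TPU (`tpu=""` means a locally attached one):

```python
import tensorflow as tf

resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="")
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
strategy = tf.distribute.TPUStrategy(resolver)
```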
|
D | distribution_strategy_context.py |
    15  """Utility to get tf.distribute.Strategy related contexts."""
    34  # replica or cross-replica context for a particular tf.distribute.Strategy.
    40  self.strategy = dist
    47  def __init__(self, strategy):  argument
    48  _ThreadMode.__init__(self, strategy, strategy, None)
    54  _ThreadMode.__init__(self, replica_ctx.strategy, None, replica_ctx)
    105  strategy = tf.distribute.MirroredStrategy(devices=["GPU:0", "GPU:1"])
    106  with strategy.scope():
    113  non_aggregated = strategy.run(replica_fn)
    120  aggregated = strategy.run(replica_fn)
    [all …]
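This module backs `tf.distribute.get_replica_context`, `tf.distribute.in_cross_replica_context`, and `tf.distribute.get_strategy`. A small sketch of how the two contexts differ (two GPUs as in the docstring; swap in `["CPU:0"]` to try it without GPUs):

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy(devices=["GPU:0", "GPU:1"])

with strategy.scope():
  print(tf.distribute.in_cross_replica_context())  # True inside scope()
  print(tf.distribute.get_replica_context())       # None in cross-replica context

  def replica_fn():
    ctx = tf.distribute.get_replica_context()      # set during strategy.run
    return ctx.replica_id_in_sync_group

  print(strategy.experimental_local_results(strategy.run(replica_fn)))
```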
|
D | strategy_test_lib.py |
    141  def is_mirrored_strategy(strategy: distribute_lib.Strategy) -> bool:  argument
    143  strategy,
    148  strategy: distribute_lib.Strategy) -> bool:  argument
    149  return isinstance(strategy, (mwms_lib.CollectiveAllReduceStrategy,
    153  def is_tpu_strategy(strategy: distribute_lib.Strategy) -> bool:  argument
    154  return isinstance(strategy,
    358  self, strategy, input_fn, expected_values, ignore_order=False):  argument
    361  iterable = strategy.distribute_datasets_from_function(input_fn)
    367  list(strategy.experimental_local_results(next(iterator))))
    371  self.evaluate(strategy.experimental_local_results(next(iterator)))
    [all …]
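The `_test_input_fn_iterable` helper (line 358 onwards) drives `distribute_datasets_from_function`. The pattern it exercises looks roughly like this sketch, with made-up batch sizes:

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy(devices=["CPU:0"])

def input_fn(input_context):
  batch = input_context.get_per_replica_batch_size(global_batch_size=4)
  ds = tf.data.Dataset.range(8).batch(batch)
  # Each input pipeline reads only its own shard of the data.
  return ds.shard(input_context.num_input_pipelines,
                  input_context.input_pipeline_id)

iterable = strategy.distribute_datasets_from_function(input_fn)
iterator = iter(iterable)
print(list(strategy.experimental_local_results(next(iterator))))
```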
|
D | tpu_strategy_model_parallelism_test.py |
    68  strategy = tpu_lib.TPUStrategyV2(
    72  return strategy, num_replicas
    82  strategy, num_replicas = get_tpu_strategy()
    83  with strategy.scope():
    85  with strategy.extended.experimental_logical_device(1):
    88  self.assertLen(strategy.experimental_local_results(v), num_replicas)
    89  self.assertLen(strategy.experimental_local_results(w), num_replicas)
    91  strategy.experimental_local_results(v)[0].device)
    93  strategy.experimental_local_results(w)[0].device)
    107  result = strategy.run(f, args=(5.,))
    [all …]
|
/external/tensorflow/tensorflow/python/keras/distribute/ |
D | distributed_file_utils.py |
    53  def _get_base_dirpath(strategy):  argument
    54  task_id = strategy.extended._task_id  # pylint: disable=protected-access
    58  def _is_temp_dir(dirpath, strategy):  argument
    59  return dirpath.endswith(_get_base_dirpath(strategy))
    62  def _get_temp_dir(dirpath, strategy):  argument
    63  if _is_temp_dir(dirpath, strategy):
    66  temp_dir = os.path.join(dirpath, _get_base_dirpath(strategy))
    71  def write_dirpath(dirpath, strategy):  argument
    78  strategy: The tf.distribute strategy object currently used.
    83  if strategy is None:
    [all …]
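These helpers implement the write-to-temp-then-discard convention for multi-worker saving: every worker must write (some filesystems require it), but only the chief's files should survive, so non-chief workers write into a task-specific temporary subdirectory. A standalone sketch of the same idea; the function and directory names are illustrative, not the module's API:

```python
import os

def worker_dirpath(dirpath, task_type, task_id):
  # Chief writes to the real directory; other workers get a temp subdir
  # keyed on their task id, which is deleted once saving completes.
  if task_type is None or task_type == "chief":
    return dirpath
  return os.path.join(dirpath, "workertemp_" + str(task_id))
```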
|
D | dataset_creator_model_fit_test.py |
    40  strategy=strategy_combinations.all_strategies +
    52  def testModelFit(self, strategy):  argument
    53  model = self._model_fit(strategy)
    56  def testModelFitwithStepsPerEpochNegativeOne(self, strategy):  argument
    64  if strategy._should_use_with_coordinator:
    67  strategy,
    74  strategy,
    80  def testModelFitWithNumpyData(self, strategy):  argument
    84  strategy,
    92  def testModelFitWithTensorData(self, strategy):  argument
    [all …]
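The `_model_fit` helper behind these tests drives `Model.fit` with a `tf.keras.utils.experimental.DatasetCreator`, which defers dataset construction until `fit` knows the distribution context. A minimal, strategy-free sketch with a made-up model and data:

```python
import tensorflow as tf

def dataset_fn(input_context):
  batch = input_context.get_per_replica_batch_size(8)
  x = tf.random.uniform((64, 1))
  y = tf.random.uniform((64, 1))
  return tf.data.Dataset.from_tensor_slices((x, y)).batch(batch).repeat()

model = tf.keras.Sequential([tf.keras.layers.Dense(1)])
model.compile(optimizer="sgd", loss="mse")
# steps_per_epoch is mandatory with DatasetCreator: the dataset is built
# lazily, so Keras cannot infer its cardinality.
model.fit(tf.keras.utils.experimental.DatasetCreator(dataset_fn),
          epochs=2, steps_per_epoch=4)
```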
|
D | dataset_creator_model_fit_ps_only_test.py |
    31  strategy=strategy_combinations.parameter_server_strategies_multi_worker,
    36  def testModelFitWithRunEagerly(self, strategy):  argument
    40  self._model_fit(strategy, run_eagerly=True)
    42  def testModelFitWithDatasetInstance(self, strategy):  argument
    49  strategy, x=dataset_ops.DatasetV2.from_tensor_slices([1, 1]))
    51  def testModelPredict(self, strategy):  argument
    52  model, _ = self._model_compile(strategy)
    57  def testClusterCoordinatorSingleInstance(self, strategy):  argument
    58  model = self._model_fit(strategy)
    59  strategy = model.distribute_strategy
    [all …]
|
/external/tensorflow/tensorflow/python/tpu/ |
D | tpu_outside_compilation_test.py |
    133  strategy = get_tpu_strategy()
    146  return strategy.run(tpu_fn, args=(25.0,))
    149  strategy.experimental_local_results(train_step()),
    150  constant_op.constant(35., shape=(strategy.num_replicas_in_sync))
    153  strategy = get_tpu_strategy()
    166  return strategy.run(tpu_fn, args=(25.0,))
    169  strategy.experimental_local_results(train_step()),
    170  constant_op.constant(35., shape=(strategy.num_replicas_in_sync))
    173  strategy = get_tpu_strategy()
    187  return strategy.run(tpu_fn, args=(25.0,))
    [all …]
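These tests wrap host-side ops in outside compilation so they run on the CPU while the rest of the step stays on the TPU. A sketch of the pattern; it needs a real TPU plus a `TPUStrategy` like `get_tpu_strategy()` above, and the `+5.0` steps are chosen only to match the `25.0 -> 35.0` assertion:

```python
import tensorflow as tf

def host_fn(x):
  return x + 5.0  # runs on the host CPU rather than the TPU

def tpu_fn(x):
  y = x + 5.0
  # Ops that are unsupported (or unwanted) on TPU are routed to the host.
  return tf.tpu.outside_compilation(host_fn, y)

@tf.function
def train_step(strategy):
  return strategy.run(tpu_fn, args=(25.0,))  # 35.0 per replica
```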
|
/external/jacoco/org.jacoco.core.test/src/org/jacoco/core/internal/instr/ |
D | ProbeArrayStrategyFactoryTest.java |
    52  final IProbeArrayStrategy strategy = test(Opcodes.V1_1, 0, false, true,  in testClass1() local
    54  assertEquals(ClassFieldProbeArrayStrategy.class, strategy.getClass());  in testClass1()
    61  final IProbeArrayStrategy strategy = test(Opcodes.V1_2, 0, false, true,  in testClass2() local
    63  assertEquals(ClassFieldProbeArrayStrategy.class, strategy.getClass());  in testClass2()
    70  final IProbeArrayStrategy strategy = test(Opcodes.V1_3, 0, false, true,  in testClass3() local
    72  assertEquals(ClassFieldProbeArrayStrategy.class, strategy.getClass());  in testClass3()
    79  final IProbeArrayStrategy strategy = test(Opcodes.V1_4, 0, false, true,  in testClass4() local
    81  assertEquals(ClassFieldProbeArrayStrategy.class, strategy.getClass());  in testClass4()
    88  final IProbeArrayStrategy strategy = test(Opcodes.V1_5, 0, false, true,  in testClass5() local
    90  assertEquals(ClassFieldProbeArrayStrategy.class, strategy.getClass());  in testClass5()
    [all …]
|
/external/tensorflow/tensorflow/python/distribute/integration_test/ |
D | saved_model_test.py |
    55  strategy=[
    69  def test_read_sync_on_read_variable(self, strategy):  argument
    90  with strategy.scope():
    100  strategy=[
    109  # tf.distribute.Strategy and used for serving later. Serving usually only uses
    110  # one device and this is simulated by loading the model under no strategy
    115  # tf.distribute.Strategy. The saved tf.function should be an inference
    128  def test_read_sync_on_read_variable(self, strategy):  argument
    145  with strategy.scope():
    151  self.evaluate(strategy.experimental_local_results(m.v)), [0.5, 0.5])
    [all …]
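As the comments at lines 109-115 say, the scenario is: train and save under a `tf.distribute.Strategy`, then serve by loading with no strategy at all. A minimal sketch of that round trip (module, variable value, and path are made up):

```python
import tensorflow as tf

strategy = tf.distribute.MirroredStrategy(devices=["CPU:0"])

class Model(tf.Module):
  def __init__(self):
    self.v = tf.Variable(0.5)

  @tf.function(input_signature=[])
  def read_v(self):
    return self.v.read_value()

with strategy.scope():
  m = Model()                      # v is a distributed variable here
tf.saved_model.save(m, "/tmp/strategy_saved_model")

loaded = tf.saved_model.load("/tmp/strategy_saved_model")  # plain serving
print(loaded.read_v())             # regular tensor, no strategy involved
```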
|
/external/ComputeLibrary/src/core/NEON/kernels/arm_gemm/ |
D | gemm_interleaved.hpp |
    65  template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
    70  strategy &strat, const To *a_ptr, const To *b_panel, size_t b_stride, Tri *c_panel,
    79  template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
    84  strategy &strat, const To *a_ptr, const To *b_panel, size_t, Tri *c_panel,  in run()
    89  const int bblocks = iceildiv(n_max - n_0, strategy::out_width());  in run()
    93  …auto p=prof.ScopedProfiler(PROFILE_KERNEL, (strategy::out_height() * bblocks * strategy::out_width…  in run()
    101  …auto p=prof.ScopedProfiler(PROFILE_MERGE, (strategy::out_height() * bblocks * strategy::out_width(…  in run()
    109  template<typename strategy, typename To, typename Tr, typename Tri, typename Tab>
    114  strategy &strat, const To *a_ptr, const To *b_panel, size_t b_stride, Tri *c_panel,  in run()
    121  const int bblocks = iceildiv(n_max - n_0, strategy::out_width());  in run()
    [all …]
|
D | gemm_hybrid_indirect.hpp |
    61  template<typename strategy, typename Tlo, typename Tro, typename Tr>
    66  …const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<…
    72  template<typename strategy, typename Tlo, typename Tro, typename Tr>
    77  …const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<…  in run()
    81  …auto p = prof.ScopedProfiler(PROFILE_KERNEL, (unsigned long)M * kern_k * roundup(N, strategy::out_…  in run()
    87  if (bias_ptr && !accumulate && (N % strategy::out_width() != 0)) {  in run()
    89  unsigned int N_remainder = N % strategy::out_width();  in run()
    107  Tr *bias_pad_buffer = reinterpret_cast<Tr *>(alloca(strategy::out_width() * sizeof(Tr)));  in run()
    118  template<typename strategy, typename Tlo, typename Tro, typename Tr>
    123  …const strategy &strat, unsigned int num_strings, const unsigned int *string_ptr, IndirectInputArg<…  in run()
    [all …]
|
D | gemm_interleaved_pretransposed_2d.hpp |
    51  template<typename strategy, typename To, typename Tr>
    53  typedef typename strategy::operand_type Toi;
    54  typedef typename strategy::result_type Tri;
    89  const GemmInterleavedPretransposed2d<strategy, To, Tr> &_parent;
    100  blockwalker(const GemmInterleavedPretransposed2d<strategy, To, Tr> &parent)  in blockwalker() argument
    105  …blockwalker(const GemmInterleavedPretransposed2d<strategy, To, Tr> &parent, unsigned int x0, unsig…  in blockwalker() argument
    169  return ROUND_UP(sizeof(Tri) * _x_block * strategy::out_height());  in get_c_working_size()
    184  strategy strat(_ci);  in execute_pretranspose()
    187  const unsigned int window_per_batch = _Mround / strategy::out_height();  in execute_pretranspose()
    192  unsigned int m_0 = (m_start - (batch_0 * window_per_batch)) * strategy::out_height();  in execute_pretranspose()
    [all …]
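In these three arm_gemm headers, `strategy` is a template parameter supplying fixed output-tile dimensions (`out_width()`, `out_height()`) that the blocked GEMM walks over. A NumPy toy model of just that blocking idea, with illustrative tile sizes; the real kernels also interleave and pretranspose operands, which this sketch omits:

```python
import numpy as np

def tiled_gemm(A, B, out_height=8, out_width=12):
  # out_height/out_width play the role of strategy::out_height()/out_width().
  M, K = A.shape
  K2, N = B.shape
  assert K == K2
  C = np.zeros((M, N), dtype=A.dtype)
  for m0 in range(0, M, out_height):
    for n0 in range(0, N, out_width):
      # One "kernel" call produces a single out_height x out_width tile.
      C[m0:m0 + out_height, n0:n0 + out_width] = (
          A[m0:m0 + out_height, :] @ B[:, n0:n0 + out_width])
  return C
```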
|
/external/tensorflow/tensorflow/python/tpu/tests/ |
D | tpu_embedding_v2_valid_input_test.py |
    39  strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    45  dataset = self._create_sparse_dataset(strategy)
    48  strategy.experimental_distribute_dataset(
    61  return strategy.run(tpu_fn)
    68  return strategy.run(tpu_fn)
    71  embedding_and_set_gradients(data), strategy, 0)
    72  second = self._get_replica_numpy(embedding_only(data), strategy, 0)
    85  num_replicas = strategy.num_replicas_in_sync
    94  strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    96  sparse = self._create_sparse_dataset(strategy)
    [all …]
|
D | tpu_embedding_v2_invalid_input_test.py |
    53  strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    64  strategy.run(test_apply_1)
    74  strategy.run(test_apply_2)
    77  strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    79  dataset = self._create_dense_dataset(strategy, include_weights=True)
    81  strategy.experimental_distribute_dataset(
    93  return strategy.run(step)
    100  strategy, mid_level_api, _ = self._create_strategy_and_mid_level('sgd')
    102  sparse = self._create_sparse_dataset(strategy, include_weights=True)
    103  ragged = self._create_ragged_dataset(strategy, include_weights=True)
    [all …]
|
/external/ComputeLibrary/examples/gemm_tuner/ |
D | GemmTuner.py |
    40  # Gemm strategy
    41  Strategy = Enum("Strategy", ["Native", "ReshapedOnlyRHS", "Reshaped"])  variable
    61  # Gemm configuration for strategy Native
    76  # Gemm configuration for strategy Reshaped Only RHS
    102  # Gemm configuration for strategy Reshaped
    196  strategy: Strategy
    218  gemm_param, strategy, gemm_config, measurement = benchmark_result
    220  self._strategies.add(strategy)
    230  """ Get the best GEMMConfig set per GEMMParam per Strategy
    233  Tuple[GEMMParam, Strategy], List[Tuple[GEMMConfig, Measurement]]
    [all …]
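Line 41 builds `Strategy` with the functional `Enum` API rather than a class definition, which is worth knowing when extending the tuner. A short illustration of what that one-liner gives you:

```python
from enum import Enum

Strategy = Enum("Strategy", ["Native", "ReshapedOnlyRHS", "Reshaped"])

print(Strategy.Native.name, Strategy.Native.value)  # Native 1
print(Strategy["Reshaped"])   # lookup by name, e.g. when parsing benchmark output
print(list(Strategy))         # all members, in declaration order
```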
|
/external/gson/gson/src/test/java/com/google/gson/ |
D | ToNumberPolicyTest.java |
    29  ToNumberStrategy strategy = ToNumberPolicy.DOUBLE;  in testDouble() local
    30  assertEquals(10.1, strategy.readNumber(fromString("10.1")));  in testDouble()
    31  …assertEquals(3.141592653589793D, strategy.readNumber(fromString("3.141592653589793238462643383279"…  in testDouble()
    33  strategy.readNumber(fromString("1e400"));  in testDouble()
    39  strategy.readNumber(fromString("\"not-a-number\""));  in testDouble()
    46  ToNumberStrategy strategy = ToNumberPolicy.LAZILY_PARSED_NUMBER;  in testLazilyParsedNumber() local
    47  assertEquals(new LazilyParsedNumber("10.1"), strategy.readNumber(fromString("10.1")));  in testLazilyParsedNumber()
    48  …assertEquals(new LazilyParsedNumber("3.141592653589793238462643383279"), strategy.readNumber(fromS…  in testLazilyParsedNumber()
    49  assertEquals(new LazilyParsedNumber("1e400"), strategy.readNumber(fromString("1e400")));  in testLazilyParsedNumber()
    53  ToNumberStrategy strategy = ToNumberPolicy.LONG_OR_DOUBLE;  in testLongOrDouble() local
    [all …]
|
/external/objenesis/main/src/main/java/org/objenesis/ |
D | ObjenesisBase.java |
    19  import org.objenesis.strategy.InstantiatorStrategy;
    24  * Base class to extend if you want to have a class providing your own default strategy. Can also be
    31  /** Strategy used by this Objenesis implementation to create classes */
    32  protected final InstantiatorStrategy strategy;  field in ObjenesisBase
    34  /** Strategy cache. Key = Class, Value = InstantiatorStrategy */
    38  * Constructor allowing to pick a strategy and using cache
    40  * @param strategy Strategy to use
    42  public ObjenesisBase(InstantiatorStrategy strategy) {  in ObjenesisBase() argument
    43  this(strategy, true);  in ObjenesisBase()
    47  * Flexible constructor allowing to pick the strategy and if caching should be used
    [all …]
|
/external/squashfs-tools/squashfs-tools/ |
D | gzip_wrapper.c |
    33  static struct strategy strategy[] = {  variable
    112  for(i = 0; strategy[i].name; i++) {  in gzip_options()
    113  int n = strlen(strategy[i].name);  in gzip_options()
    114  if((strncmp(name, strategy[i].name, n) == 0) &&  in gzip_options()
    117  if(strategy[i].selected == 0) {  in gzip_options()
    118  strategy[i].selected = 1;  in gzip_options()
    125  if(strategy[i].name == NULL) {  in gzip_options()
    127  "strategy\n");  in gzip_options()
    152  if(strategy_count == 1 && strategy[0].selected) {  in gzip_options_post()
    154  strategy[0].selected = 0;  in gzip_options_post()
    [all …]
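The `strategy[]` table parsed in `gzip_options()` selects among zlib's deflate strategies (names such as `filtered` and `huffman_only`). The same knob is exposed by Python's `zlib`, which makes the effect easy to observe; this sketch is not part of squashfs-tools:

```python
import zlib

data = (b"run-length friendly " * 256) + bytes(range(256)) * 16
for name, strat in [("default", zlib.Z_DEFAULT_STRATEGY),
                    ("filtered", zlib.Z_FILTERED),
                    ("huffman_only", zlib.Z_HUFFMAN_ONLY)]:
  # compressobj(level, method, wbits, memLevel, strategy)
  c = zlib.compressobj(9, zlib.DEFLATED, 15, 8, strat)
  print(name, len(c.compress(data) + c.flush()))
```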
|
/external/python/google-api-python-client/docs/dyn/ |
D | dfareporting_v3_5.placementStrategies.html |
    82  <p class="firstline">Deletes an existing placement strategy.</p>
    85  <p class="firstline">Gets one placement strategy by ID.</p>
    88  <p class="firstline">Inserts a new placement strategy.</p>
    97  <p class="firstline">Updates an existing placement strategy. This method supports patch semantics.<…
    100  <p class="firstline">Updates an existing placement strategy.</p>
    109  <pre>Deletes an existing placement strategy.
    113  id: string, Placement strategy ID. (required)
    123  <pre>Gets one placement strategy by ID.
    127  id: string, Placement strategy ID. (required)
    136  { # Contains properties of a placement strategy.
    [all …]
|
D | dfareporting_v3_4.placementStrategies.html |
    82  <p class="firstline">Deletes an existing placement strategy.</p>
    85  <p class="firstline">Gets one placement strategy by ID.</p>
    88  <p class="firstline">Inserts a new placement strategy.</p>
    97  <p class="firstline">Updates an existing placement strategy. This method supports patch semantics.<…
    100  <p class="firstline">Updates an existing placement strategy.</p>
    109  <pre>Deletes an existing placement strategy.
    113  id: string, Placement strategy ID. (required)
    123  <pre>Gets one placement strategy by ID.
    127  id: string, Placement strategy ID. (required)
    136  { # Contains properties of a placement strategy.
    [all …]
|
D | dfareporting_v3_3.placementStrategies.html |
    82  <p class="firstline">Deletes an existing placement strategy.</p>
    85  <p class="firstline">Gets one placement strategy by ID.</p>
    88  <p class="firstline">Inserts a new placement strategy.</p>
    97  <p class="firstline">Updates an existing placement strategy. This method supports patch semantics.<…
    100  <p class="firstline">Updates an existing placement strategy.</p>
    109  <pre>Deletes an existing placement strategy.
    113  id: string, Placement strategy ID. (required)
    123  <pre>Gets one placement strategy by ID.
    127  id: string, Placement strategy ID. (required)
    136  { # Contains properties of a placement strategy.
    [all …]
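These three generated pages document the same `placementStrategies` resource across API versions v3_3 through v3_5. Typical access through google-api-python-client looks roughly like the sketch below; the key file, scope usage, and IDs are placeholders, and each call also takes the caller's `profileId` in addition to the strategy `id` shown in the docs:

```python
from googleapiclient import discovery
from google.oauth2 import service_account  # one of several auth options

credentials = service_account.Credentials.from_service_account_file(
    "key.json",  # placeholder path
    scopes=["https://www.googleapis.com/auth/dfatrafficking"])
service = discovery.build("dfareporting", "v3_5", credentials=credentials)

# Gets one placement strategy by ID, then deletes it.
strategy = service.placementStrategies().get(
    profileId="1234567", id="8901234").execute()
service.placementStrategies().delete(
    profileId="1234567", id="8901234").execute()
```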
|