/external/tensorflow/tensorflow/python/distribute/
D | strategy_common_test.py |
    15  """Tests for common methods in strategy classes."""
    46  strategy=[
    53  def testCaptureReplicaId(self, strategy):  argument
    65  return strategy.run(f)
    72  strategy=[
    79  def testBasic(self, strategy):  argument
    80  per_replica_value = strategy.experimental_distribute_values_from_function(
    85  return strategy.reduce(
    89  # Run reduce under the strategy scope to explicitly enter
    90  # strategy default_device scope.
    [all …]
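The hits above exercise the core replica workflow: distribute one value per replica, run a function on every replica with `strategy.run`, and combine the results with `strategy.reduce`. A minimal sketch of that pattern, assuming a two-device MirroredStrategy rather than the test's parameterized strategy fixtures:

    import tensorflow as tf

    strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])  # assumed devices

    def value_fn(ctx):
      # One tensor per replica, derived from the replica id.
      return tf.constant(float(ctx.replica_id_in_sync_group))

    per_replica_value = strategy.experimental_distribute_values_from_function(value_fn)

    # Run reduce under the strategy scope to explicitly enter the
    # strategy's default_device scope, as the test above does.
    with strategy.scope():
      total = strategy.reduce(
          tf.distribute.ReduceOp.SUM, per_replica_value, axis=None)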
D | strategy_gather_test.py |
    15  """Tests for common methods in strategy classes."""
    45  strategy=[
    62  strategy=[
    73  strategy):  argument
    74  distributed_values = strategy.experimental_distribute_values_from_function(
    78  return strategy.gather(distributed_values, axis=axis)
    84  value_on_replica for _ in range(strategy.num_replicas_in_sync)
    89  def testGatherPerReplicaDense1D0Axis(self, strategy, pure_eager):  argument
    93  self._gather_same_shape_and_verify(single_value, axis, pure_eager, strategy)
    95  def testGatherPerReplicaDense2D0Axis(self, strategy, pure_eager):  argument
    [all …]
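`strategy.gather` concatenates the per-replica components of a distributed value along one axis onto the current device. A sketch of what the `_gather_same_shape_and_verify` helper above appears to check — the expected result is `value_on_replica` stacked `num_replicas_in_sync` times — again assuming a two-replica MirroredStrategy:

    import tensorflow as tf

    strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])  # assumed

    def value_fn(ctx):
      return tf.constant([[1.0, 2.0]])  # same shape on every replica

    distributed_values = strategy.experimental_distribute_values_from_function(value_fn)

    # Concatenate along axis 0: with two replicas the result has shape (2, 2).
    gathered = strategy.gather(distributed_values, axis=0)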
D | tpu_strategy_test.py |
    84  strategy = tpu_lib.TPUStrategyV2(resolver)
    85  strategy._enable_packed_variable_in_eager_mode = enable_packed_var
    86  return strategy
    172  strategy = get_tpu_strategy(enable_packed_var)
    173  with strategy.scope():
    185  strategy = get_tpu_strategy(enable_packed_var)
    186  with strategy.scope():
    203  strategy = tpu_lib.TPUStrategyV2(
    205  strategy._enable_packed_variable_in_eager_mode = enable_packed_var
    218  outputs = strategy.experimental_local_results(
    [all …]
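The internal `tpu_lib.TPUStrategyV2` used here is exported publicly as `tf.distribute.TPUStrategy`; `_enable_packed_variable_in_eager_mode` is a private test knob with no public equivalent. A sketch of the public setup path, assuming a reachable TPU worker:

    import tensorflow as tf

    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="")  # assumed address
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

    with strategy.scope():
      v = tf.Variable(1.0)  # mirrored across TPU cores

    @tf.function
    def step():
      return strategy.run(lambda: v + 1.0)

    # One result per local replica, as in the test's assertions.
    outputs = strategy.experimental_local_results(step())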
D | strategy_combinations_test.py |
    43  strategy=strategy_combinations.two_replica_strategies,
    45  def testTwoReplicaStrategy(self, strategy):  argument
    46  with strategy.scope():
    52  one_per_replica = strategy.run(one)
    53  num_replicas = strategy.reduce(
    59  strategy=strategy_combinations.four_replica_strategies,
    61  def testFourReplicaStrategy(self, strategy):  argument
    62  with strategy.scope():
    68  one_per_replica = strategy.run(one)
    69  num_replicas = strategy.reduce(
    [all …]
D | distribution_strategy_context.py |
    15  """Utility to get tf.distribute.Strategy related contexts."""
    38  # replica or cross-replica context for a particular tf.distribute.Strategy.
    44  self.strategy = dist
    51  def __init__(self, strategy):  argument
    52  _ThreadMode.__init__(self, strategy, strategy, None)
    58  _ThreadMode.__init__(self, replica_ctx.strategy, None, replica_ctx)
    102  `None`) when entering a `with tf.distribute.Strategy.scope():` block;
    103  3. switches to a (non-default) replica context inside `strategy.run(fn, ...)`;
    108  Most `tf.distribute.Strategy` methods may only be executed in
    115  with strategy.scope():
    [all …]
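The thread-mode bookkeeping this module implements is observable through the public API: entering `strategy.scope()` puts the thread in a cross-replica context, and `strategy.run` switches each replica's computation into a per-replica context. A small sketch (the MirroredStrategy is an assumption):

    import tensorflow as tf

    strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])  # assumed

    with strategy.scope():
      # Cross-replica context: there is no replica context here.
      assert tf.distribute.in_cross_replica_context()
      assert tf.distribute.get_replica_context() is None

    def fn():
      # Non-default replica context, one per replica inside strategy.run.
      return tf.distribute.get_replica_context().replica_id_in_sync_group

    ids = strategy.run(fn)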
D | strategy_test_lib.py |
    341  self, strategy, input_fn, expected_values, ignore_order=False):  argument
    344  iterable = strategy.distribute_datasets_from_function(input_fn)
    350  list(strategy.experimental_local_results(next(iterator))))
    354  self.evaluate(strategy.experimental_local_results(next(iterator)))
    361  list(strategy.experimental_local_results(next(iterator))))
    365  self._test_input_fn_iterator(iterator, strategy.extended.worker_devices,
    410  def _test_global_step_update(self, strategy):  argument
    411  with strategy.scope():
    426  train_ops, value = strategy.extended.call_for_each_replica(model_fn)
    427  self.evaluate(strategy.group(train_ops))
    [all …]
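The input-iterator helper above follows the standard `distribute_datasets_from_function` pattern: the `input_fn` receives a `tf.distribute.InputContext` and returns one dataset per input pipeline, and each element of the distributed iterator unpacks into per-replica values. A sketch under the same two-replica assumption as the earlier examples:

    import tensorflow as tf

    strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])  # assumed

    def input_fn(input_context):
      batch_size = input_context.get_per_replica_batch_size(4)  # global batch of 4
      ds = tf.data.Dataset.range(8).batch(batch_size)
      return ds.shard(input_context.num_input_pipelines,
                      input_context.input_pipeline_id)

    iterable = strategy.distribute_datasets_from_function(input_fn)
    iterator = iter(iterable)
    # One tensor per local replica for each step:
    local_values = list(strategy.experimental_local_results(next(iterator)))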
D | distribute_lib.py |
    19  and it will be usable with a variety of different `tf.distribute.Strategy`
    20  implementations. Each descendant will implement a different strategy for
    24  model definition code can run unchanged. The `tf.distribute.Strategy` API works
    36  The tutorials cover how to use `tf.distribute.Strategy` to do distributed
    39  `tf.distribute.Strategy`.
    94  when you execute the computation function that was called with `strategy.run`.
    101  A _cross-replica context_ is entered when you enter a `strategy.scope`. This
    102  is useful for calling `tf.distribute.Strategy` methods which operate across
    116  `tf.distribute.Strategy.experimental_distribute_dataset` and
    117  `tf.distribute.Strategy.distribute_datasets_from_function`. They
    [all …]
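The module docstring summarizes the intended division of labor: create variables under `strategy.scope()` so they are mirrored, run per-replica computation via `strategy.run`, and combine results with cross-replica methods such as `reduce`. A compact sketch of a training-style step built only from those public pieces (the tiny "model" is a stand-in):

    import tensorflow as tf

    strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])  # assumed

    with strategy.scope():
      w = tf.Variable(2.0)  # mirrored because it is created in scope

    @tf.function
    def train_step(x):
      def step_fn(x):
        return w * x  # runs once per replica
      per_replica = strategy.run(step_fn, args=(x,))
      # Back in cross-replica context: combine the per-replica results.
      return strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica, axis=None)

    print(train_step(tf.constant(3.0)))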
D | combinations.py |
    15  """This module customizes `test_combinations` for `tf.distribute.Strategy`.
    18  `tf.distribute.Strategy` customizations as a default.
    52  # TODO(rchao): Rename `distribution` parameter to `strategy` or
    58  `strategy` property.
    63  # on the strategy object. This is a temporary flag for testing the variable
    69  strategy = v.strategy
    71  strategy.extended._use_var_policy = use_var_policy
    72  distribution_arguments[k] = strategy
    83  strategy = None
    86  if strategy is not None and _num_total_workers(v.has_chief,
    [all …]
/external/tensorflow/tensorflow/python/keras/distribute/
D | distributed_file_utils.py |
    56  def _get_base_dirpath(strategy):  argument
    57  task_id = strategy.extended._task_id  # pylint: disable=protected-access
    61  def _is_temp_dir(dirpath, strategy):  argument
    62  return dirpath.endswith(_get_base_dirpath(strategy))
    65  def _get_temp_dir(dirpath, strategy):  argument
    66  if _is_temp_dir(dirpath, strategy):
    69  temp_dir = os.path.join(dirpath, _get_base_dirpath(strategy))
    74  def write_dirpath(dirpath, strategy):  argument
    81  strategy: The tf.distribute strategy object currently used.
    86  if strategy is None:
    [all …]
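The utility's contract, visible in the hits: the worker responsible for checkpointing writes to the requested path, while every other worker is redirected into a task-specific temporary subdirectory (so concurrent workers never clobber each other) that is cleaned up after the real write succeeds. A simplified sketch of that redirection; the `workertemp_` prefix and the `should_checkpoint` test mirror common TF behavior, but this is not the module's actual code:

    import os

    def write_dirpath_sketch(dirpath, strategy):
      """Simplified stand-in for write_dirpath (not the real implementation)."""
      if strategy is None:
        return dirpath  # no strategy in use: write in place
      if strategy.extended.should_checkpoint:
        return dirpath  # this task writes to the real destination
      # Other workers get a per-task temp dir under the target path.
      task_id = strategy.extended._task_id  # pylint: disable=protected-access
      return os.path.join(dirpath, "workertemp_" + str(task_id))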
D | distributed_file_utils_test.py |
    60  strategy = DistributedFileUtilsTest.MockedChiefStrategy()
    62  distributed_file_utils.write_filepath(filepath, strategy), filepath)
    64  distributed_file_utils.write_dirpath(dirpath, strategy), dirpath)
    69  strategy = DistributedFileUtilsTest.MockedWorkerStrategy()
    71  distributed_file_utils.write_filepath(filepath, strategy),
    74  distributed_file_utils.write_dirpath(dirpath, strategy),
    79  strategy = DistributedFileUtilsTest.MockedChiefStrategy()
    80  dir_to_write = distributed_file_utils.write_dirpath(temp_dir, strategy)
    86  file_to_write, strategy)
    91  strategy = DistributedFileUtilsTest.MockedWorkerStrategy()
    [all …]
/external/tensorflow/tensorflow/python/tpu/
D | tpu_outside_compilation_test.py |
    135  strategy = get_tpu_strategy()
    148  return strategy.run(tpu_fn, args=(25.0,))
    151  strategy.experimental_local_results(train_step()),
    152  constant_op.constant(35., shape=(strategy.num_replicas_in_sync))
    155  strategy = get_tpu_strategy()
    168  return strategy.run(tpu_fn, args=(25.0,))
    171  strategy.experimental_local_results(train_step()),
    172  constant_op.constant(35., shape=(strategy.num_replicas_in_sync))
    175  strategy = get_tpu_strategy()
    189  return strategy.run(tpu_fn, args=(25.0,))
    [all …]
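These tests wrap a host-side function in outside compilation so that ops the TPU cannot execute still run in the middle of a replicated step. A sketch of the pattern using the public `tf.tpu.outside_compilation` API; the TPU setup mirrors the earlier sketch and the arithmetic is illustrative, not the test's:

    import tensorflow as tf

    resolver = tf.distribute.cluster_resolver.TPUClusterResolver(tpu="")  # assumed
    tf.config.experimental_connect_to_cluster(resolver)
    tf.tpu.experimental.initialize_tpu_system(resolver)
    strategy = tf.distribute.TPUStrategy(resolver)

    def host_fn(x):
      # Runs on the host CPU, outside the compiled TPU program.
      return x * 2.0

    @tf.function
    def train_step():
      def tpu_fn(x):
        y = x + 5.0
        z = tf.tpu.outside_compilation(host_fn, y)
        return z + 5.0
      return strategy.run(tpu_fn, args=(25.0,))

    outputs = strategy.experimental_local_results(train_step())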
D | tpu_embedding_v2_test.py |
    78  self.strategy = tpu_strategy.TPUStrategy(self.resolver)
    79  self.num_rows = self.strategy.num_replicas_in_sync
    83  with self.strategy.scope():
    190  with self.strategy.scope():
    192  self.strategy.num_replicas_in_sync * 2)
    200  with self.strategy.scope():
    202  self.strategy.num_replicas_in_sync * 2)
    207  with self.strategy.scope():
    213  self.assertAllClose(np.ones((self.strategy.num_replicas_in_sync * 2, 4)),
    219  self.assertAllClose(np.ones((self.strategy.num_replicas_in_sync * 2, 4)),
    [all …]
/external/python/google-api-python-client/docs/dyn/
D | dfareporting_v3_3.placementStrategies.html |
    79  <p class="firstline">Deletes an existing placement strategy.</p>
    82  <p class="firstline">Gets one placement strategy by ID.</p>
    85  <p class="firstline">Inserts a new placement strategy.</p>
    94  <p class="firstline">Updates an existing placement strategy. This method supports patch semantics.<…
    97  <p class="firstline">Updates an existing placement strategy.</p>
    101  <pre>Deletes an existing placement strategy.
    105  id: string, Placement strategy ID. (required)
    111  <pre>Gets one placement strategy by ID.
    115  id: string, Placement strategy ID. (required)
    120  { # Contains properties of a placement strategy.
    [all …]
D | dfareporting_v3_1.placementStrategies.html |
    79  <p class="firstline">Deletes an existing placement strategy.</p>
    82  <p class="firstline">Gets one placement strategy by ID.</p>
    85  <p class="firstline">Inserts a new placement strategy.</p>
    94  <p class="firstline">Updates an existing placement strategy. This method supports patch semantics.<…
    97  <p class="firstline">Updates an existing placement strategy.</p>
    101  <pre>Deletes an existing placement strategy.
    105  id: string, Placement strategy ID. (required)
    111  <pre>Gets one placement strategy by ID.
    115  id: string, Placement strategy ID. (required)
    120  { # Contains properties of a placement strategy.
    [all …]
D | dfareporting_v2_8.placementStrategies.html |
    79  <p class="firstline">Deletes an existing placement strategy.</p>
    82  <p class="firstline">Gets one placement strategy by ID.</p>
    85  <p class="firstline">Inserts a new placement strategy.</p>
    94  <p class="firstline">Updates an existing placement strategy. This method supports patch semantics.<…
    97  <p class="firstline">Updates an existing placement strategy.</p>
    101  <pre>Deletes an existing placement strategy.
    105  id: string, Placement strategy ID. (required)
    111  <pre>Gets one placement strategy by ID.
    115  id: string, Placement strategy ID. (required)
    120  { # Contains properties of a placement strategy.
    [all …]
D | dfareporting_v2_7.placementStrategies.html |
    79  <p class="firstline">Deletes an existing placement strategy.</p>
    82  <p class="firstline">Gets one placement strategy by ID.</p>
    85  <p class="firstline">Inserts a new placement strategy.</p>
    94  <p class="firstline">Updates an existing placement strategy. This method supports patch semantics.<…
    97  <p class="firstline">Updates an existing placement strategy.</p>
    101  <pre>Deletes an existing placement strategy.
    105  id: string, Placement strategy ID. (required)
    111  <pre>Gets one placement strategy by ID.
    115  id: string, Placement strategy ID. (required)
    120  { # Contains properties of a placement strategy.
    [all …]
D | dfareporting_v3_2.placementStrategies.html |
    79  <p class="firstline">Deletes an existing placement strategy.</p>
    82  <p class="firstline">Gets one placement strategy by ID.</p>
    85  <p class="firstline">Inserts a new placement strategy.</p>
    94  <p class="firstline">Updates an existing placement strategy. This method supports patch semantics.<…
    97  <p class="firstline">Updates an existing placement strategy.</p>
    101  <pre>Deletes an existing placement strategy.
    105  id: string, Placement strategy ID. (required)
    111  <pre>Gets one placement strategy by ID.
    115  id: string, Placement strategy ID. (required)
    120  { # Contains properties of a placement strategy.
    [all …]
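These five pages are the generated reference docs for the same `placementStrategies` resource across DFA Reporting API versions v2_7 through v3_3; the method surface shown (delete, get, insert, patch, update) is identical in each. With google-api-python-client the resource is used roughly like this; the credentials object and the numeric IDs are placeholders:

    from googleapiclient.discovery import build

    # creds: an authorized google-auth credentials object (placeholder).
    service = build("dfareporting", "v3_3", credentials=creds)

    # Gets one placement strategy by ID.
    got = service.placementStrategies().get(
        profileId="12345", id="67890").execute()

    # Inserts a new placement strategy.
    created = service.placementStrategies().insert(
        profileId="12345", body={"name": "My placement strategy"}).execute()

    # Deletes an existing placement strategy.
    service.placementStrategies().delete(
        profileId="12345", id=created["id"]).execute()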
/external/jacoco/org.jacoco.core.test/src/org/jacoco/core/internal/instr/
D | ProbeArrayStrategyFactoryTest.java |
    51  final IProbeArrayStrategy strategy = test(Opcodes.V1_1, 0, false, true,  in testClass1() local
    53  assertEquals(ClassFieldProbeArrayStrategy.class, strategy.getClass());  in testClass1()
    60  final IProbeArrayStrategy strategy = test(Opcodes.V1_2, 0, false, true,  in testClass2() local
    62  assertEquals(ClassFieldProbeArrayStrategy.class, strategy.getClass());  in testClass2()
    69  final IProbeArrayStrategy strategy = test(Opcodes.V1_3, 0, false, true,  in testClass3() local
    71  assertEquals(ClassFieldProbeArrayStrategy.class, strategy.getClass());  in testClass3()
    78  final IProbeArrayStrategy strategy = test(Opcodes.V1_4, 0, false, true,  in testClass4() local
    80  assertEquals(ClassFieldProbeArrayStrategy.class, strategy.getClass());  in testClass4()
    87  final IProbeArrayStrategy strategy = test(Opcodes.V1_5, 0, false, true,  in testClass5() local
    89  assertEquals(ClassFieldProbeArrayStrategy.class, strategy.getClass());  in testClass5()
    [all …]
/external/tensorflow/tensorflow/python/distribute/integration_test/
D | saved_model_test.py |
    51  strategy=[
    59  # tf.distribute.Strategy and used for serving later. Serving usually only uses
    60  # one device and this is simulated by loading the model under no strategy
    65  # tf.distribute.Strategy. The saved tf.function should be an inference
    78  def test_read_sync_on_read_variable(self, strategy):  argument
    95  with strategy.scope():
    101  self.evaluate(strategy.experimental_local_results(m.v)), [0.5, 0.5])
    109  def test_read_mirrored_variable(self, strategy):  argument
    111  # tf.distribute.Strategy.scope(). Most model parameters are this kind of
    126  with strategy.scope():
    [all …]
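The scenario under test: a model built and saved under a `tf.distribute.Strategy` is later loaded with no strategy in scope, as a serving process would do, and the distributed variables come back as ordinary single-device values. A sketch of that round trip; the module contents and the path are made up:

    import tensorflow as tf

    strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])  # assumed

    with strategy.scope():
      m = tf.Module()
      m.v = tf.Variable(1.0)  # a distributed (mirrored) variable
      m.serve = tf.function(
          lambda x: m.v * x,
          input_signature=[tf.TensorSpec([], tf.float32)])

    tf.saved_model.save(m, "/tmp/saved_model")  # placeholder path

    # Serving side: no strategy scope, a single device.
    loaded = tf.saved_model.load("/tmp/saved_model")
    print(loaded.serve(tf.constant(2.0)))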
/external/objenesis/main/src/main/java/org/objenesis/
D | ObjenesisBase.java |
    19  import org.objenesis.strategy.InstantiatorStrategy;
    24  * Base class to extend if you want to have a class providing your own default strategy. Can also be
    31  /** Strategy used by this Objenesis implementation to create classes */
    32  protected final InstantiatorStrategy strategy;  field in ObjenesisBase
    34  /** Strategy cache. Key = Class, Value = InstantiatorStrategy */
    38  * Constructor allowing to pick a strategy and using cache
    40  * @param strategy Strategy to use
    42  public ObjenesisBase(InstantiatorStrategy strategy) {  in ObjenesisBase() argument
    43  this(strategy, true);  in ObjenesisBase()
    47  * Flexible constructor allowing to pick the strategy and if caching should be used
    [all …]
/external/squashfs-tools/squashfs-tools/
D | gzip_wrapper.c |
    33  static struct strategy strategy[] = {  variable
    112  for(i = 0; strategy[i].name; i++) {  in gzip_options()
    113  int n = strlen(strategy[i].name);  in gzip_options()
    114  if((strncmp(name, strategy[i].name, n) == 0) &&  in gzip_options()
    117  if(strategy[i].selected == 0) {  in gzip_options()
    118  strategy[i].selected = 1;  in gzip_options()
    125  if(strategy[i].name == NULL) {  in gzip_options()
    127  "strategy\n");  in gzip_options()
    152  if(strategy_count == 1 && strategy[0].selected) {  in gzip_options_post()
    154  strategy[0].selected = 0;  in gzip_options_post()
    [all …]
/external/tensorflow/tensorflow/python/training/experimental/
D | loss_scaling_gradient_tape_test.py |
    39  # If called outside any strategy.scope() calls, this will return the default
    40  # strategy.
    53  def _run_with_strategy(self, run_fn, strategy, use_tf_function=False):  argument
    54  """Runs `run_fn` under the DistributionStrategy `strategy`.
    56  Runs `run_fn` with `strategy.run`. Returns a list of the
    61  strategy: The DistributionStrategy to run `run_fn` with.
    69  strategy_fn = lambda: strategy.run(run_fn)
    77  return strategy.experimental_local_results(tensor)
    90  strategy = strategy_fn()
    91  with strategy.scope():
    [all …]
D | loss_scale_optimizer_test.py |
    42  # If called outside any strategy.scope() calls, this will return the default
    43  # strategy.
    124  def _run_fn_with_grad_check(self, strategy, var, opt, expected_grad):  argument
    127  loss = lambda: grad_check_fn(var) / strategy.num_replicas_in_sync
    133  with strategy_fn().scope() as strategy:
    140  # / strategy.num_replicas_in_sync will not be exact, which could lead to
    142  self.assertEqual(loss_scale % strategy.num_replicas_in_sync, 0)
    144  strategy, var, opt, loss_scale / strategy.num_replicas_in_sync)
    145  run_op = strategy.experimental_run(run_fn)
    169  strategy = strategy_fn()
    [all …]
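Two details in these hits are worth spelling out: the loss is divided by `num_replicas_in_sync` because per-replica gradients are summed across replicas, so dividing first yields the averaged gradient; and the test asserts the loss scale divides evenly by the replica count so that division stays exact. A sketch of the equivalent custom loop with the public `tf.keras.mixed_precision.LossScaleOptimizer` (the test itself uses older experimental APIs, and the MirroredStrategy is an assumption):

    import tensorflow as tf

    strategy = tf.distribute.MirroredStrategy(["GPU:0", "GPU:1"])  # assumed

    with strategy.scope():
      var = tf.Variable(1.0)
      opt = tf.keras.mixed_precision.LossScaleOptimizer(
          tf.keras.optimizers.SGD(1.0))

    def run_fn():
      with tf.GradientTape() as tape:
        # Divide by the replica count: per-replica gradients are summed
        # when applied, so this produces the average gradient.
        loss = var / strategy.num_replicas_in_sync
        scaled_loss = opt.get_scaled_loss(loss)
      scaled_grads = tape.gradient(scaled_loss, [var])
      grads = opt.get_unscaled_gradients(scaled_grads)
      opt.apply_gradients(zip(grads, [var]))

    strategy.run(run_fn)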
/external/perfetto/src/traced/probes/ftrace/
D | event_info_unittest.cc |
    50  ASSERT_FALSE(field.strategy);  in TEST()
    67  ASSERT_FALSE(field.strategy);  in TEST()
    73  TranslationStrategy strategy = kUint32ToUint32;  in TEST() local
    75  &strategy));  in TEST()
    76  ASSERT_EQ(strategy, kUint32ToUint32);  in TEST()
    78  &strategy));  in TEST()
    79  ASSERT_EQ(strategy, kCStringToString);  in TEST()
    81  SetTranslationStrategy(kFtracePid32, ProtoSchemaType::kInt32, &strategy));  in TEST()
    82  ASSERT_EQ(strategy, kPid32ToInt32);  in TEST()
    84  &strategy));  in TEST()
    [all …]
/external/glide/library/src/main/java/com/bumptech/glide/load/engine/bitmap_recycle/
D | LruBitmapPool.java |
    24  private final LruPoolStrategy strategy;  field in LruBitmapPool
    36  LruBitmapPool(int maxSize, LruPoolStrategy strategy) {  in LruBitmapPool() argument
    39  this.strategy = strategy;  in LruBitmapPool()
    65  if (!bitmap.isMutable() || strategy.getSize(bitmap) > maxSize) {  in put()
    67  Log.v(TAG, "Reject bitmap from pool=" + strategy.logBitmap(bitmap) + " is mutable="  in put()
    73  final int size = strategy.getSize(bitmap);  in put()
    74  strategy.put(bitmap);  in put()
    81  Log.v(TAG, "Put bitmap in pool=" + strategy.logBitmap(bitmap));  in put()
    111  final Bitmap result = strategy.get(width, height, config != null ? config : DEFAULT_CONFIG);  in getDirty()
    114  Log.d(TAG, "Missing bitmap=" + strategy.logBitmap(width, height, config));  in getDirty()
    [all …]