/external/tensorflow/tensorflow/python/kernel_tests/ |
D | decode_jpeg_op_test.py |
     45  parallelism,
     87  for _ in xrange(parallelism):
    122  for parallelism in [1, 100]:
    123  duration_decode = self._evalDecodeJpeg('small.jpg', parallelism,
    125  duration_decode_crop = self._evalDecodeJpeg('small.jpg', parallelism,
    128  'small.jpg', parallelism, num_iters, True, crop_window)
    130  name='decode_jpeg_small_p%d' % (parallelism),
    134  name='decode_crop_jpeg_small_p%d' % (parallelism),
    138  name='decode_after_crop_jpeg_small_p%d' % (parallelism),
    146  for parallelism in [1, 100]:
    [all …]
|
D | record_input_test.py |
     53  parallelism=1,
     70  parallelism=1,
     89  parallelism=1,
    108  parallelism=2,
    137  parallelism=1,
    165  parallelism=2,
|
/external/kotlinx.coroutines/kotlinx-coroutines-core/jvm/src/scheduling/ |
D | Dispatcher.kt |
     86  public fun blocking(parallelism: Int = BLOCKING_DEFAULT_PARALLELISM): CoroutineDispatcher {
     87  require(parallelism > 0) { "Expected positive parallelism level, but have $parallelism" }
     88  return LimitingDispatcher(this, parallelism, TaskMode.PROBABLY_BLOCKING)
     98  public fun limited(parallelism: Int): CoroutineDispatcher {
     99  require(parallelism > 0) { "Expected positive parallelism level, but have $parallelism" }
    100  …require(parallelism <= corePoolSize) { "Expected parallelism level lesser than core pool size ($co…
    101  return LimitingDispatcher(this, parallelism, TaskMode.NON_BLOCKING)
    134  val parallelism: Int,
    157  if (inFlight <= parallelism) {
    180  if (inFlightTasks.decrementAndGet() >= parallelism) {
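The hits above outline how a limiting dispatcher works: a `require` check on the requested level, and an in-flight counter compared against `parallelism` as tasks are submitted and completed. The sketch below is a self-contained illustration of the same idea, not the actual `LimitingDispatcher`; all names are made up, and a single lock keeps the slot accounting simple.

```kotlin
import kotlinx.coroutines.CoroutineDispatcher
import kotlin.coroutines.CoroutineContext

// Illustrative only: a dispatcher view that runs at most `parallelism` tasks
// at a time on `delegate`; excess tasks wait in a queue until a slot frees up.
class LimitedDispatcher(
    private val delegate: CoroutineDispatcher,
    private val parallelism: Int,
) : CoroutineDispatcher() {
    init {
        require(parallelism > 0) { "Expected positive parallelism level, but have $parallelism" }
    }

    private val lock = Any()
    private val queue = ArrayDeque<Runnable>() // guarded by lock
    private var running = 0                    // guarded by lock

    override fun dispatch(context: CoroutineContext, block: Runnable) {
        val startWorker = synchronized(lock) {
            queue.addLast(block)
            // Claim a slot only if fewer than `parallelism` workers are active;
            // otherwise a currently running worker will drain this task later.
            if (running < parallelism) { running++; true } else false
        }
        if (startWorker) delegate.dispatch(context, Worker())
    }

    private inner class Worker : Runnable {
        override fun run() {
            while (true) {
                val task = synchronized(lock) {
                    // Release the slot only when the queue is observed empty
                    // under the lock, so no queued task can be stranded.
                    queue.removeFirstOrNull().also { if (it == null) running-- }
                } ?: return
                try {
                    task.run()
                } catch (t: Throwable) {
                    // Don't let one failing task take the worker (and its slot) down.
                    t.printStackTrace()
                }
            }
        }
    }
}
```

Usage sketch: `launch(LimitedDispatcher(Dispatchers.Default, parallelism = 2)) { … }` runs at most two of the launched tasks concurrently while the rest wait in the queue.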
|
/external/kotlinx.coroutines/kotlinx-coroutines-core/jvm/src/ |
D | CommonPool.kt |
     39  val parallelism = property.toIntOrNull()
     40  if (parallelism == null || parallelism < 1) {
     43  parallelism
     46  private val parallelism: Int
     70  … Try { fjpClass.getConstructor(Int::class.java).newInstance(parallelism) as? ExecutorService }
     92  return Executors.newFixedThreadPool(parallelism) {
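The hits above show CommonPool's sizing logic: an optional property override is parsed and validated, and pool creation falls back to a plain fixed thread pool when a ForkJoinPool cannot be constructed. A minimal sketch of that shape follows; the property name and helper names are illustrative, and the reflection used by the original (to stay compatible with runtimes without ForkJoinPool) is omitted.

```kotlin
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.concurrent.ForkJoinPool
import java.util.concurrent.atomic.AtomicInteger

private val threadCounter = AtomicInteger()

// Honour an optional system-property override for the parallelism level,
// otherwise default to the number of available processors.
fun resolveParallelism(propertyName: String = "example.pool.parallelism"): Int {
    val property = System.getProperty(propertyName)
        ?: return Runtime.getRuntime().availableProcessors()
    val parallelism = property.toIntOrNull()
    require(parallelism != null && parallelism >= 1) {
        "Expected positive number in '$propertyName', but have '$property'"
    }
    return parallelism
}

// Prefer a ForkJoinPool; fall back to a fixed-size pool of daemon threads.
fun createPool(parallelism: Int = resolveParallelism()): ExecutorService =
    runCatching<ExecutorService> { ForkJoinPool(parallelism) }
        .getOrElse {
            Executors.newFixedThreadPool(parallelism) { runnable ->
                Thread(runnable, "pool-worker-${threadCounter.incrementAndGet()}")
                    .apply { isDaemon = true }
            }
        }
```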
|
/external/kotlinx.coroutines/benchmarks/src/jmh/kotlin/benchmarks/ |
D | SemaphoreBenchmark.kt |
     81  enum class SemaphoreBenchDispatcherCreator(val create: (parallelism: Int) -> CoroutineDispatcher) {
     82  FORK_JOIN({ parallelism -> ForkJoinPool(parallelism).asCoroutineDispatcher() }),
     83  …EXPERIMENTAL({ parallelism -> ExperimentalCoroutineDispatcher(corePoolSize = parallelism, maxPoolS…
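The creators above build dispatchers with a given parallelism level for the semaphore benchmark. As a companion sketch, this is the usual way a coroutine `Semaphore` bounds the effective parallelism of a batch of coroutines; the function below is illustrative and not part of the benchmark.

```kotlin
import kotlinx.coroutines.async
import kotlinx.coroutines.awaitAll
import kotlinx.coroutines.coroutineScope
import kotlinx.coroutines.sync.Semaphore
import kotlinx.coroutines.sync.withPermit

// Transform all items concurrently, but let at most `parallelism` transforms
// make progress at any moment: each coroutine holds a permit while it runs.
suspend fun <T, R> mapWithParallelism(
    items: List<T>,
    parallelism: Int,
    transform: suspend (T) -> R,
): List<R> {
    require(parallelism > 0) { "Expected positive parallelism level, but have $parallelism" }
    val semaphore = Semaphore(permits = parallelism)
    return coroutineScope {
        items.map { item ->
            async { semaphore.withPermit { transform(item) } }
        }.awaitAll()
    }
}
```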
|
D | ChannelProducerConsumerBenchmark.kt |
    126  enum class DispatcherCreator(val create: (parallelism: Int) -> CoroutineDispatcher) {
    127  FORK_JOIN({ parallelism -> ForkJoinPool(parallelism).asCoroutineDispatcher() })
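A hedged sketch of the kind of workload this benchmark exercises: producers and consumers exchanging items over a `Channel`, run on a dispatcher built from a `ForkJoinPool` with a chosen parallelism level, as the `FORK_JOIN` creator above does. All counts and names are arbitrary.

```kotlin
import kotlinx.coroutines.asCoroutineDispatcher
import kotlinx.coroutines.channels.Channel
import kotlinx.coroutines.joinAll
import kotlinx.coroutines.launch
import kotlinx.coroutines.runBlocking
import java.util.concurrent.ForkJoinPool
import java.util.concurrent.atomic.AtomicInteger

fun main() {
    val parallelism = 4
    val pool = ForkJoinPool(parallelism)
    val dispatcher = pool.asCoroutineDispatcher()
    val consumed = AtomicInteger()

    runBlocking {
        val channel = Channel<Int>(capacity = 128)
        // Two producers push items into the channel.
        val producers = List(2) { id ->
            launch(dispatcher) {
                repeat(10_000) { i -> channel.send(id * 10_000 + i) }
            }
        }
        // Two consumers drain it until it is closed.
        val consumers = List(2) {
            launch(dispatcher) {
                for (item in channel) consumed.incrementAndGet()
            }
        }
        producers.joinAll()
        channel.close()   // lets the consumers' for-loops terminate
        consumers.joinAll()
    }
    println("consumed=${consumed.get()}")
    pool.shutdown()
}
```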
|
/external/tensorflow/tensorflow/core/framework/ |
D | model_test.cc |
     31  const int64 parallelism = std::get<0>(GetParam());
     38  std::make_shared<SharedState>(parallelism, nullptr, nullptr), 1,
     39  parallelism)});
     64  110 * parallelism / 10);
     91  100 + 250 / parallelism);
     98  50 + 250 / parallelism);
    111  const int64 parallelism = std::get<0>(GetParam());
    118  std::make_shared<SharedState>(parallelism, nullptr, nullptr), 1,
    119  parallelism)});
    132  110 * parallelism / 10);
    [all …]
|
D | model.cc |
    252  double parallelism = num_inputs() - 1; // default to cycle length
    255  parallelism = std::min(parallelism, (*parameter)->value);
    263  static_cast<double>(num_inputs() - 1) / parallelism;
    268  SelfProcessingTimeLocked() + output_time, old_input_time, parallelism,
    276  Square(parallelism);
    281  parallelism;
    302  static_cast<double>(num_inputs() - 1) / parallelism;
    304  SelfProcessingTimeLocked() + output_time, old_input_time, parallelism,
    427  double parallelism = 1.0;
    432  parallelism = (*parallelism_parameter)->value;
    [all …]
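A hedged reading of the `(num_inputs() - 1) / parallelism` terms here and of the expectations in model_test.cc above (for example `100 + 250 / parallelism`): the tf.data autotuning model estimates a parallel node's output time as roughly its own per-element processing time plus its inputs' time divided by the parallelism level,

    output_time ≈ self_processing_time + input_time / parallelism

with node-type-specific weighting. This is only the general shape suggested by the visible snippets, not the exact expression used in model.cc.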
|
D | device_base.cc | 63 const int parallelism = std::max<int>( 66 return eigen_cpu_devices_[parallelism - 1];
|
/external/kotlinx.coroutines/kotlinx-coroutines-core/jvm/test/scheduling/ |
D | SchedulerTestBase.kt |
     89  protected fun blockingDispatcher(parallelism: Int): CoroutineContext {
     91  return _dispatcher!!.blocking(parallelism) + handler
     94  protected fun view(parallelism: Int): CoroutineContext {
     96  return _dispatcher!!.limited(parallelism) + handler
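As in the test helpers above, a parallelism-limited view is typically combined with a `CoroutineExceptionHandler` into a single `CoroutineContext`. A sketch of that pattern, reusing the hypothetical `LimitedDispatcher` from the Dispatcher.kt entry earlier (the helper name below is made up):

```kotlin
import kotlinx.coroutines.CoroutineExceptionHandler
import kotlinx.coroutines.Dispatchers
import kotlin.coroutines.CoroutineContext

// Build a context that bounds parallelism and reports unhandled failures,
// mirroring the `blocking(parallelism) + handler` shape in the snippet above.
fun limitedContext(parallelism: Int): CoroutineContext {
    val handler = CoroutineExceptionHandler { _, e ->
        System.err.println("Unhandled coroutine failure: $e")
    }
    return LimitedDispatcher(Dispatchers.Default, parallelism) + handler
}

// Usage: launch(limitedContext(2)) { … }
```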
|
/external/tensorflow/tensorflow/core/api_def/base_api/ |
D | api_def_ExperimentalMaxIntraOpParallelismDataset.pbtxt | 6 Identifies the maximum intra-op parallelism to use. 10 Creates a dataset that overrides the maximum intra-op parallelism.
|
D | api_def_MaxIntraOpParallelismDataset.pbtxt | 6 Identifies the maximum intra-op parallelism to use. 10 Creates a dataset that overrides the maximum intra-op parallelism.
|
D | api_def_MapDefun.pbtxt | 49 limit the intra op parallelism. To limit inter-op parallelism, a user can
|
D | api_def_ThreadPoolHandle.pbtxt | 19 The maximum degree of parallelism to use within operations that execute on this
|
D | api_def_ExperimentalThreadPoolHandle.pbtxt | 19 The maximum degree of parallelism to use within operations that execute on this
|
D | api_def_TPUReplicateMetadata.pbtxt | 13 Number of cores per replica. Used for model parallelism.
|
/external/kotlinx.coroutines/kotlinx-coroutines-core/jvm/test/ |
D | CommonPoolTest.kt | 45 parallelism: Int, 50 parallelism,
|
/external/tensorflow/tensorflow/core/profiler/protobuf/ |
D | op_stats.proto |
     78  // The number of replicas, corresponds to input parallelism.
     79  // If there is no model parallelism, replica_count = device_core_count
     81  // The number of cores used for a single replica, e.g. model parallelism.
     82  // If there is no model parallelism, then num_cores_per_replica = 1
|
D | overview_page.proto |
    133  // The number of replicas, corresponds to input parallelism.
    134  // If there is no model parallelism, replica_count = device_core_count
    136  // The number of cores used for a single replica, e.g. model parallelism.
    137  // If there is no model parallelism, then num_cores_per_replica = 1
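Taken together, the comments above imply a simple accounting identity for these profiler fields (with replica_count = device_core_count and num_cores_per_replica = 1 in the purely data-parallel case):

    device_core_count = replica_count × num_cores_per_replica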
|
/external/libjpeg-turbo/simd/nasm/ |
D | jsimdcfg.inc.h | 102 ; To maximize parallelism, Type DCTELEM is changed to short (originally, int). 110 ; To maximize parallelism, Type MULTIPLIER is changed to short.
|
D | jsimdcfg.inc | 70 ; To maximize parallelism, Type DCTELEM is changed to short (originally, int). 76 ; To maximize parallelism, Type short is changed to short.
|
/external/skqp/third_party/libjpeg-turbo/ |
D | jsimdcfg.inc | 70 ; To maximize parallelism, Type DCTELEM is changed to short (originally, int). 76 ; To maximize parallelism, Type short is changed to short.
|
/external/skia/third_party/libjpeg-turbo/ |
D | jsimdcfg.inc | 70 ; To maximize parallelism, Type DCTELEM is changed to short (originally, int). 76 ; To maximize parallelism, Type short is changed to short.
|
/external/tensorflow/tensorflow/core/kernels/ |
D | record_yielder.cc | 28 "record_yielder", 1 + opts.parallelism, 133 const int N = opts_.parallelism;
|
D | record_input_op.cc | 49 yopts.parallelism = file_parallelism;
|