
Searched refs:parallelism (Results 1 – 25 of 85) sorted by relevance


/external/tensorflow/tensorflow/python/kernel_tests/
decode_jpeg_op_test.py
45 parallelism, argument
87 for _ in xrange(parallelism):
122 for parallelism in [1, 100]:
123 duration_decode = self._evalDecodeJpeg('small.jpg', parallelism,
125 duration_decode_crop = self._evalDecodeJpeg('small.jpg', parallelism,
128 'small.jpg', parallelism, num_iters, True, crop_window)
130 name='decode_jpeg_small_p%d' % (parallelism),
134 name='decode_crop_jpeg_small_p%d' % (parallelism),
138 name='decode_after_crop_jpeg_small_p%d' % (parallelism),
146 for parallelism in [1, 100]:
[all …]
record_input_test.py
53 parallelism=1,
70 parallelism=1,
89 parallelism=1,
108 parallelism=2,
137 parallelism=1,
165 parallelism=2,
/external/kotlinx.coroutines/kotlinx-coroutines-core/jvm/src/scheduling/
Dispatcher.kt
86 public fun blocking(parallelism: Int = BLOCKING_DEFAULT_PARALLELISM): CoroutineDispatcher { in toString()
87 require(parallelism > 0) { "Expected positive parallelism level, but have $parallelism" } in toString()
88 return LimitingDispatcher(this, parallelism, TaskMode.PROBABLY_BLOCKING) in toString()
98 public fun limited(parallelism: Int): CoroutineDispatcher { in toString()
99 require(parallelism > 0) { "Expected positive parallelism level, but have $parallelism" } in toString()
100 …require(parallelism <= corePoolSize) { "Expected parallelism level lesser than core pool size ($co… in toString()
101 return LimitingDispatcher(this, parallelism, TaskMode.NON_BLOCKING) in toString()
134 val parallelism: Int, constant in kotlinx.coroutines.scheduling.LimitingDispatcher
157 if (inFlight <= parallelism) { in execute()
180 if (inFlightTasks.decrementAndGet() >= parallelism) { in execute()
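The blocking()/limited() hits above enforce a positive-parallelism precondition before wrapping the scheduler in a LimitingDispatcher. A minimal Kotlin sketch of the same contract using the public limitedParallelism API instead of those internal functions (this is not the indexed code; older kotlinx.coroutines releases may also require @OptIn(ExperimentalCoroutinesApi::class)):

import kotlinx.coroutines.*

fun main() = runBlocking {
    // limitedParallelism rejects non-positive values, mirroring the
    // require(parallelism > 0) checks in the hits above.
    val limited = Dispatchers.Default.limitedParallelism(2)
    val jobs = List(8) { i ->
        launch(limited) {
            // At most two of these blocks execute concurrently on the view.
            println("task $i on ${Thread.currentThread().name}")
            delay(100)
        }
    }
    jobs.joinAll()
}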
/external/kotlinx.coroutines/kotlinx-coroutines-core/jvm/src/
CommonPool.kt
39 val parallelism = property.toIntOrNull() in <lambda>() constant
40 if (parallelism == null || parallelism < 1) { in <lambda>()
43 parallelism in <lambda>()
46 private val parallelism: Int constant
70 … Try { fjpClass.getConstructor(Int::class.java).newInstance(parallelism) as? ExecutorService } in createPool()
92 return Executors.newFixedThreadPool(parallelism) { in createPlainPool()
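CommonPool.kt above reads a parallelism level from a system property, rejects values below 1, and can fall back to a plain fixed-size pool. A hedged sketch of that pattern using only public APIs (the property name is invented for illustration, not the one CommonPool actually reads):

import java.util.concurrent.Executors
import kotlinx.coroutines.CoroutineDispatcher
import kotlinx.coroutines.asCoroutineDispatcher

fun poolDispatcher(propertyName: String = "example.pool.parallelism"): CoroutineDispatcher {
    // Missing, non-numeric, or < 1 values fall back to the CPU count,
    // as in the toIntOrNull()/< 1 check shown in the hit above.
    val requested = System.getProperty(propertyName)?.toIntOrNull()
    val parallelism =
        if (requested == null || requested < 1) Runtime.getRuntime().availableProcessors()
        else requested
    return Executors.newFixedThreadPool(parallelism).asCoroutineDispatcher()
}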
/external/kotlinx.coroutines/benchmarks/src/jmh/kotlin/benchmarks/
SemaphoreBenchmark.kt
81 enum class SemaphoreBenchDispatcherCreator(val create: (parallelism: Int) -> CoroutineDispatcher) {
82 FORK_JOIN({ parallelism -> ForkJoinPool(parallelism).asCoroutineDispatcher() }), in parallelism() method
83 …EXPERIMENTAL({ parallelism -> ExperimentalCoroutineDispatcher(corePoolSize = parallelism, maxPoolS… in parallelism() method
ChannelProducerConsumerBenchmark.kt
126 enum class DispatcherCreator(val create: (parallelism: Int) -> CoroutineDispatcher) {
127 FORK_JOIN({ parallelism -> ForkJoinPool(parallelism).asCoroutineDispatcher() }) in parallelism() method
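Both benchmark enums build a dispatcher from a requested parallelism level. A small runnable sketch of the FORK_JOIN variant (the parallelism value here is arbitrary):

import java.util.concurrent.ForkJoinPool
import kotlinx.coroutines.asCoroutineDispatcher
import kotlinx.coroutines.launch
import kotlinx.coroutines.runBlocking

fun main() = runBlocking {
    val parallelism = 4
    // A ForkJoinPool sized to the requested parallelism, exposed to
    // coroutines as a dispatcher, then shut down when done.
    val dispatcher = ForkJoinPool(parallelism).asCoroutineDispatcher()
    try {
        launch(dispatcher) {
            println("running on ${Thread.currentThread().name}")
        }.join()
    } finally {
        dispatcher.close()
    }
}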
/external/tensorflow/tensorflow/core/framework/
model_test.cc
31 const int64 parallelism = std::get<0>(GetParam()); in TEST_P() local
38 std::make_shared<SharedState>(parallelism, nullptr, nullptr), 1, in TEST_P()
39 parallelism)}); in TEST_P()
64 110 * parallelism / 10); in TEST_P()
91 100 + 250 / parallelism); in TEST_P()
98 50 + 250 / parallelism); in TEST_P()
111 const int64 parallelism = std::get<0>(GetParam()); in TEST_P() local
118 std::make_shared<SharedState>(parallelism, nullptr, nullptr), 1, in TEST_P()
119 parallelism)}); in TEST_P()
132 110 * parallelism / 10); in TEST_P()
[all …]
model.cc
252 double parallelism = num_inputs() - 1; // default to cycle length in OutputTimeLocked() local
255 parallelism = std::min(parallelism, (*parameter)->value); in OutputTimeLocked()
263 static_cast<double>(num_inputs() - 1) / parallelism; in OutputTimeLocked()
268 SelfProcessingTimeLocked() + output_time, old_input_time, parallelism, in OutputTimeLocked()
276 Square(parallelism); in OutputTimeLocked()
281 parallelism; in OutputTimeLocked()
302 static_cast<double>(num_inputs() - 1) / parallelism; in OutputTimeLocked()
304 SelfProcessingTimeLocked() + output_time, old_input_time, parallelism, in OutputTimeLocked()
427 double parallelism = 1.0; in OutputTimeLocked() local
432 parallelism = (*parallelism_parameter)->value; in OutputTimeLocked()
[all …]
device_base.cc
63 const int parallelism = std::max<int>( in eigen_cpu_device() local
66 return eigen_cpu_devices_[parallelism - 1]; in eigen_cpu_device()
/external/kotlinx.coroutines/kotlinx-coroutines-core/jvm/test/scheduling/
SchedulerTestBase.kt
89 protected fun blockingDispatcher(parallelism: Int): CoroutineContext { in <lambda>()
91 return _dispatcher!!.blocking(parallelism) + handler in <lambda>()
94 protected fun view(parallelism: Int): CoroutineContext { in <lambda>()
96 return _dispatcher!!.limited(parallelism) + handler in <lambda>()
/external/tensorflow/tensorflow/core/api_def/base_api/
api_def_ExperimentalMaxIntraOpParallelismDataset.pbtxt
6 Identifies the maximum intra-op parallelism to use.
10 Creates a dataset that overrides the maximum intra-op parallelism.
api_def_MaxIntraOpParallelismDataset.pbtxt
6 Identifies the maximum intra-op parallelism to use.
10 Creates a dataset that overrides the maximum intra-op parallelism.
api_def_MapDefun.pbtxt
49 limit the intra op parallelism. To limit inter-op parallelism, a user can
api_def_ThreadPoolHandle.pbtxt
19 The maximum degree of parallelism to use within operations that execute on this
api_def_ExperimentalThreadPoolHandle.pbtxt
19 The maximum degree of parallelism to use within operations that execute on this
api_def_TPUReplicateMetadata.pbtxt
13 Number of cores per replica. Used for model parallelism.
/external/kotlinx.coroutines/kotlinx-coroutines-core/jvm/test/
CommonPoolTest.kt
45 parallelism: Int, in createFJP()
50 parallelism, in createFJP()
/external/tensorflow/tensorflow/core/profiler/protobuf/
op_stats.proto
78 // The number of replicas, corresponds to input parallelism.
79 // If there is no model parallelism, replica_count = device_core_count
81 // The number of cores used for a single replica, e.g. model parallelism.
82 // If there is no model parallelism, then num_cores_per_replica = 1
overview_page.proto
133 // The number of replicas, corresponds to input parallelism.
134 // If there is no model parallelism, replica_count = device_core_count
136 // The number of cores used for a single replica, e.g. model parallelism.
137 // If there is no model parallelism, then num_cores_per_replica = 1
/external/libjpeg-turbo/simd/nasm/
jsimdcfg.inc.h
102 ; To maximize parallelism, Type DCTELEM is changed to short (originally, int). variable
110 ; To maximize parallelism, Type MULTIPLIER is changed to short. variable
jsimdcfg.inc
70 ; To maximize parallelism, Type DCTELEM is changed to short (originally, int).
76 ; To maximize parallelism, Type short is changed to short.
/external/skqp/third_party/libjpeg-turbo/
jsimdcfg.inc
70 ; To maximize parallelism, Type DCTELEM is changed to short (originally, int).
76 ; To maximize parallelism, Type short is changed to short.
/external/skia/third_party/libjpeg-turbo/
jsimdcfg.inc
70 ; To maximize parallelism, Type DCTELEM is changed to short (originally, int).
76 ; To maximize parallelism, Type short is changed to short.
/external/tensorflow/tensorflow/core/kernels/
record_yielder.cc
28 "record_yielder", 1 + opts.parallelism, in RecordYielder()
133 const int N = opts_.parallelism; in MainLoop()
record_input_op.cc
49 yopts.parallelism = file_parallelism; in RecordInputOp()
