Searched refs:num_chunks (Results 1 – 25 of 56) sorted by relevance

/external/tensorflow/tensorflow/core/common_runtime/
base_collective_executor.cc
56 int64 num_chunks) { in AlignedChunkElts() argument
57 DCHECK_GT(num_chunks, 0); in AlignedChunkElts()
58 int64 base_chunk_elts = (total_elts + (num_chunks - 1)) / num_chunks; in AlignedChunkElts()
68 << "total_elts=" << total_elts << " num_chunks=" << num_chunks in AlignedChunkElts()
80 << "total_elts=" << total_elts << " num_chunks=" << num_chunks in AlignedChunkElts()
92 CollectiveAdapterImpl(Tensor* output, int64 num_chunks, Allocator* allocator, in CollectiveAdapterImpl() argument
97 num_chunks_(num_chunks), in CollectiveAdapterImpl()
193 CollectiveAdapter* MakeCollectiveAdapter(Tensor* output, int num_chunks, in MakeCollectiveAdapter() argument
198 return new CollectiveAdapterImpl<Eigen::half>(output, num_chunks, in MakeCollectiveAdapter()
202 return new CollectiveAdapterImpl<float>(output, num_chunks, allocator, in MakeCollectiveAdapter()
[all …]
base_collective_executor.h
79 int64 num_chunks);
89 CollectiveAdapter* MakeCollectiveAdapter(Tensor* output, int num_chunks,
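
The AlignedChunkElts() excerpt above is the ceiling-division idiom: it picks the smallest per-chunk element count such that num_chunks chunks always cover total_elts. A minimal standalone sketch (a hypothetical free function, not TensorFlow's actual API):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Ceiling division: smallest chunk size such that
    // num_chunks * chunk_elts >= total_elts.
    int64_t AlignedChunkElts(int64_t total_elts, int64_t num_chunks) {
      assert(num_chunks > 0);  // mirrors the DCHECK_GT in the excerpt
      return (total_elts + num_chunks - 1) / num_chunks;
    }

    int main() {
      // 10 elements over 3 chunks -> 4 per chunk, with the last chunk short.
      std::printf("%lld\n", (long long)AlignedChunkElts(10, 3));  // prints 4
    }
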
/external/webrtc/modules/audio_processing/vad/
voice_activity_detector_unittest.cc
72 size_t num_chunks = 0; in TEST() local
79 ++num_chunks; in TEST()
82 mean_probability /= num_chunks; in TEST()
107 size_t num_chunks = 0; in TEST() local
114 ++num_chunks; in TEST()
117 mean_probability /= num_chunks; in TEST()
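
The unit test above accumulates one probability per processed audio chunk and divides by the chunk count at the end. A self-contained sketch of that running-mean pattern (the probabilities here are made up; the real test obtains them from the voice activity detector):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      // Hypothetical per-chunk voice probabilities.
      std::vector<double> chunk_probs = {0.2, 0.9, 0.8, 0.1};
      double mean_probability = 0.0;
      size_t num_chunks = 0;
      for (double p : chunk_probs) {
        mean_probability += p;
        ++num_chunks;  // count chunks as they are processed
      }
      mean_probability /= num_chunks;  // average over all processed chunks
      std::printf("mean = %f\n", mean_probability);  // prints mean = 0.500000
    }
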
/external/mesa3d/src/gallium/winsys/amdgpu/drm/
amdgpu_cs.c
1500 unsigned num_chunks = 0; in amdgpu_cs_submit_ib() local
1504 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_BO_HANDLES; in amdgpu_cs_submit_ib()
1505 chunks[num_chunks].length_dw = sizeof(struct drm_amdgpu_bo_list_in) / 4; in amdgpu_cs_submit_ib()
1506 chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in; in amdgpu_cs_submit_ib()
1507 num_chunks++; in amdgpu_cs_submit_ib()
1524 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_DEPENDENCIES; in amdgpu_cs_submit_ib()
1525 chunks[num_chunks].length_dw = sizeof(dep_chunk[0]) / 4 * num_dependencies; in amdgpu_cs_submit_ib()
1526 chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk; in amdgpu_cs_submit_ib()
1527 num_chunks++; in amdgpu_cs_submit_ib()
1547 chunks[num_chunks].chunk_id = AMDGPU_CHUNK_ID_SYNCOBJ_IN; in amdgpu_cs_submit_ib()
[all …]
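
amdgpu_cs_submit_ib() above appends each optional payload (BO handles, dependencies, sync objects) to a chunks[] array by filling chunk_id, length_dw and chunk_data, then bumping num_chunks. A sketch of that append pattern, with a simplified stand-in for struct drm_amdgpu_cs_chunk and made-up chunk IDs:

    #include <cstdint>
    #include <cstdio>

    // Simplified stand-in for struct drm_amdgpu_cs_chunk from the DRM uapi.
    struct Chunk {
      uint32_t chunk_id;    // which kind of payload this chunk carries
      uint32_t length_dw;   // payload length in 32-bit dwords
      uint64_t chunk_data;  // pointer to the payload, cast to u64
    };

    int main() {
      uint32_t bo_list_in[4] = {0};
      uint32_t dep_chunk[8] = {0};

      Chunk chunks[8];
      unsigned num_chunks = 0;

      // Append pattern: fill the next free slot, then advance the count.
      chunks[num_chunks].chunk_id = 1;  // e.g. AMDGPU_CHUNK_ID_BO_HANDLES
      chunks[num_chunks].length_dw = sizeof(bo_list_in) / 4;
      chunks[num_chunks].chunk_data = (uintptr_t)&bo_list_in;
      num_chunks++;

      chunks[num_chunks].chunk_id = 2;  // e.g. AMDGPU_CHUNK_ID_DEPENDENCIES
      chunks[num_chunks].length_dw = sizeof(dep_chunk) / 4;
      chunks[num_chunks].chunk_data = (uintptr_t)dep_chunk;
      num_chunks++;

      std::printf("would submit %u chunks\n", num_chunks);
    }
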
amdgpu_bo.c
833 for (unsigned i = 0; i < backing->num_chunks; ++i) in sparse_dump()
857 for (unsigned idx = 0; idx < backing->num_chunks; ++idx) { in sparse_backing_alloc()
906 best_backing->num_chunks = 1; in sparse_backing_alloc()
923 sizeof(*best_backing->chunks) * (best_backing->num_chunks - best_idx - 1)); in sparse_backing_alloc()
924 best_backing->num_chunks--; in sparse_backing_alloc()
959 unsigned high = backing->num_chunks; in sparse_backing_free()
971 assert(low >= backing->num_chunks || end_page <= backing->chunks[low].begin); in sparse_backing_free()
977 if (low < backing->num_chunks && end_page == backing->chunks[low].begin) { in sparse_backing_free()
980 sizeof(*backing->chunks) * (backing->num_chunks - low - 1)); in sparse_backing_free()
981 backing->num_chunks--; in sparse_backing_free()
[all …]
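
The sparse-backing code above keeps its chunk list sorted by page range and uses memmove() plus a num_chunks decrement to delete or merge entries in place. A sketch of that compaction step on a toy array:

    #include <cstdio>
    #include <cstring>

    struct PageChunk { unsigned begin, end; };  // simplified stand-in

    int main() {
      PageChunk chunks[4] = {{0, 4}, {4, 8}, {8, 12}, {12, 16}};
      unsigned num_chunks = 4;

      // Remove the chunk at index `low` by sliding the tail down one slot,
      // the same pattern the winsys uses after merging adjacent ranges.
      unsigned low = 1;
      memmove(&chunks[low], &chunks[low + 1],
              sizeof(*chunks) * (num_chunks - low - 1));
      num_chunks--;

      for (unsigned i = 0; i < num_chunks; ++i)
        std::printf("[%u,%u) ", chunks[i].begin, chunks[i].end);
      std::printf("\n");  // prints [0,4) [8,12) [12,16)
    }
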
/external/perfetto/src/tracing/core/
shared_memory_abi_unittest.cc
84 const size_t num_chunks = in TEST_P() local
88 for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) { in TEST_P()
112 (page_size() - sizeof(SharedMemoryABI::PageHeader)) / num_chunks; in TEST_P()
159 for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) { in TEST_P()
164 ASSERT_EQ(chunk_idx == num_chunks - 1, abi.is_page_complete(page_idx)); in TEST_P()
170 for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) { in TEST_P()
179 for (size_t chunk_idx = 0; chunk_idx < num_chunks; chunk_idx++) { in TEST_P()
186 ASSERT_EQ(chunk_idx == num_chunks - 1, abi.is_page_free(page_idx)); in TEST_P()
shared_memory_abi.cc
56 size_t num_chunks = SharedMemoryABI::kNumChunksForLayout[i]; in InitChunkSizes() local
57 size_t size = num_chunks == 0 ? 0 : GetChunkSize(page_size, num_chunks); in InitChunkSizes()
147 const size_t num_chunks = GetNumChunksForLayout(page_layout); in GetChunkUnchecked() local
148 PERFETTO_DCHECK(chunk_idx < num_chunks); in GetChunkUnchecked()
169 const size_t num_chunks = GetNumChunksForLayout(layout); in TryAcquireChunk() local
172 if (chunk_idx >= num_chunks) in TryAcquireChunk()
222 const uint32_t num_chunks = GetNumChunksForLayout(layout); in GetFreeChunks() local
224 for (uint32_t i = 0; i < num_chunks; i++) { in GetFreeChunks()
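
In the ABI above, the page layout determines num_chunks, and each chunk gets an equal share of the page after the header: (page_size - sizeof(PageHeader)) / num_chunks. A sketch with made-up sizes (the real kNumChunksForLayout table and header size live in shared_memory_abi.h):

    #include <cstddef>
    #include <cstdio>

    int main() {
      const size_t page_size = 4096;      // illustrative
      const size_t page_header_size = 8;  // illustrative
      const size_t chunks_for_layout[] = {1, 2, 4, 7, 14};

      for (size_t num_chunks : chunks_for_layout) {
        // Each chunk gets an equal share of the page after the page header.
        size_t chunk_size = (page_size - page_header_size) / num_chunks;
        std::printf("%zu chunks -> %zu bytes each\n", num_chunks, chunk_size);
      }
    }
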
/external/libdrm/amdgpu/
amdgpu_cs.c
230 uint32_t i, size, num_chunks, bo_list_handle = 0, sem_count = 0; in amdgpu_cs_submit_one() local
255 num_chunks = ibs_request->number_of_ibs; in amdgpu_cs_submit_one()
277 i = num_chunks++; in amdgpu_cs_submit_one()
309 i = num_chunks++; in amdgpu_cs_submit_one()
341 i = num_chunks++; in amdgpu_cs_submit_one()
349 r = amdgpu_cs_submit_raw2(dev, context, bo_list_handle, num_chunks, in amdgpu_cs_submit_one()
858 int num_chunks, in amdgpu_cs_submit_raw() argument
865 if (num_chunks == 0) in amdgpu_cs_submit_raw()
869 chunk_array = alloca(sizeof(uint64_t) * num_chunks); in amdgpu_cs_submit_raw()
870 for (i = 0; i < num_chunks; i++) in amdgpu_cs_submit_raw()
[all …]
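
amdgpu_cs_submit_raw() above sizes an on-stack u64 array with alloca() and fills it with pointers to the chunks before the raw submit. A sketch of that step, with std::vector standing in for the stack allocation:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    struct Chunk { uint32_t chunk_id; };  // stand-in for drm_amdgpu_cs_chunk

    int main() {
      Chunk chunks[3] = {{1}, {2}, {3}};
      uint32_t num_chunks = 3;

      // The kernel interface takes an array of u64 pointers, one per chunk.
      std::vector<uint64_t> chunk_array(num_chunks);
      for (uint32_t i = 0; i < num_chunks; i++)
        chunk_array[i] = (uintptr_t)&chunks[i];

      std::printf("prepared %u chunk pointers\n", num_chunks);
    }
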
/external/tensorflow/tensorflow/core/debug/
debug_io_utils.cc
62 const uint64 wall_time_us, const size_t num_chunks, in PrepareChunkEventProto() argument
84 metadata.set_num_chunks(num_chunks); in PrepareChunkEventProto()
156 const size_t num_chunks = cutoffs.size(); in WrapStringTensorAsEvents() local
158 for (size_t i = 0; i < num_chunks; ++i) { in WrapStringTensorAsEvents()
160 num_chunks, i, tensor_proto->dtype(), in WrapStringTensorAsEvents()
205 const size_t num_chunks = in WrapTensorAsEvents() local
209 for (size_t i = 0; i < num_chunks; ++i) { in WrapTensorAsEvents()
212 (i == num_chunks - 1) ? (total_length - pos) : chunk_size_ub; in WrapTensorAsEvents()
214 num_chunks, i, tensor_proto.dtype(), in WrapTensorAsEvents()
253 const size_t num_chunks = in PublishEncodedGraphDefInChunks() local
[all …]
debugger_event_metadata.proto
9 int32 num_chunks = 3; field
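
The debug I/O code above splits a serialized tensor into num_chunks = ceil(total_length / chunk_size_ub) pieces and stamps each event with the chunk count and index (the num_chunks proto field above) so the receiver can reassemble them. A sketch of the split:

    #include <cstddef>
    #include <cstdio>
    #include <string>
    #include <vector>

    int main() {
      const std::string payload(10, 'x');  // stand-in for a serialized tensor
      const size_t chunk_size_ub = 4;      // upper bound per chunk

      // Ceiling division, as in WrapTensorAsEvents().
      const size_t num_chunks =
          (payload.size() + chunk_size_ub - 1) / chunk_size_ub;

      std::vector<std::string> chunks;
      for (size_t i = 0; i < num_chunks; ++i) {
        const size_t pos = i * chunk_size_ub;
        // The last chunk takes whatever remains.
        const size_t len =
            (i == num_chunks - 1) ? (payload.size() - pos) : chunk_size_ub;
        chunks.push_back(payload.substr(pos, len));
      }
      // 10 bytes with a 4-byte bound -> 3 chunks of 4, 4 and 2 bytes.
      std::printf("%zu chunks\n", num_chunks);
    }
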
/external/autotest/client/tests/ebizzy/
ebizzy.py
23 def run_once(self, args='', num_chunks=1000, chunk_size=512000, argument
29 args2 = '-m -n %s -P -R -s %s -S %s -t %s' % (num_chunks, chunk_size,
/external/perfetto/include/perfetto/ext/tracing/core/
shared_memory_abi.h
483 const uint32_t num_chunks = GetNumChunksForLayout(layout); in is_page_complete() local
484 if (num_chunks == 0) in is_page_complete()
487 (kAllChunksComplete & ((1 << (num_chunks * kChunkShift)) - 1)); in is_page_complete()
576 const uint32_t num_chunks = GetNumChunksForLayout(page_layout); in GetUsedChunks() local
578 for (uint32_t i = 0; i < num_chunks; i++) { in GetUsedChunks()
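
is_page_complete() above packs a small per-chunk state into the layout word and masks kAllChunksComplete down to the bits actually used by num_chunks chunks. A sketch with made-up constants (the real encoding lives in the ABI header, so treat the widths and values here as assumptions):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const uint32_t kChunkShift = 2;                  // assumed: 2 bits/chunk
      const uint32_t kAllChunksComplete = 0xFFFFFFFF;  // assumed: 0b11 = done

      uint32_t num_chunks = 4;
      uint32_t layout = 0xFF;  // four chunks, all marked complete

      // Keep only the state bits this layout actually uses, then compare.
      uint32_t used_bits = (1u << (num_chunks * kChunkShift)) - 1;
      bool complete = (layout & used_bits) == (kAllChunksComplete & used_bits);
      std::printf("page complete: %s\n", complete ? "yes" : "no");
    }
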
/external/mesa3d/src/intel/vulkan/tests/
state_pool_test_helper.h
39 const unsigned num_chunks = STATES_PER_THREAD / chunk_size; in alloc_states() local
45 for (unsigned c = 0; c < num_chunks; c++) { in alloc_states()
/external/swiftshader/third_party/SPIRV-Tools/source/fuzz/
shrinker.cpp
168 const uint32_t num_chunks = in Run() local
172 assert(num_chunks >= 1 && "There should be at least one chunk."); in Run()
173 assert(num_chunks * chunk_size >= in Run()
180 for (int chunk_index = num_chunks - 1; in Run()
/external/angle/third_party/vulkan-deps/spirv-tools/src/source/fuzz/
shrinker.cpp
168 const uint32_t num_chunks = in Run() local
172 assert(num_chunks >= 1 && "There should be at least one chunk."); in Run()
173 assert(num_chunks * chunk_size >= in Run()
180 for (int chunk_index = num_chunks - 1; in Run()
/external/deqp-deps/SPIRV-Tools/source/fuzz/
shrinker.cpp
168 const uint32_t num_chunks = in Run() local
172 assert(num_chunks >= 1 && "There should be at least one chunk."); in Run()
173 assert(num_chunks * chunk_size >= in Run()
180 for (int chunk_index = num_chunks - 1; in Run()
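
All three copies of shrinker.cpp above walk fixed-size chunks of the transformation sequence from the last chunk toward the first, attempting to remove each one. A sketch of that loop skeleton over a toy item list (the interestingness test a real shrinker would re-run is stubbed out with a print):

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    int main() {
      std::vector<int> items = {1, 2, 3, 4, 5, 6, 7};
      const size_t chunk_size = 3;

      // Ceiling division so a short final chunk is still covered.
      const size_t num_chunks = (items.size() + chunk_size - 1) / chunk_size;

      for (int chunk_index = (int)num_chunks - 1; chunk_index >= 0;
           --chunk_index) {
        size_t begin = (size_t)chunk_index * chunk_size;
        size_t end = begin + chunk_size < items.size() ? begin + chunk_size
                                                       : items.size();
        // A real shrinker would drop this range, re-test, and keep the
        // removal only if the failure still reproduces.
        std::printf("try removing items [%zu, %zu)\n", begin, end);
      }
    }
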
/external/tensorflow/tensorflow/python/debug/lib/
grpc_debug_server.py
252 num_chunks = debugger_plugin_metadata["numChunks"]
255 if num_chunks <= 1:
263 tensor_chunks[tensor_key] = [None] * num_chunks
307 num_chunks = int(event.graph_def[index_bar_1 + 1 : index_bar_2])
309 graph_def_chunks[graph_def_hash_device_timestamp] = [None] * num_chunks
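
grpc_debug_server.py above pre-sizes a slot table with [None] * num_chunks and files each arriving chunk into its own index, so arrival order does not matter; assembly happens once every slot is filled. A sketch of the same reassembly buffer:

    #include <cstddef>
    #include <cstdio>
    #include <optional>
    #include <string>
    #include <vector>

    int main() {
      // Slot table sized up front, like `[None] * num_chunks` in the server.
      size_t num_chunks = 3;
      std::vector<std::optional<std::string>> slots(num_chunks);

      slots[2] = "gamma";  // chunks may arrive out of order
      slots[0] = "alpha";
      slots[1] = "beta";

      std::string assembled;
      bool all_arrived = true;
      for (const auto& s : slots) {
        if (!s) { all_arrived = false; break; }
        assembled += *s;
      }
      if (all_arrived) std::printf("%s\n", assembled.c_str());
    }
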
/external/usrsctp/usrsctplib/netinet/
sctp_auth.c
123 list->num_chunks++; in sctp_auth_add_chunk()
142 list->num_chunks--; in sctp_auth_delete_chunk()
156 return (list->num_chunks); in sctp_auth_get_chklist_size()
188 if (list->num_chunks <= 32) { in sctp_pack_auth_chunks()
213 sctp_unpack_auth_chunks(const uint8_t *ptr, uint8_t num_chunks, in sctp_unpack_auth_chunks() argument
222 if (num_chunks <= 32) { in sctp_unpack_auth_chunks()
224 for (i = 0; i < num_chunks; i++) { in sctp_unpack_auth_chunks()
227 size = num_chunks; in sctp_unpack_auth_chunks()
1410 uint16_t num_chunks = 0; local
1470 num_chunks = plen - sizeof(*chunks);
[all …]
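
sctp_auth.c above maintains num_chunks as a running count next to a per-chunk-type membership table, incrementing on add and decrementing on delete so the list size is always available in O(1). A sketch of that bookkeeping (the struct is a simplified stand-in, not the real sctp_auth_chklist_t):

    #include <cstdint>
    #include <cstdio>

    struct ChunkList {
      uint8_t chunks[256] = {0};  // membership flag per chunk type
      int num_chunks = 0;         // running count of listed types
    };

    void add_chunk(ChunkList* list, uint8_t type) {
      if (!list->chunks[type]) {
        list->chunks[type] = 1;
        list->num_chunks++;  // count only newly-added types
      }
    }

    void delete_chunk(ChunkList* list, uint8_t type) {
      if (list->chunks[type]) {
        list->chunks[type] = 0;
        list->num_chunks--;
      }
    }

    int main() {
      ChunkList list;
      add_chunk(&list, 0x0F);
      add_chunk(&list, 0x0F);  // duplicate: count stays at 1
      add_chunk(&list, 0x10);
      std::printf("num_chunks = %d\n", list.num_chunks);  // prints 2
      delete_chunk(&list, 0x0F);
      std::printf("num_chunks = %d\n", list.num_chunks);  // prints 1
    }
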
/external/tensorflow/tensorflow/python/distribute/v1/
all_reduce.py
325 num_chunks = num_devices * num_subchunks
332 splits, split_pad_len = _padded_split(input_tensors[d], num_chunks)
337 new_partial_reductions = [None for _ in range(0, num_chunks)]
395 num_chunks = len(chunks_by_dev[0])
396 if 0 != num_chunks % num_devices:
399 num_subchunks = int(num_chunks / num_devices)
402 passed_values = [None for _ in range(0, num_chunks)]
all_reduce_test.py
120 num_chunks = 2 * len(input_tensors)
123 self.assertEqual(len(otl), num_chunks)
125 self.assertEqual(ot.shape, [tlen//num_chunks])
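
all_reduce.py above slices each tensor into num_chunks = num_devices * num_subchunks pieces, padding the tensor first (_padded_split also returns the pad length) so the split comes out even. A sketch of the size arithmetic with illustrative numbers:

    #include <cstddef>
    #include <cstdio>

    int main() {
      // Illustrative ring all-reduce sizing: every device contributes
      // num_subchunks pieces, so the tensor is padded to a multiple of
      // num_chunks before splitting.
      size_t num_devices = 4, num_subchunks = 2, tensor_len = 30;
      size_t num_chunks = num_devices * num_subchunks;

      size_t chunk_len = (tensor_len + num_chunks - 1) / num_chunks;  // ceil
      size_t split_pad_len = chunk_len * num_chunks - tensor_len;

      std::printf("%zu chunks of %zu elements, %zu elements of padding\n",
                  num_chunks, chunk_len, split_pad_len);
    }
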
/external/libpng/
pngset.c
1376 unsigned int num_chunks, old_num_chunks; in png_set_keep_unknown_chunks() local
1424 num_chunks = (unsigned int)/*SAFE*/(sizeof chunks_to_ignore)/5U; in png_set_keep_unknown_chunks()
1439 num_chunks = (unsigned int)num_chunks_in; in png_set_keep_unknown_chunks()
1448 if (num_chunks + old_num_chunks > UINT_MAX/5) in png_set_keep_unknown_chunks()
1462 5 * (num_chunks + old_num_chunks))); in png_set_keep_unknown_chunks()
1485 for (i=0; i<num_chunks; ++i) in png_set_keep_unknown_chunks()
1492 num_chunks = 0; in png_set_keep_unknown_chunks()
1500 ++num_chunks; in png_set_keep_unknown_chunks()
1505 if (num_chunks == 0) in png_set_keep_unknown_chunks()
1515 num_chunks = 0; in png_set_keep_unknown_chunks()
[all …]
/external/pdfium/third_party/libpng16/
pngset.c
1376 unsigned int num_chunks, old_num_chunks; in png_set_keep_unknown_chunks() local
1424 num_chunks = (unsigned int)/*SAFE*/(sizeof chunks_to_ignore)/5U; in png_set_keep_unknown_chunks()
1439 num_chunks = (unsigned int)num_chunks_in; in png_set_keep_unknown_chunks()
1448 if (num_chunks + old_num_chunks > UINT_MAX/5) in png_set_keep_unknown_chunks()
1462 5 * (num_chunks + old_num_chunks))); in png_set_keep_unknown_chunks()
1485 for (i=0; i<num_chunks; ++i) in png_set_keep_unknown_chunks()
1492 num_chunks = 0; in png_set_keep_unknown_chunks()
1500 ++num_chunks; in png_set_keep_unknown_chunks()
1505 if (num_chunks == 0) in png_set_keep_unknown_chunks()
1515 num_chunks = 0; in png_set_keep_unknown_chunks()
[all …]
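
Both copies of pngset.c above store 5 bytes per chunk entry (4 chunk-name bytes plus a keep flag) and guard num_chunks + old_num_chunks > UINT_MAX/5 before computing the 5 * (num_chunks + old_num_chunks) allocation size, so the multiplication cannot overflow. A sketch of that check:

    #include <climits>
    #include <cstdio>

    int main() {
      unsigned int old_num_chunks = 3;  // entries already stored
      unsigned int num_chunks = 2;      // entries being added

      // Guard the multiplication before sizing the allocation, as
      // png_set_keep_unknown_chunks() does; each entry is 5 bytes.
      if (num_chunks + old_num_chunks > UINT_MAX / 5) {
        std::printf("too many chunks\n");
        return 1;
      }
      unsigned int alloc_size = 5 * (num_chunks + old_num_chunks);
      std::printf("allocating %u bytes\n", alloc_size);
    }
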
/external/mesa3d/src/gallium/winsys/radeon/drm/
radeon_drm_cs.c
658 cs->cst->cs.num_chunks = 3; in radeon_drm_cs_flush()
667 cs->cst->cs.num_chunks = 3; in radeon_drm_cs_flush()
673 cs->cst->cs.num_chunks = 3; in radeon_drm_cs_flush()
681 cs->cst->cs.num_chunks = 3; in radeon_drm_cs_flush()
685 cs->cst->cs.num_chunks = 3; in radeon_drm_cs_flush()
689 cs->cst->cs.num_chunks = 3; in radeon_drm_cs_flush()
693 cs->cst->cs.num_chunks = 3; in radeon_drm_cs_flush()
/external/mesa3d/src/amd/vulkan/winsys/amdgpu/
radv_amdgpu_cs.c
1626 int num_chunks; in radv_amdgpu_cs_submit() local
1656 num_chunks = request->number_of_ibs; in radv_amdgpu_cs_submit()
1675 i = num_chunks++; in radv_amdgpu_cs_submit()
1693 &chunks[num_chunks], in radv_amdgpu_cs_submit()
1698 &chunks[num_chunks], in radv_amdgpu_cs_submit()
1705 num_chunks++; in radv_amdgpu_cs_submit()
1731 i = num_chunks++; in radv_amdgpu_cs_submit()
1745 &chunks[num_chunks], in radv_amdgpu_cs_submit()
1750 &chunks[num_chunks], in radv_amdgpu_cs_submit()
1757 num_chunks++; in radv_amdgpu_cs_submit()
[all …]
/external/bsdiff/
split_patch_writer_unittest.cc
19 void SetUpForSize(size_t num_chunks, in SetUpForSize() argument
22 fake_patches_.resize(num_chunks); in SetUpForSize()
