/external/gemmlowp/internal/ |
D | block_params.h |
     65  std::max(1, RoundUp<KernelFormat::kRows>(rows) / num_threads);  in FindL2BlockSizes()
     72  l2_depth = RoundUp<kRegisterSize>(depth);  in FindL2BlockSizes()
     80  RoundUp<KernelFormat::kCols>(CeilQuotient(cols, min_l2_cols_blocks));  in FindL2BlockSizes()
     87  l2_rows = RoundUp<KernelFormat::kRows>(per_thread_rows);  in FindL2BlockSizes()
     94  l2_rows = RoundUp<KernelFormat::kRows>(  in FindL2BlockSizes()
    127  RoundUp<kRegisterSize>(CeilQuotient(depth, min_l1_depth_blocks));  in FindL1BlockSizes()
    136  RoundUp<KernelFormat::kRows>(CeilQuotient(rows, min_l1_rows_blocks));  in FindL1BlockSizes()
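gemmlowp pairs RoundUp with CeilQuotient when carving a matrix into L1/L2 cache blocks: a dimension is split into roughly equal pieces, and each piece is padded to the kernel's tile size. A minimal sketch of the two helpers, assuming only the contract checked in test/test_math_helpers.cc further down (the result is the smallest multiple of Modulus at or above x); gemmlowp's real definitions may differ in detail:

    // Smallest q with q * b >= a, for positive a and b.
    inline int CeilQuotient(int a, int b) { return (a + b - 1) / b; }

    // Smallest multiple of Modulus that is >= x, e.g. RoundUp<8>(13) == 16.
    // Sketch consistent with the checks in test_math_helpers.cc, not
    // necessarily gemmlowp's exact implementation.
    template <int Modulus>
    int RoundUp(int x) {
      return CeilQuotient(x, Modulus) * Modulus;
    }

With these, line 80's pattern RoundUp<KernelFormat::kCols>(CeilQuotient(cols, min_l2_cols_blocks)) reads naturally: divide the columns into min_l2_cols_blocks pieces, then pad each piece to a whole number of kernel column tiles.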
|
/external/v8/src/init/ |
D | isolate-allocator.cc |
     47  return RoundUp(Isolate::isolate_root_bias(),  in GetIsolateRootBiasPageSize()
     80  RoundUp(padded_reservation.address() + kIsolateRootBiasPageSize,  in InitReservation()
    117  RoundUp(reservation.address() + kIsolateRootBiasPageSize,  in InitReservation()
    148  size_t page_size = RoundUp(size_t{1} << kPageSizeBits,  in CommitPagesForIsolate()
    163  RoundUp(isolate_end, page_size) - reserved_region_address;  in CommitPagesForIsolate()
    176  RoundUp(isolate_end, commit_page_size) - committed_region_address;  in CommitPagesForIsolate()
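Throughout V8, the two-argument RoundUp(x, m) rounds an address or size up to the next multiple of m, here the page size, so that reservations and commits land on page boundaries. A sketch of the idea, assuming m is a power of two (true for page sizes); V8's actual helper lives elsewhere in its base library:

    #include <cstdint>

    // Round x up to a multiple of m. The bitmask form requires m to be a
    // power of two; the division form works for any positive m. Sketches,
    // not V8's code.
    inline uintptr_t RoundUpPow2(uintptr_t x, uintptr_t m) {
      return (x + m - 1) & ~(m - 1);
    }
    inline uintptr_t RoundUpGeneric(uintptr_t x, uintptr_t m) {
      return ((x + m - 1) / m) * m;
    }

Lines 163 and 176 use the result subtractively: the rounded-up end address minus the region base gives a region size that is a whole number of pages.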
|
/external/tensorflow/tensorflow/core/common_runtime/gpu/ |
D | pool_allocator_test.cc |
    236  EXPECT_EQ(1, rounder.RoundUp(1));  in TEST()
    237  EXPECT_EQ(2, rounder.RoundUp(2));  in TEST()
    238  EXPECT_EQ(16, rounder.RoundUp(9));  in TEST()
    239  EXPECT_EQ(16, rounder.RoundUp(16));  in TEST()
    240  EXPECT_EQ(65536, rounder.RoundUp(41234));  in TEST()
    241  EXPECT_EQ(65536, rounder.RoundUp(65535));  in TEST()
    242  EXPECT_EQ(65536, rounder.RoundUp(65536));  in TEST()
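These expectations pin the rounder's behavior down completely: 9 becomes 16, 41234 and 65535 become 65536, and exact powers of two (1, 2, 16, 65536) pass through unchanged. In other words, sizes are rounded up to the next power of two. A sketch of a function with that behavior; the name is ours, not TensorFlow's:

    #include <cstddef>

    // Round n up to the nearest power of two (n itself if already one).
    // Hypothetical helper reproducing the expectations above.
    size_t Pow2RoundUp(size_t n) {
      size_t p = 1;
      while (p < n) p <<= 1;  // 9 -> 16, 41234 -> 65536, 16 -> 16
      return p;
    }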
|
/external/v8/src/heap/cppgc/ |
D | virtual-memory.cc |
     20  start_ = page_allocator->AllocatePages(hint, RoundUp(size, page_size),  in VirtualMemory()
     21  RoundUp(alignment, page_size),  in VirtualMemory()
     24  size_ = RoundUp(size, page_size);  in VirtualMemory()
|
D | caged-heap.cc |
     56  RoundUp(sizeof(CagedHeapLocalData), platform_allocator->CommitPageSize()),  in CagedHeap()
     67  RoundUp(reinterpret_cast<CagedAddress>(reserved_area_.address()) +  in CagedHeap()
|
D | heap-page.cc |
     26  RoundUp(reinterpret_cast<uintptr_t>(address), alignment));  in AlignAddress()
    170  RoundUp(sizeof(NormalPage), kAllocationGranularity);  in PayloadSize()
    187  RoundUp(sizeof(LargePage), kAllocationGranularity);  in Create()
|
D | gc-info-table.cc |
     60  return RoundUp(GCInfoTable::kMaxIndex * kEntrySize,  in MaxTableSize()
     69  RoundUp(memory_wanted, page_allocator_->AllocatePageSize()) / kEntrySize;  in InitialTableLimit()
|
D | object-allocator.h |
     82  RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));  in AllocateObject()
     93  RoundUp<kAllocationGranularity>(size + sizeof(HeapObjectHeader));  in AllocateObject()
|
/external/tensorflow/tensorflow/core/common_runtime/ |
D | pool_allocator.h |
     38  virtual size_t RoundUp(size_t num_bytes) = 0;
    138  size_t RoundUp(size_t num_bytes) override { return num_bytes; }  in RoundUp() function
    144  size_t RoundUp(size_t num_bytes) override {  in RoundUp() function
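Here RoundUp is a pure virtual (line 38): the pool allocator delegates size rounding to a pluggable rounder, with a pass-through implementation on line 138 and, per the test above, a power-of-two one. A sketch of the pattern; only the member signatures appear in the listing, so the class names below are illustrative:

    #include <cstddef>

    // Strategy interface: the pool asks its rounder how many bytes to
    // actually reserve for a request (sketch of the pattern shown above).
    class SizeRounder {
     public:
      virtual ~SizeRounder() = default;
      virtual size_t RoundUp(size_t num_bytes) = 0;
    };

    // Pass-through rounder, as on line 138: requests are not padded.
    class NoopRounder : public SizeRounder {
     public:
      size_t RoundUp(size_t num_bytes) override { return num_bytes; }
    };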
|
/external/gemmlowp/test/ |
D | test_math_helpers.cc |
     40  Check(x <= RoundUp<Modulus>(x));  in test_round_up_down()
     41  Check(x > RoundUp<Modulus>(x) - Modulus);  in test_round_up_down()
     42  Check(RoundUp<Modulus>(x) % Modulus == 0);  in test_round_up_down()
|
/external/v8/src/heap/ |
D | memory-chunk-layout.cc |
     17  return ::RoundUp(MemoryChunk::kHeaderSize + Bitmap::kSize,  in CodePageGuardStartOffset()
     44  return RoundUp(MemoryChunk::kHeaderSize + Bitmap::kSize, kTaggedSize);  in ObjectStartOffsetInDataPage()
|
D | memory-allocator.cc |
     52  capacity_(RoundUp(capacity, Page::kPageSize)),  in MemoryAllocator()
     80  requested += RoundUp(reserved_area, MemoryChunk::kPageSize);  in InitializeCodePageAllocator()
    116  Address aligned_base = RoundUp(base, MemoryChunk::kAlignment);  in InitializeCodePageAllocator()
    404  chunk_size = ::RoundUp(MemoryChunkLayout::ObjectStartOffsetInCodePage() +  in AllocateBasicChunk()
    410  size_t commit_size = ::RoundUp(  in AllocateBasicChunk()
    429  chunk_size = ::RoundUp(  in AllocateBasicChunk()
    432  size_t commit_size = ::RoundUp(  in AllocateBasicChunk()
    565  RoundUp(chunk->size(), allocator->AllocatePageSize()));  in FreeReadOnlyPage()
    731  Address discardable_start = RoundUp(addr + FreeSpace::kSize, page_size);  in ComputeDiscardMemoryArea()
|
/external/v8/src/base/ |
D | bounded-page-allocator.cc |
     71  size_t region_size = RoundUp(size, allocate_page_size_);  in ReserveForSharedMemoryMapping()
    102  size_t allocated_size = RoundUp(size, allocate_page_size_);  in ReleasePages()
    103  size_t new_allocated_size = RoundUp(new_size, allocate_page_size_);  in ReleasePages()
|
/external/v8/src/execution/arm64/ |
D | frame-constants-arm64.h |
     88  RoundUp<16>(TypedFrameConstants::kFixedFrameSizeFromFp) +
    112  -RoundUp<16>(TypedFrameConstants::kFixedFrameSizeFromFp) -
|
D | frame-constants-arm64.cc |
     34  int rounded_slot_count = RoundUp(slot_count, 2);  in PaddingSlotCount()
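The AAPCS64 ABI keeps the stack pointer 16-byte aligned, so arm64 frame sizes are rounded with RoundUp<16> (header above) and slot counts to an even number, two 8-byte slots per 16 bytes. A sketch of what PaddingSlotCount plausibly returns around line 34; only that line appears in the listing, the rest is our assumption:

    // Filler slots needed to keep an arm64 frame 16-byte aligned: slots are
    // 8 bytes, so pad the count to even. E.g. 5 slots -> 6 -> 1 padding slot.
    int PaddingSlotCount(int slot_count) {
      int rounded_slot_count = ((slot_count + 1) / 2) * 2;  // RoundUp(slot_count, 2)
      return rounded_slot_count - slot_count;
    }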
|
/external/v8/src/utils/ |
D | allocation.cc |
    221  alignment = RoundUp(alignment, page_size);  in VirtualMemory()
    226  page_allocator_, hint, RoundUp(size, page_size), alignment, permissions));  in VirtualMemory()
    278  RoundUp(region.size(), page_allocator->AllocatePageSize())));  in Free()
    291  RoundUp(region.size(), page_allocator->AllocatePageSize())));  in FreeReadOnly()
|
/external/v8/src/zone/ |
D | zone.cc |
     50  size = RoundUp(size, kAlignmentInBytes);  in AsanNew()
    163  Address result = RoundUp(segment->start(), kAlignmentInBytes);  in NewExpand()
|
D | zone.h |
     60  size = RoundUp(size, kAlignmentInBytes);  in Allocate()
     86  size = RoundUp(size, kAlignmentInBytes);  in Delete()
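Zone is a bump-pointer arena: rounding every request up to kAlignmentInBytes before bumping keeps the allocation pointer aligned for all subsequent objects, with no per-object fixups. A minimal sketch of that invariant; the constant name follows the listing, everything else is illustrative:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kAlignmentInBytes = 8;  // value illustrative

    // Bump-pointer arena sketch: because every size is padded to the
    // alignment, position_ stays aligned after each allocation
    // (assuming the initial buffer is itself aligned).
    class ZoneSketch {
     public:
      ZoneSketch(void* buffer, size_t capacity)
          : position_(reinterpret_cast<uintptr_t>(buffer)),
            limit_(position_ + capacity) {}

      void* Allocate(size_t size) {
        size = ((size + kAlignmentInBytes - 1) / kAlignmentInBytes) *
               kAlignmentInBytes;                     // RoundUp, as on line 60
        if (limit_ - position_ < size) return nullptr;  // real Zone grows a segment
        void* result = reinterpret_cast<void*>(position_);
        position_ += size;
        return result;
      }

     private:
      uintptr_t position_;
      uintptr_t limit_;
    };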
|
/external/v8/src/wasm/ |
D | struct-types.h |
     73  offset = RoundUp(offset, field_size);  in InitializeOffsets()
     77  offset = RoundUp(offset, kTaggedSize);  in InitializeOffsets()
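Wasm struct layout is the classic alignment recurrence: before placing each field, round the running offset up to the field's own size (its natural alignment, line 73), then round the final offset up to kTaggedSize (line 77) so structs tile cleanly in the heap. A sketch of the loop those two lines imply; apart from them, the code below is our reconstruction:

    #include <cstdint>
    #include <vector>

    constexpr uint32_t kTaggedSize = 8;  // illustrative; 4 under pointer compression

    inline uint32_t RoundUpTo(uint32_t x, uint32_t m) { return ((x + m - 1) / m) * m; }

    // Returns each field's offset; fields are aligned to their own size and
    // the total is padded to a tagged-size multiple.
    std::vector<uint32_t> InitializeOffsets(const std::vector<uint32_t>& field_sizes) {
      std::vector<uint32_t> offsets;
      uint32_t offset = 0;
      for (uint32_t field_size : field_sizes) {
        offset = RoundUpTo(offset, field_size);  // line 73
        offsets.push_back(offset);
        offset += field_size;
      }
      offset = RoundUpTo(offset, kTaggedSize);   // line 77: padded struct size
      return offsets;
    }

For example, field sizes {1, 4, 2} yield offsets {0, 4, 8} and a padded total of 16.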
|
/external/v8/src/base/platform/ |
D | platform-fuchsia.cc |
     81  RoundUp(reinterpret_cast<uintptr_t>(base), alignment));  in Allocate()
     92  size_t aligned_size = RoundUp(size, page_size);  in Allocate()
|
/external/v8/src/snapshot/embedded/ |
D | embedded-data.cc |
     70  const uint32_t allocation_code_size = RoundUp(d.code_size(), alignment);  in CreateOffHeapInstructionStream()
     78  const uint32_t allocation_data_size = RoundUp(d.data_size(), alignment);  in CreateOffHeapInstructionStream()
    116  CHECK(FreePages(page_allocator, code, RoundUp(code_size, page_size)));  in FreeOffHeapInstructionStream()
    117  CHECK(FreePages(page_allocator, data, RoundUp(data_size, page_size)));  in FreeOffHeapInstructionStream()
|
D | embedded-data.h |
    192  return RoundUp<kCodeAlignment>(size + 1);  in PadAndAlignCode()
    197  return RoundUp<Code::kMetadataAlignment>(size);  in PadAndAlignData()
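Note the asymmetry: PadAndAlignData merely aligns, while PadAndAlignCode rounds up from size + 1, which forces the result strictly past size, so at least one byte of padding always follows the code. For example, with kCodeAlignment = 32, RoundUp<32>(64 + 1) = 96 leaves 32 trailing bytes where plain RoundUp<32>(64) = 64 would leave none. The listing shows only the expressions, so this reading of the intent is our inference.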
|
/external/compiler-rt/lib/tsan/rtl/ |
D | tsan_sync.cc |
    137  uptr diff = RoundUp(p, kPageSize) - p;  in ResetRange()
    150  CHECK_EQ(p, RoundUp(p, kPageSize));  in ResetRange()
    151  CHECK_EQ(sz, RoundUp(sz, kPageSize));  in ResetRange()
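RoundUp(p, kPageSize) - p on line 137 is the distance from p to the next page boundary; ResetRange uses it to peel the unaligned head off a range so the remainder can be processed page by page, which is what the CHECKs on lines 150 and 151 then assert. A sketch of that trimming step, with the surrounding logic assumed:

    #include <cstdint>

    using uptr = uintptr_t;           // TSan's integer-pointer alias
    constexpr uptr kPageSize = 4096;  // value illustrative

    // Shrink [p, p + sz) to its page-aligned interior; afterwards both p
    // and sz are page multiples, as the CHECKs in the listing require.
    void TrimToPages(uptr& p, uptr& sz) {
      uptr diff = ((p + kPageSize - 1) & ~(kPageSize - 1)) - p;  // to next boundary
      if (diff >= sz) { sz = 0; return; }  // range fits entirely in the head
      p += diff;
      sz -= diff;
      sz &= ~(kPageSize - 1);              // drop the unaligned tail as well
    }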
|
/external/XNNPACK/bench/ |
D | f16-gemm.cc |
     43  const size_t nc_stride = benchmark::utils::RoundUp(nc, nr);  in GEMMBenchmark()
     44  const size_t kc_stride = benchmark::utils::RoundUp(kc, kr);  in GEMMBenchmark()
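An XNNPACK GEMM microkernel produces nr output columns per step and reads kr reduction elements at a time, so the benchmark pads the packed-weight strides to those tile sizes (typically zero-filling the slack). With illustrative numbers: nr = 8 and nc = 67 give nc_stride = RoundUp(67, 8) = 72, i.e. 5 padded column slots, while kr = 4 and kc = 100 give kc_stride = RoundUp(100, 4) = 100, no padding needed.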
|
D | f32-dwconv-spchw.cc |
    202  …std::vector<float> input(input_height * benchmark::utils::RoundUp<size_t>(input_width, it) * chann…  in DWConvHWoTCTBenchmark()
    210  …const size_t o_elements = output_height * benchmark::utils::RoundUp<size_t>(output_width, ot) * ch…  in DWConvHWoTCTBenchmark()
    247  benchmark::utils::RoundUp<size_t>(input_width, it) * channels * sizeof(float),  in DWConvHWoTCTBenchmark()
    248  benchmark::utils::RoundUp<size_t>(output_width, ot) * channels * sizeof(float),  in DWConvHWoTCTBenchmark()
|