/external/gemmlowp/test/ |
D | test_math_helpers.cc |
    36 Check(x >= RoundDown<Modulus>(x)); in test_round_up_down()
    37 Check(x < RoundDown<Modulus>(x) + Modulus); in test_round_up_down()
    38 Check(RoundDown<Modulus>(x) % Modulus == 0); in test_round_up_down()
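For context, the three Check() calls above pin down the contract of RoundDown: it never rounds up, it stays within one Modulus of the input, and it returns an exact multiple. A minimal self-contained sketch of the same invariants, with kModulus and round_down as illustrative stand-ins for the template parameter and function:

    #include <cassert>

    int main() {
      const int kModulus = 8;  // stand-in for the Modulus template parameter
      // Illustrative round-down; valid for non-negative x.
      auto round_down = [&](int x) { return x - (x % kModulus); };
      for (int x = 0; x < 1000; ++x) {
        assert(x >= round_down(x));             // never rounds up
        assert(x < round_down(x) + kModulus);   // lands within one Modulus
        assert(round_down(x) % kModulus == 0);  // result is an exact multiple
      }
    }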
|
/external/gemmlowp/internal/ |
D | common.h |
    137 Integer RoundDown(Integer i) { in RoundDown() function
    145 return RoundDown<Modulus>(i + Modulus - 1); in RoundUp()
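The RoundUp() at line 145 is built on RoundDown(): adding Modulus - 1 pushes every non-multiple past the next boundary before rounding down, while exact multiples stay put. A hedged sketch (the RoundDown body at line 137 is truncated in the listing; i - i % Modulus is a plausible reconstruction for non-negative i):

    template <int Modulus, typename Integer>
    Integer RoundDown(Integer i) {
      return i - (i % Modulus);  // reconstruction; assumes non-negative i
    }

    template <int Modulus, typename Integer>
    Integer RoundUp(Integer i) {
      return RoundDown<Modulus>(i + Modulus - 1);  // as at line 145
    }

    // Worked example: RoundUp<8>(13) == RoundDown<8>(20) == 16,
    // and RoundUp<8>(16) == RoundDown<8>(23) == 16.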
|
D | pack.h | 372 const int register_aligned_depth = RoundDown<kRegisterSize>(depth); in PackRun()
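The PackRun() line above is the classic main-loop/leftovers split: handle RoundDown<kRegisterSize>(depth) elements in full-register steps, then finish the remainder one element at a time. A minimal sketch of that pattern; kRegisterSize, SumDepth, and the scalar inner loop standing in for real SIMD code are all illustrative:

    constexpr int kRegisterSize = 4;  // assumption: 4 lanes per SIMD register

    // Sum `depth` floats with a register-aligned main loop plus a scalar tail.
    float SumDepth(const float* data, int depth) {
      const int register_aligned_depth = depth - depth % kRegisterSize;
      float sum = 0.0f;
      int d = 0;
      for (; d < register_aligned_depth; d += kRegisterSize) {
        // In the real kernel this step is a single SIMD load-and-accumulate.
        for (int lane = 0; lane < kRegisterSize; ++lane) sum += data[d + lane];
      }
      for (; d < depth; ++d) sum += data[d];  // leftover elements
      return sum;
    }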
|
/external/v8/src/base/ |
D | macros.h |
    339 inline T RoundDown(T x, intptr_t m) { in RoundDown() function
    346 constexpr inline T RoundDown(T x) { in RoundDown() function
    357 return RoundDown<T>(static_cast<T>(x + m - 1), m); in RoundUp()
    362 return RoundDown<m, T>(static_cast<T>(x + (m - 1))); in RoundUp()
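Unlike the gemmlowp version, V8's helpers require m to be a power of two, which turns rounding down into a single mask. The bodies below are a reconstruction of the idiom (the listing shows only the signatures); the RoundUp() forwarding matches lines 357 and 362:

    #include <cassert>
    #include <cstdint>

    // Runtime-m variant (line 339); m must be a power of two.
    template <typename T>
    inline T RoundDown(T x, intptr_t m) {
      assert(m != 0 && (m & (m - 1)) == 0);
      return x & static_cast<T>(-m);  // clear the low log2(m) bits
    }

    // Compile-time variant (line 346).
    template <intptr_t m, typename T>
    constexpr T RoundDown(T x) {
      static_assert(m != 0 && (m & (m - 1)) == 0, "m must be a power of two");
      return x & static_cast<T>(-m);
    }

    // RoundUp biases first, exactly as lines 357/362 show.
    template <typename T>
    inline T RoundUp(T x, intptr_t m) {
      return RoundDown<T>(static_cast<T>(x + m - 1), m);
    }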
|
/external/v8/src/init/ |
D | isolate-allocator.cc |
    67 Address hint = RoundDown(reinterpret_cast<Address>( in InitReservation()
    174 RoundDown(isolate_address, commit_page_size); in CommitPagesForIsolate()
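Both call sites follow the same pattern: an address (a randomized placement hint at line 67, an isolate address at line 174) is snapped down to a required alignment before being handed to the page allocator. A minimal sketch, with the alignment value as an assumption:

    #include <cstdint>

    constexpr uintptr_t kAlignment = 2u << 20;  // assumption: 2 MiB reservation alignment

    // Snap a randomized address down to the alignment the reservation requires,
    // so the OS hint itself is already correctly aligned.
    void* AlignedHint(uintptr_t random_addr) {
      return reinterpret_cast<void*>(random_addr & ~(kAlignment - 1));
    }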
|
/external/v8/src/execution/arm64/ |
D | frame-constants-arm64.cc | 27 return RoundDown(register_count, 2) + 1; in RegisterStackSlotCount()
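A note on the arithmetic: RoundDown(register_count, 2) + 1 always yields an odd slot count, e.g. RoundDown(4, 2) + 1 = 5 and RoundDown(5, 2) + 1 = 5, so even register counts gain one padding slot while odd counts are kept. Presumably the extra slot keeps the arm64 frame, which must stay 16-byte aligned, at an even total once the fixed slots are added; the listing alone does not show the rationale.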
|
/external/v8/src/heap/cppgc/ |
D | caged-heap-local-data.cc | 28 const uintptr_t end = RoundDown(reinterpret_cast<uintptr_t>(table_.end()), in Reset()
|
D | object-allocator.cc |
    35 ? RoundDown(offset_begin, kEntrySize) in MarkRangeAsYoung()
    39 : RoundDown(offset_end, kEntrySize); in MarkRangeAsYoung()
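The two conditional branches pick between rounding a range boundary down or up depending on whether it coincides with the page edge; the net effect, on a hedged reading since the listing shows only the RoundDown arms, is that an age-table entry is flipped only when the young range actually covers it. A sketch of the inner-entry walk, with kEntrySize (power of two) as an assumption:

    #include <cstdint>

    constexpr uintptr_t kEntrySize = 4096;  // assumption: bytes per age-table entry

    // Count the age-table entries fully covered by [offset_begin, offset_end):
    // round the begin up and the end down, then walk whole entries.
    int CountInnerEntries(uintptr_t offset_begin, uintptr_t offset_end) {
      const uintptr_t first = (offset_begin + kEntrySize - 1) & ~(kEntrySize - 1);  // RoundUp
      const uintptr_t last = offset_end & ~(kEntrySize - 1);                        // RoundDown
      int n = 0;
      for (uintptr_t off = first; off < last; off += kEntrySize) ++n;  // mark entry at `off`
      return n;
    }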
|
D | caged-heap.cc | 33 void* hint = reinterpret_cast<void*>(RoundDown( in ReserveCagedHeap()
|
/external/v8/src/common/ |
D | ptr-compr-inl.h | 37 return RoundDown<kPtrComprIsolateRootAlignment>(on_heap_addr); in GetIsolateRootAddress()
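Line 37 is the core pointer-compression trick: the heap cage is reserved at a base aligned to kPtrComprIsolateRootAlignment, so the root can be recovered from any address inside the cage with one mask. A sketch assuming a 64-bit build and a 4 GiB alignment:

    #include <cstdint>

    constexpr uint64_t kCageAlignment = uint64_t{1} << 32;  // assumption: 4 GiB

    // Recover the cage base from any on-heap address: because the base is
    // 4 GiB-aligned and the cage is at most 4 GiB, masking the low 32 bits
    // of any interior address yields the base.
    uint64_t GetCageBase(uint64_t on_heap_addr) {
      return on_heap_addr & ~(kCageAlignment - 1);
    }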
|
/external/compiler-rt/lib/tsan/rtl/ |
D | tsan_defs.h | 134 T RoundDown(T p, u64 align) { in RoundDown() function
|
D | tsan_rtl_report.cc |
    383 CHECK_EQ(RoundDown(epoch, kTracePartSize), hdr->epoch0); in RestoreStack()
    384 const u64 epoch0 = RoundDown(epoch, TraceSize()); in RestoreStack()
    386 const u64 ebegin = RoundDown(eend, kTracePartSize); in RestoreStack()
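RestoreStack() navigates the trace as a ring of fixed-size parts, so every index of interest is mapped to a part boundary with RoundDown. A sketch of that windowing; kTracePartSize is an assumption:

    #include <cstdint>

    constexpr uint64_t kTracePartSize = 64 * 1024;  // assumption: events per trace part

    // The window of trace events that can contain `epoch` starts at the nearest
    // part boundary at or below it, mirroring ebegin/epoch0 above.
    uint64_t PartBegin(uint64_t epoch) {
      return epoch - epoch % kTracePartSize;
    }
    // e.g. with kTracePartSize = 65536, PartBegin(200000) == 196608 == 3 * 65536.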
|
D | tsan_mman.cc | 54 diff = p + size - RoundDown(p + size, kPageSize); in OnUnmap()
|
D | tsan_sync.cc | 143 diff = p + sz - RoundDown(p + sz, kPageSize); in ResetRange()
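Both this line and the OnUnmap() line above compute the same tail quantity: p + sz - RoundDown(p + sz, kPageSize) is the number of bytes sticking out past the last page boundary, and together with the analogous head trim it shrinks a range to whole pages before memory is released. A sketch of the full trim (assumes the range still spans at least one page after trimming):

    #include <cstdint>

    constexpr uintptr_t kPageSize = 4096;  // assumption: 4 KiB pages

    // Shrink [*p, *p + *sz) to its largest page-aligned subrange.
    void TrimToWholePages(uintptr_t* p, uintptr_t* sz) {
      uintptr_t diff = *p % kPageSize;  // head bytes before the first boundary
      if (diff != 0) {
        *p += kPageSize - diff;
        *sz -= kPageSize - diff;
      }
      diff = (*p + *sz) % kPageSize;    // tail: (p + sz) - RoundDown(p + sz, kPageSize)
      *sz -= diff;
    }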
|
/external/v8/src/base/platform/ |
D | platform-starboard.cc |
    171 inline T RoundDown(T x, intptr_t m) { in RoundDown() function
    179 return RoundDown<T>(static_cast<T>(x + m - 1), m); in RoundUpOld()
|
D | platform-posix.cc | 282 raw_addr = RoundDown(raw_addr, AllocatePageSize()); in GetRandomMmapAddr()
|
/external/v8/src/zone/ |
D | accounting-allocator.cc | 32 void* hint = reinterpret_cast<void*>(RoundDown( in ReserveAddressSpace()
|
D | zone.cc | 113 DCHECK_EQ(size, RoundDown(size, kAlignmentInBytes)); in NewExpand()
|
/external/v8/src/heap/ |
D | spaces.h |
    151 return RoundDown(size, kCodeAlignment); in RoundSizeDownToObjectAlignment()
    153 return RoundDown(size, kTaggedSize); in RoundSizeDownToObjectAlignment()
|
D | memory-allocator.cc |
    88 RoundDown(code_range_address_hint.Pointer()->GetAddressHint(requested), in InitializeCodePageAllocator()
    118 RoundDown(reservation.size() - (aligned_base - base) - reserved_area, in InitializeCodePageAllocator()
    732 Address discardable_end = RoundDown(addr + size, page_size); in ComputeDiscardMemoryArea()
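Line 732 pairs a RoundDown on the range end with a RoundUp on the start: only the inner, page-aligned part of a region can be handed back to the OS, and if the two bounds cross after rounding there is nothing to discard. A hedged sketch of that computation, with kPageSize and the Area type as illustrative stand-ins:

    #include <cstdint>
    #include <optional>

    constexpr uintptr_t kPageSize = 4096;  // assumption: 4 KiB pages

    struct Area { uintptr_t begin; uintptr_t size; };

    // Largest page-aligned subrange of [addr, addr + size), or nothing if the
    // range does not contain a whole page.
    std::optional<Area> ComputeDiscardArea(uintptr_t addr, uintptr_t size) {
      const uintptr_t begin = (addr + kPageSize - 1) & ~(kPageSize - 1);  // RoundUp
      const uintptr_t end = (addr + size) & ~(kPageSize - 1);             // RoundDown
      if (begin >= end) return std::nullopt;
      return Area{begin, end - begin};
    }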
|
D | new-spaces.cc |
    93 minimum_capacity_ = RoundDown(initial_capacity, Page::kPageSize); in SetUp()
    95 maximum_capacity_ = RoundDown(maximum_capacity, Page::kPageSize); in SetUp()
|
D | spaces.cc | 164 size_t unused = RoundDown(static_cast<size_t>(area_end() - filler.address()), in ShrinkToHighWaterMark()
|
D | paged-spaces.h | 523 return RoundDown(size, Map::kSize); in RoundSizeDownToObjectAlignment()
|
/external/tensorflow/tensorflow/lite/kernels/internal/ |
D | common.h |
    686 Integer RoundDown(Integer i) { in RoundDown() function
    697 return RoundDown<Modulus>(i + Modulus - 1); in RoundUp()
|
/external/gemmlowp/standalone/ |
D | neon-gemm-kernel-benchmark.cc |
    5020 Integer RoundDown(Integer i) { in RoundDown() function
    5067 return RoundDown<kCacheLineSize>(clamped_unrounded_depth); in BenchmarkDepthToFitInCache()
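Line 5067 is the last step of sizing a benchmark's depth so the working set fits in cache: derive an unrounded depth from the cache budget, clamp it, then round down to a whole number of cache-line-sized blocks. A sketch with all names and sizes as assumptions:

    constexpr int kCacheLineSize = 64;  // assumption: depth units per rounding block

    int DepthToFitInCache(int cache_budget_bytes, int bytes_per_depth_unit,
                          int max_depth) {
      const int unrounded = cache_budget_bytes / bytes_per_depth_unit;
      const int clamped = unrounded < max_depth ? unrounded : max_depth;
      return clamped - clamped % kCacheLineSize;  // RoundDown<kCacheLineSize>
    }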
|