/art/libartbase/base/

D | safe_copy_test.cc
    34  DCHECK_EQ(kPageSize, static_cast<decltype(kPageSize)>(PAGE_SIZE));  in TEST()
    37  void* map = mmap(nullptr, kPageSize * 4, PROT_READ | PROT_WRITE,  in TEST()
    41  char* page2 = page1 + kPageSize;  in TEST()
    42  char* page3 = page2 + kPageSize;  in TEST()
    43  char* page4 = page3 + kPageSize;  in TEST()
    44  ASSERT_EQ(0, mprotect(page1 + kPageSize, kPageSize, PROT_NONE));  in TEST()
    45  ASSERT_EQ(0, munmap(page4, kPageSize));  in TEST()
    48  page1[kPageSize - 1] = 'z';  in TEST()
    51  page3[kPageSize - 1] = 'y';  in TEST()
    53  char buf[kPageSize];  in TEST()
    [all …]

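The test above builds a four-page fixture with a PROT_NONE guard page and an unmapped hole, so SafeCopy can be driven across faulting boundaries. A minimal standalone sketch of that layout, assuming a POSIX system and using sysconf(_SC_PAGESIZE) where the test uses kPageSize:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      // Reserve four contiguous pages, readable and writable.
      void* raw = mmap(nullptr, page_size * 4, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(raw != MAP_FAILED);
      char* page1 = static_cast<char*>(raw);
      // Page 2 becomes a guard page: any access to it now faults.
      assert(mprotect(page1 + page_size, page_size, PROT_NONE) == 0);
      // Page 4 is unmapped entirely, leaving a hole after page 3.
      assert(munmap(page1 + 3 * page_size, page_size) == 0);
      // A copy starting at page1[page_size - 1] immediately runs into the
      // guard page; one ending at page3's last byte runs into the hole.
      page1[page_size - 1] = 'z';
      munmap(page1, 3 * page_size);
      return 0;
    }
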
D | mem_map_test.cc
    67   const size_t page_size = static_cast<size_t>(kPageSize);  in RemapAtEndTest()
    149  uintptr_t random_start = CreateStartPos(i * kPageSize);  in TEST_F()
    166  kPageSize,  in TEST_F()
    172  kPageSize,  in TEST_F()
    182  std::vector<uint8_t> data = RandomData(kPageSize);  in TEST_F()
    191  ASSERT_EQ(dest.Size(), static_cast<size_t>(kPageSize));  in TEST_F()
    199  5 * kPageSize,  // Need to make it larger  in TEST_F()
    209  3 * kPageSize,  in TEST_F()
    219  std::vector<uint8_t> data = RandomData(3 * kPageSize);  in TEST_F()
    223  dest.SetSize(kPageSize);  in TEST_F()
    [all …]

D | mem_map.cc
    129  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);  in CreateStartPos()
    239  DCHECK_ALIGNED(reservation.Begin(), kPageSize);  in CheckReservation()
    320  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);  in MapAnonymous()
    396  const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);  in MapPlaceholder()
    523  int page_offset = start % kPageSize;  in MapFileAtAddress()
    526  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);  in MapFileAtAddress()
    534  redzone_size = kPageSize;  in MapFileAtAddress()
    726  DCHECK_ALIGNED(begin_, kPageSize);  in RemapAtEnd()
    727  DCHECK_ALIGNED(base_begin_, kPageSize);  in RemapAtEnd()
    728  DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);  in RemapAtEnd()
    [all …]

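MapFileAtAddress has to cope with a requested file offset that is not page-aligned: mmap only accepts page-aligned offsets, so the mapping is widened to cover whole pages and the in-page remainder is added back afterwards. A sketch of that arithmetic, assuming POSIX mmap; the helper name is invented for illustration:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cstdint>
    #include <cstddef>

    // Map byte_count bytes of fd starting at an arbitrary (possibly
    // unaligned) file offset 'start'. Returns a pointer to the first
    // requested byte, or nullptr on failure.
    uint8_t* MapFileSpan(int fd, off_t start, size_t byte_count) {
      const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      const size_t page_offset = static_cast<size_t>(start % page_size);
      // Widen the length so the rounded-down offset plus the widened
      // length still covers every requested byte; this is the
      // RoundUp(byte_count + page_offset, page_size) seen above.
      const size_t aligned_count =
          (byte_count + page_offset + page_size - 1) & ~(page_size - 1);
      void* raw = mmap(nullptr, aligned_count, PROT_READ, MAP_PRIVATE,
                       fd, start - static_cast<off_t>(page_offset));
      if (raw == MAP_FAILED) {
        return nullptr;
      }
      // The caller's data begins page_offset bytes into the mapping.
      return static_cast<uint8_t*>(raw) + page_offset;
    }
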
D | utils.cc
    92   CHECK_EQ(RoundDown(start, kPageSize), RoundDown(limit - 1, kPageSize)) << "range spans pages";  in TouchAndFlushCacheLinesWithinPage()
    140  uintptr_t next_page = RoundUp(start + 1, kPageSize);  in FlushCpuCaches()
    147  next_page += kPageSize;  in FlushCpuCaches()
    361  uintptr_t vmstart = reinterpret_cast<uintptr_t>(AlignDown(addr, kPageSize));  in IsAddressKnownBackedByFileOrShared()
    362  off_t index = (vmstart / kPageSize) * sizeof(uint64_t);  in IsAddressKnownBackedByFileOrShared()

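FlushCpuCaches walks the range one page at a time (RoundUp(start + 1, kPageSize) is the first boundary strictly after start), so each call into the per-page helper never spans two pages, which is exactly what the CHECK_EQ in TouchAndFlushCacheLinesWithinPage enforces. A sketch of that loop shape; FlushOnePage is a hypothetical stand-in for ART's per-page flush helper:

    #include <algorithm>
    #include <cstdint>
    #include <cstddef>

    // Hypothetical stand-in for the per-page cache-flush helper.
    inline void FlushOnePage(uintptr_t begin, uintptr_t end) {
      (void)begin; (void)end;  // Real code would flush [begin, end).
    }

    void FlushRange(uintptr_t start, uintptr_t limit, size_t page_size) {
      while (start < limit) {
        // First page boundary strictly after 'start' (RoundUp(start + 1)).
        const uintptr_t next_page = (start + page_size) & ~(page_size - 1);
        const uintptr_t end = std::min(next_page, limit);
        FlushOnePage(start, end);  // [start, end) stays within one page.
        start = end;
      }
    }
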
/art/runtime/jit/

D | jit_memory_region_test.cc
    61   size_t size = kPageSize;  in BasicTest()
    67   mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));  in BasicTest()
    85   mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));  in BasicTest()
    95   addr2 = reinterpret_cast<int32_t*>(mremap(addr, 0, kPageSize, MREMAP_MAYMOVE));  in BasicTest()
    102  addr2 = reinterpret_cast<int32_t*>(mremap(addr, kPageSize, 2 * kPageSize, MREMAP_MAYMOVE));  in BasicTest()
    114  size_t size = kPageSize;  in TestUnmapWritableAfterFork()
    123  mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd.get(), 0));  in TestUnmapWritableAfterFork()
    133  mmap(nullptr, kPageSize, PROT_READ, MAP_SHARED, fd.get(), 0));  in TestUnmapWritableAfterFork()
    147  mmap(nullptr, kPageSize, PROT_READ | PROT_WRITE, MAP_SHARED, fd2.get(), 0));  in TestUnmapWritableAfterFork()
    166  munmap(addr, kPageSize);  in TestUnmapWritableAfterFork()
    [all …]

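These tests exercise dual views of a single file descriptor: two MAP_SHARED mappings of the same fd share physical pages, so a store through one view is readable through another view with different protections, which is how the JIT separates its writable and executable code mappings. A minimal Linux-specific sketch, using memfd_create in place of ART's fd plumbing:

    #include <sys/mman.h>
    #include <unistd.h>
    #include <cassert>
    #include <cstdint>

    int main() {
      const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      int fd = memfd_create("dual-view-sketch", 0);  // Linux-specific
      assert(fd != -1);
      assert(ftruncate(fd, page_size) == 0);
      // Two views of the same pages: one read-write, one read-only.
      void* rw_raw = mmap(nullptr, page_size, PROT_READ | PROT_WRITE,
                          MAP_SHARED, fd, 0);
      void* ro_raw = mmap(nullptr, page_size, PROT_READ, MAP_SHARED, fd, 0);
      assert(rw_raw != MAP_FAILED && ro_raw != MAP_FAILED);
      auto* rw = static_cast<int32_t*>(rw_raw);
      auto* ro = static_cast<const int32_t*>(ro_raw);
      rw[0] = 42;
      assert(ro[0] == 42);  // Same physical page behind both views.
      munmap(rw_raw, page_size);
      munmap(ro_raw, page_size);
      close(fd);
      return 0;
    }
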
/art/runtime/gc/allocator/

D | rosalloc.cc
    52   size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
    66   DCHECK_ALIGNED(base, kPageSize);  in RosAlloc()
    67   DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);  in RosAlloc()
    68   DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);  in RosAlloc()
    70   CHECK_ALIGNED(page_release_size_threshold_, kPageSize);  in RosAlloc()
    91   size_t num_of_pages = footprint_ / kPageSize;  in RosAlloc()
    92   size_t max_num_of_pages = max_capacity_ / kPageSize;  in RosAlloc()
    95   RoundUp(max_num_of_pages, kPageSize),  in RosAlloc()
    109  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));  in RosAlloc()
    134  const size_t req_byte_size = num_pages * kPageSize;  in AllocPages()
    [all …]

D | dlmalloc.cc
    72  start = reinterpret_cast<void*>(art::RoundUp(reinterpret_cast<uintptr_t>(start), art::kPageSize));  in DlmallocMadviseCallback()
    73  end = reinterpret_cast<void*>(art::RoundDown(reinterpret_cast<uintptr_t>(end), art::kPageSize));  in DlmallocMadviseCallback()

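The callback rounds start up and end down because madvise can only operate on whole pages: the largest fully page-aligned region inside [start, end) is released, and nothing happens if the range contains no complete page. A sketch of the same pattern, assuming POSIX madvise; the function name is invented for illustration:

    #include <sys/mman.h>
    #include <cstdint>
    #include <cstddef>

    void ReleaseInnerPages(void* start, void* end, size_t page_size) {
      // Round start up and end down to page boundaries.
      const uintptr_t begin =
          (reinterpret_cast<uintptr_t>(start) + page_size - 1) & ~(page_size - 1);
      const uintptr_t limit =
          reinterpret_cast<uintptr_t>(end) & ~(page_size - 1);
      if (begin < limit) {
        // Tell the kernel these whole pages may be reclaimed.
        madvise(reinterpret_cast<void*>(begin), limit - begin, MADV_DONTNEED);
      }
    }
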
D | rosalloc.h
    59   DCHECK_ALIGNED(byte_size, kPageSize);  in ByteSize()
    64   DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));  in SetByteSize()
    105  DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));  in ReleasePages()
    393  return reinterpret_cast<uint8_t*>(this) + kPageSize * numOfPages[size_bracket_idx_];  in End()
    613  DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));  in ToPageMapIndex()
    614  return byte_offset / kPageSize;  in ToPageMapIndex()
    619  return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;  in RoundDownToPageMapIndex()
    875  return RoundUp(bytes, kPageSize);  in UsableSize()
    914  DCHECK_LT(idx, capacity_ / kPageSize);  in IsFreePage()

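RosAlloc tracks page state in a side table indexed by page number; because base_ is page-aligned, the pointer-to-index mapping is a subtract and a divide. A sketch of the two conversions, with base standing in for RosAlloc's base_:

    #include <cstdint>
    #include <cstddef>

    // Page-map slot for an address in the heap (RoundDownToPageMapIndex).
    size_t PageMapIndex(const void* addr, const uint8_t* base,
                        size_t page_size) {
      return (reinterpret_cast<uintptr_t>(addr) -
              reinterpret_cast<uintptr_t>(base)) / page_size;
    }

    // First byte covered by a page-map slot (the inverse mapping).
    const uint8_t* PageMapAddress(size_t index, const uint8_t* base,
                                  size_t page_size) {
      return base + index * page_size;
    }
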
/art/runtime/gc/collector/

D | immune_spaces_test.cc
    74   reinterpret_cast<uint8_t*>(kPageSize),  in ReserveBitmaps()
    75   kPageSize));  in ReserveBitmaps()
    191  constexpr size_t kImageSize = 123 * kPageSize;  in TEST_F()
    192  constexpr size_t kImageOatSize = 321 * kPageSize;  in TEST_F()
    193  constexpr size_t kOtherSpaceSize = 100 * kPageSize;  in TEST_F()
    250  constexpr size_t kImage1Size = kPageSize * 17;  in TEST_F()
    251  constexpr size_t kImage2Size = kPageSize * 13;  in TEST_F()
    252  constexpr size_t kImage3Size = kPageSize * 3;  in TEST_F()
    253  constexpr size_t kImage1OatSize = kPageSize * 5;  in TEST_F()
    254  constexpr size_t kImage2OatSize = kPageSize * 8;  in TEST_F()
    [all …]

D | semi_space.cc
    378  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {  in CopyAvoidingDirtyingPages()
    396  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;  in CopyAvoidingDirtyingPages()
    401  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);  in CopyAvoidingDirtyingPages()
    404  while (byte_src + kPageSize < limit) {  in CopyAvoidingDirtyingPages()
    408  for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {  in CopyAvoidingDirtyingPages()
    417  saved_bytes += kPageSize;  in CopyAvoidingDirtyingPages()
    419  byte_src += kPageSize;  in CopyAvoidingDirtyingPages()
    420  byte_dest += kPageSize;  in CopyAvoidingDirtyingPages()

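CopyAvoidingDirtyingPages scans each whole source page word by word; if the page is entirely zero it is skipped, because the freshly mapped destination page already reads as zero, so the copy never writes to it and the page stays clean. A condensed sketch of that inner loop, assuming page-aligned pointers and a zero-initialized destination:

    #include <cstdint>
    #include <cstring>
    #include <cstddef>

    // Returns the number of bytes whose copy was skipped (pages that
    // would have been dirtied for no visible effect).
    size_t CopySkippingZeroPages(uint8_t* dest, const uint8_t* src,
                                 size_t num_pages, size_t page_size) {
      size_t saved_bytes = 0;
      for (size_t p = 0; p < num_pages; ++p) {
        const uint8_t* src_page = src + p * page_size;
        const uintptr_t* words = reinterpret_cast<const uintptr_t*>(src_page);
        bool all_zero = true;
        for (size_t i = 0; i < page_size / sizeof(uintptr_t); ++i) {
          if (words[i] != 0) { all_zero = false; break; }
        }
        if (all_zero) {
          saved_bytes += page_size;  // Destination page stays untouched.
        } else {
          memcpy(dest + p * page_size, src_page, page_size);
        }
      }
      return saved_bytes;
    }
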
/art/libartbase/arch/

D | instruction_set.cc
    96  static_assert(IsAligned<kPageSize>(kArmStackOverflowReservedBytes), "ARM gap not page aligned");
    97  static_assert(IsAligned<kPageSize>(kArm64StackOverflowReservedBytes), "ARM64 gap not page aligned");
    98  static_assert(IsAligned<kPageSize>(kX86StackOverflowReservedBytes), "X86 gap not page aligned");
    99  static_assert(IsAligned<kPageSize>(kX86_64StackOverflowReservedBytes),

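IsAligned can run at compile time because the page size is a power of two, which reduces the alignment test to a single mask. A sketch of the predicate behind these asserts, with 4096 standing in for kPageSize:

    #include <cstddef>

    template <size_t kAlign>
    constexpr bool IsAlignedTo(size_t value) {
      static_assert((kAlign & (kAlign - 1)) == 0,
                    "alignment must be a power of two");
      return (value & (kAlign - 1)) == 0;
    }

    // e.g. an 8-page stack-overflow gap is page-aligned by construction.
    static_assert(IsAlignedTo<4096>(8 * 4096), "gap not page aligned");
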
/art/runtime/

D | image.cc
    67   CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize));  in ImageHeader()
    68   CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize));  in ImageHeader()
    69   CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, kPageSize));  in ImageHeader()
    81   CHECK_ALIGNED(delta, kPageSize) << "relocation delta must be page aligned";  in RelocateImageReferences()
    91   CHECK_ALIGNED(delta, kPageSize) << "relocation delta must be page aligned";  in RelocateBootImageReferences()
    105  return image_reservation_size_ == RoundUp(image_size_, kPageSize);  in IsAppImage()
    123  if (!IsAligned<kPageSize>(image_reservation_size_)) {  in IsValid()

D | runtime_globals.h
    30  return offset < kPageSize;  in CanDoImplicitNullCheckOn()
    36  static constexpr size_t kLargeObjectAlignment = kPageSize;

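The offset < kPageSize test encodes the implicit null check contract: a field load through a null reference faults only while null + offset still lands in the unmapped page at address zero; past that, the compiler must emit an explicit null comparison. A sketch, with kSketchPageSize standing in for kPageSize:

    #include <cstddef>

    constexpr size_t kSketchPageSize = 4096;

    constexpr bool CanDoImplicitNullCheckOn(size_t field_offset) {
      // Within the first page, the hardware fault doubles as the null
      // check; beyond it, an explicit comparison is required.
      return field_offset < kSketchPageSize;
    }

    static_assert(CanDoImplicitNullCheckOn(8),
                  "small offsets fault inside page zero");
    static_assert(!CanDoImplicitNullCheckOn(kSketchPageSize),
                  "past the page an explicit check is needed");
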
D | indirect_reference_table.cc
    84   const size_t table_bytes = RoundUp(max_count * sizeof(IrtEntry), kPageSize);  in IndirectReferenceTable()
    228  const size_t table_bytes = RoundUp(new_size * sizeof(IrtEntry), kPageSize);  in Resize()
    459  uint8_t* release_start = AlignUp(reinterpret_cast<uint8_t*>(&table_[top_index]), kPageSize);  in Trim()
    462  DCHECK_ALIGNED(release_end, kPageSize);  in Trim()
    463  DCHECK_ALIGNED(release_end - release_start, kPageSize);  in Trim()

D | oat.cc
    105  if (!IsAligned<kPageSize>(executable_offset_)) {  in IsValid()
    127  if (!IsAligned<kPageSize>(executable_offset_)) {  in GetValidationErrorMessage()
    188  DCHECK_ALIGNED(executable_offset_, kPageSize);  in GetExecutableOffset()
    194  DCHECK_ALIGNED(executable_offset, kPageSize);  in SetExecutableOffset()

D | thread_pool.cc
    58  stack_size += kPageSize;  in ThreadPoolWorker()
    65  CHECK_ALIGNED(stack_.Begin(), kPageSize);  in ThreadPoolWorker()
    69  kPageSize,  in ThreadPoolWorker()

/art/runtime/gc/space/

D | malloc_space.cc
    105  *growth_limit = RoundUp(*growth_limit, kPageSize);  in CreateMemMap()
    106  *capacity = RoundUp(*capacity, kPageSize);  in CreateMemMap()
    143  growth_limit = RoundUp(growth_limit, kPageSize);  in SetGrowthLimit()
    186  SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));  in CreateZygoteSpace()
    189  DCHECK_ALIGNED(begin_, kPageSize);  in CreateZygoteSpace()
    190  DCHECK_ALIGNED(End(), kPageSize);  in CreateZygoteSpace()
    191  size_t size = RoundUp(Size(), kPageSize);  in CreateZygoteSpace()
    203  SetGrowthLimit(RoundUp(size, kPageSize));  in CreateZygoteSpace()

/art/dexlayout/

D | dexdiag.cc
    189  size_t first_page_of_section = section.offset / kPageSize;  in FindSectionTypeForPage()
    290  uint64_t start_page = (dex_file_start - vdex_start) / kPageSize;  in ProcessOneDexMapping()
    291  uint64_t start_address = start_page * kPageSize;  in ProcessOneDexMapping()
    292  uint64_t end_page = RoundUp(start_address + dex_file_size, kPageSize) / kPageSize;  in ProcessOneDexMapping()
    296  map_start + start_page * kPageSize,  in ProcessOneDexMapping()
    297  map_start + end_page * kPageSize)  in ProcessOneDexMapping()

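ProcessOneDexMapping converts a dex file's byte span inside the mapped vdex into a half-open range of page indices, rounding the end up so a partial trailing page is still counted. A sketch of that conversion with local stand-in names:

    #include <cstdint>

    struct PageRange { uint64_t first; uint64_t end; };  // [first, end)

    PageRange DexFilePages(uint64_t dex_start, uint64_t dex_size,
                           uint64_t vdex_start, uint64_t page_size) {
      // Page containing the first byte of the dex file.
      const uint64_t start_page = (dex_start - vdex_start) / page_size;
      // Round the end offset up so a partial trailing page counts.
      const uint64_t end_page =
          (start_page * page_size + dex_size + page_size - 1) / page_size;
      return {start_page, end_page};
    }
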
D | dex_visualize.cc
    73   fprintf(out_file_, "\"%s\" %" PRIuPTR, s.name.c_str(), s.offset / kPageSize);  in OpenAndPrintHeader()
    101  const uint32_t low_page = from / kPageSize;  in DumpAddressRange()
    102  const uint32_t high_page = (size > 0) ? (from + size - 1) / kPageSize : low_page;  in DumpAddressRange()
    339  RoundUp(bytes, kPageSize) / kPageSize,  in ShowDexSectionStatistics()

/art/libdexfile/dex/

D | dex_file_layout.cc
    36  begin = AlignUp(begin, kPageSize);  in MadviseLargestPageAlignedRegion()
    37  end = AlignDown(end, kPageSize);  in MadviseLargestPageAlignedRegion()

/art/dex2oat/

D | common_compiler_driver_test.cc
    114  inaccessible_page_ = mmap(nullptr, kPageSize, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);  in SetUp()
    120  munmap(inaccessible_page_, kPageSize);  in TearDown()

/art/libelffile/elf/

D | elf_builder.h
    205  header_.sh_addralign = kPageSize;  // Page-align if R/W/X flags changed.  in AddSection()
    463  rodata_(this, ".rodata", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),  in ElfBuilder()
    464  text_(this, ".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, nullptr, 0, kPageSize, 0),  in ElfBuilder()
    466  this, ".data.bimg.rel.ro", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),  in ElfBuilder()
    467  bss_(this, ".bss", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),  in ElfBuilder()
    468  dex_(this, ".dex", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),  in ElfBuilder()
    469  dynstr_(this, ".dynstr", SHF_ALLOC, kPageSize),  in ElfBuilder()
    472  dynamic_(this, ".dynamic", SHT_DYNAMIC, SHF_ALLOC, &dynstr_, 0, kPageSize, sizeof(Elf_Dyn)),  in ElfBuilder()
    543  CHECK(loaded_size_ == 0 || loaded_size_ == RoundUp(virtual_address_, kPageSize))  in End()
    618  DCHECK_LE(section->header_.sh_offset, end + kPageSize) << "Large gap between sections";  in Strip()
    [all …]

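Loadable sections get sh_addralign == kPageSize because the runtime loader mmaps segments, and mmap can only apply protections to whole pages: a section whose R/W/X flags differ from its predecessor must start on a fresh page, while sections with identical flags can be packed tightly. A sketch of that layout decision, with 4096 standing in for kPageSize:

    #include <cstdint>

    constexpr uint64_t kSketchPageSize = 4096;

    // Virtual address for the next section: pack tightly when the
    // protection flags match, start a new page when they change.
    uint64_t NextSectionAddress(uint64_t current_end, bool flags_changed) {
      if (!flags_changed) {
        return current_end;
      }
      return (current_end + kSketchPageSize - 1) & ~(kSketchPageSize - 1);
    }
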
D | xz_utils.cc
    112  dst->resize(RoundUp(dst_offset + kPageSize / 4, kPageSize));  in XzDecompress()

/art/runtime/base/

D | mem_map_arena_pool.cc
    49  static_assert(ArenaAllocator::kArenaAlignment <= kPageSize,  in MemMapArena()
    58  size = RoundUp(size, kPageSize);  in Allocate()

/art/imgdiag/

D | imgdiag.cc
    282   current_page_idx = entry_address / kPageSize + page_off;  in IsEntryOnDirtyPage()
    288   } while ((current_page_idx * kPageSize) < RoundUp(entry_address + size, kObjectAlignment));  in IsEntryOnDirtyPage()
    987   RegionCommon<T>::GetDirtyEntryBytes() * 1.0f / (mapping_data.dirty_pages * kPageSize);  in ProcessRegion()
    1226  const uint8_t* image_end = AlignUp(image_begin + image_header.GetImageSize(), kPageSize);  in Init()
    1227  size_t virtual_page_idx_begin = reinterpret_cast<uintptr_t>(image_begin) / kPageSize;  in Init()
    1228  size_t virtual_page_idx_end = reinterpret_cast<uintptr_t>(image_end) / kPageSize;  in Init()
    1328  for (uintptr_t begin = boot_map.start; begin != boot_map.end; begin += kPageSize) {  in ComputeDirtyBytes()
    1337  if (memcmp(local_ptr, remote_ptr, kPageSize) != 0) {  in ComputeDirtyBytes()
    1341  for (size_t i = 0; i < kPageSize / sizeof(uint32_t); ++i) {  in ComputeDirtyBytes()
    1366  virtual_page_idx = reinterpret_cast<uintptr_t>(local_ptr) / kPageSize;  in ComputeDirtyBytes()
    [all …]

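ComputeDirtyBytes walks the boot image one page at a time, comparing the locally mapped copy against the same page read from the remote process: a memcmp mismatch marks the page dirty, and a second word-granularity pass counts exactly how many bytes differ. A condensed sketch with local stand-in names:

    #include <cstdint>
    #include <cstring>
    #include <cstddef>

    struct DirtyStats { size_t dirty_pages; size_t dirty_words; };

    DirtyStats CompareImages(const uint8_t* local, const uint8_t* remote,
                             size_t num_pages, size_t page_size) {
      DirtyStats stats{0, 0};
      for (size_t p = 0; p < num_pages; ++p) {
        const uint8_t* lp = local + p * page_size;
        const uint8_t* rp = remote + p * page_size;
        if (memcmp(lp, rp, page_size) != 0) {
          ++stats.dirty_pages;  // Page differs somewhere; find out where.
          const uint32_t* lw = reinterpret_cast<const uint32_t*>(lp);
          const uint32_t* rw = reinterpret_cast<const uint32_t*>(rp);
          for (size_t i = 0; i < page_size / sizeof(uint32_t); ++i) {
            if (lw[i] != rw[i]) {
              ++stats.dirty_words;
            }
          }
        }
      }
      return stats;
    }
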