/art/runtime/base/

safe_copy_test.cc
    33  DCHECK_EQ(kPageSize, static_cast<decltype(kPageSize)>(PAGE_SIZE));  in TEST()
    36  void* map = mmap(nullptr, kPageSize * 4, PROT_READ | PROT_WRITE,  in TEST()
    40  char* page2 = page1 + kPageSize;  in TEST()
    41  char* page3 = page2 + kPageSize;  in TEST()
    42  char* page4 = page3 + kPageSize;  in TEST()
    43  ASSERT_EQ(0, mprotect(page1 + kPageSize, kPageSize, PROT_NONE));  in TEST()
    44  ASSERT_EQ(0, munmap(page4, kPageSize));  in TEST()
    47  page1[kPageSize - 1] = 'z';  in TEST()
    50  page3[kPageSize - 1] = 'y';  in TEST()
    52  char buf[kPageSize];  in TEST()
    [all …]

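The hits above build a fault-injection fixture: four anonymous pages, with the second made inaccessible and the fourth unmapped, so copies crossing those boundaries must fault. A standalone sketch of the same layout (not the ART test itself; kPageSize = 4096 is assumed here, matching art/runtime/globals.h):

```cpp
#include <sys/mman.h>
#include <cassert>
#include <cstddef>

constexpr size_t kPageSize = 4096;

int main() {
  void* map = mmap(nullptr, kPageSize * 4, PROT_READ | PROT_WRITE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(map != MAP_FAILED);
  char* page1 = static_cast<char*>(map);
  char* page2 = page1 + kPageSize;
  char* page4 = page1 + 3 * kPageSize;
  int rc = mprotect(page2, kPageSize, PROT_NONE);  // Page 2: guard page, reads fault.
  assert(rc == 0);
  rc = munmap(page4, kPageSize);                   // Page 4: hole at the end of the mapping.
  assert(rc == 0);
  page1[kPageSize - 1] = 'z';  // Last byte still writable before the guard page.
  munmap(map, kPageSize * 3);
  return rc;
}
```
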
/art/runtime/gc/allocator/

rosalloc.cc
    51  size_t RosAlloc::dedicated_full_run_storage_[kPageSize / sizeof(size_t)] = { 0 };
    65  DCHECK_ALIGNED(base, kPageSize);  in RosAlloc()
    66  DCHECK_EQ(RoundUp(capacity, kPageSize), capacity);  in RosAlloc()
    67  DCHECK_EQ(RoundUp(max_capacity, kPageSize), max_capacity);  in RosAlloc()
    69  CHECK_ALIGNED(page_release_size_threshold_, kPageSize);  in RosAlloc()
    90  size_t num_of_pages = footprint_ / kPageSize;  in RosAlloc()
    91  size_t max_num_of_pages = max_capacity_ / kPageSize;  in RosAlloc()
    94  RoundUp(max_num_of_pages, kPageSize),  in RosAlloc()
   106  DCHECK_EQ(capacity_ % kPageSize, static_cast<size_t>(0));  in RosAlloc()
   131  const size_t req_byte_size = num_pages * kPageSize;  in AllocPages()
    [all …]

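The checks above lean on the standard power-of-two rounding idiom: `RoundUp(x, kPageSize) == x` is exactly an "x is page aligned" assertion, and `footprint_ / kPageSize` counts whole pages. A sketch modeled on ART's base helpers (the real ones live under runtime/base/; n must be a power of two):

```cpp
#include <cstddef>
#include <cstdint>

constexpr size_t kPageSize = 4096;

constexpr uintptr_t RoundDown(uintptr_t x, uintptr_t n) {
  return x & -n;  // Clears the low bits; valid only for power-of-two n.
}
constexpr uintptr_t RoundUp(uintptr_t x, uintptr_t n) {
  return RoundDown(x + n - 1, n);
}

static_assert(RoundUp(5000, kPageSize) == 8192, "rounds up to the next page");
static_assert(RoundUp(8192, kPageSize) == 8192, "identity on page-aligned values");
```
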
dlmalloc.cc
    68  start = reinterpret_cast<void*>(art::RoundUp(reinterpret_cast<uintptr_t>(start), art::kPageSize));  in DlmallocMadviseCallback()
    69  end = reinterpret_cast<void*>(art::RoundDown(reinterpret_cast<uintptr_t>(end), art::kPageSize));  in DlmallocMadviseCallback()

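Note the direction of the rounding on these two lines: the start is rounded up and the end rounded down, shrinking the range *inward* to the pages it fully covers, since releasing a partially covered page would discard bytes still in use. A sketch of the idiom (the callback name and MADV_DONTNEED advice follow the dlmalloc madvise pattern; this is an illustration, not the ART function):

```cpp
#include <sys/mman.h>
#include <cstdint>

constexpr uintptr_t kPageSize = 4096;

// Release to the kernel only the whole pages contained in [start, end).
void ReleaseContainedPages(void* start, void* end) {
  uintptr_t begin = (reinterpret_cast<uintptr_t>(start) + kPageSize - 1) & ~(kPageSize - 1);
  uintptr_t finish = reinterpret_cast<uintptr_t>(end) & ~(kPageSize - 1);
  if (begin < finish) {
    madvise(reinterpret_cast<void*>(begin), finish - begin, MADV_DONTNEED);
  }
}
```
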
rosalloc.h
    59  DCHECK_ALIGNED(byte_size, kPageSize);  in ByteSize()
    64  DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));  in SetByteSize()
   105  DCHECK_EQ(byte_size % kPageSize, static_cast<size_t>(0));  in ReleasePages()
   392  return reinterpret_cast<uint8_t*>(this) + kPageSize * numOfPages[size_bracket_idx_];  in End()
   612  DCHECK_EQ(byte_offset % static_cast<size_t>(kPageSize), static_cast<size_t>(0));  in ToPageMapIndex()
   613  return byte_offset / kPageSize;  in ToPageMapIndex()
   618  return (reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_)) / kPageSize;  in RoundDownToPageMapIndex()
   874  return RoundUp(bytes, kPageSize);  in UsableSize()
   913  DCHECK_LT(idx, capacity_ / kPageSize);  in IsFreePage()

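ToPageMapIndex() and IsFreePage() show how RosAlloc keeps per-page state in a flat "page map" indexed by the offset from the arena base. A loose, hypothetical stand-in for that indexing (field names invented for illustration):

```cpp
#include <cstdint>
#include <vector>

constexpr size_t kPageSize = 4096;

// One entry of state per page; the index is the page number within the arena.
struct PageMapSketch {
  uint8_t* base_ = nullptr;          // Arena base, assumed page aligned.
  std::vector<uint8_t> page_map_;    // capacity_ / kPageSize entries.

  size_t ToPageMapIndex(const void* addr) const {
    uintptr_t byte_offset =
        reinterpret_cast<uintptr_t>(addr) - reinterpret_cast<uintptr_t>(base_);
    return byte_offset / kPageSize;  // Any address within a page maps to its page.
  }
  bool IsFreePage(size_t idx) const { return page_map_[idx] == 0; }
};
```
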
/art/runtime/arch/

instruction_set.cc
   146  static_assert(IsAligned<kPageSize>(kArmStackOverflowReservedBytes), "ARM gap not page aligned");
   147  static_assert(IsAligned<kPageSize>(kArm64StackOverflowReservedBytes), "ARM64 gap not page aligned");
   148  static_assert(IsAligned<kPageSize>(kMipsStackOverflowReservedBytes), "Mips gap not page aligned");
   149  static_assert(IsAligned<kPageSize>(kMips64StackOverflowReservedBytes),
   151  static_assert(IsAligned<kPageSize>(kX86StackOverflowReservedBytes), "X86 gap not page aligned");
   152  static_assert(IsAligned<kPageSize>(kX86_64StackOverflowReservedBytes),

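All six assertions use the same compile-time predicate: for a power-of-two alignment, a value is aligned iff its low bits are zero. A sketch modeled on ART's bit utilities (the real template lives under runtime/base/):

```cpp
#include <cstdint>

template <uint64_t n>
constexpr bool IsAligned(uint64_t x) {
  static_assert((n & (n - 1)) == 0, "n is not a power of two");
  return (x & (n - 1)) == 0;  // Low log2(n) bits must all be zero.
}

constexpr int kPageSize = 4096;
static_assert(IsAligned<kPageSize>(8 * 1024), "an 8 KiB reserved gap is page aligned");
static_assert(!IsAligned<kPageSize>(8 * 1024 + 512), "a non-multiple is rejected");
```
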
/art/runtime/gc/collector/

immune_spaces_test.cc
    73  reinterpret_cast<uint8_t*>(kPageSize),  in ReserveBitmaps()
    74  kPageSize));  in ReserveBitmaps()
   210  constexpr size_t kImageSize = 123 * kPageSize;  in TEST_F()
   211  constexpr size_t kImageOatSize = 321 * kPageSize;  in TEST_F()
   212  constexpr size_t kOtherSpaceSize = 100 * kPageSize;  in TEST_F()
   257  constexpr size_t kImage1Size = kPageSize * 17;  in TEST_F()
   258  constexpr size_t kImage2Size = kPageSize * 13;  in TEST_F()
   259  constexpr size_t kImage3Size = kPageSize * 3;  in TEST_F()
   260  constexpr size_t kImage1OatSize = kPageSize * 5;  in TEST_F()
   261  constexpr size_t kImage2OatSize = kPageSize * 8;  in TEST_F()
    [all …]

semi_space.cc
   465  if (LIKELY(size <= static_cast<size_t>(kPageSize))) {  in CopyAvoidingDirtyingPages()
   483  size_t page_remain = AlignUp(byte_dest, kPageSize) - byte_dest;  in CopyAvoidingDirtyingPages()
   488  DCHECK_ALIGNED(reinterpret_cast<uintptr_t>(byte_dest), kPageSize);  in CopyAvoidingDirtyingPages()
   491  while (byte_src + kPageSize < limit) {  in CopyAvoidingDirtyingPages()
   495  for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {  in CopyAvoidingDirtyingPages()
   504  saved_bytes += kPageSize;  in CopyAvoidingDirtyingPages()
   506  byte_src += kPageSize;  in CopyAvoidingDirtyingPages()
   507  byte_dest += kPageSize;  in CopyAvoidingDirtyingPages()

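The loop visible at lines 491-507 exploits the fact that the destination pages come from a fresh anonymous mapping and are therefore already zero: copying only the nonzero words means an all-zero source page writes nothing, so the kernel never has to materialize (dirty) a physical page for it. A hedged sketch of one iteration of that inner loop, under those same assumptions:

```cpp
#include <cstddef>
#include <cstdint>

constexpr size_t kPageSize = 4096;

// Copy one page, skipping zero words; returns bytes saved from dirtying.
// word_dest points into a freshly mmapped (zero-filled) page.
size_t CopyPageAvoidingDirtying(uintptr_t* word_dest, const uintptr_t* word_src) {
  bool all_zero = true;
  for (size_t i = 0; i < kPageSize / sizeof(*word_src); ++i) {
    if (word_src[i] != 0) {
      all_zero = false;
      word_dest[i] = word_src[i];  // Only nonzero words need storing.
    }
  }
  return all_zero ? kPageSize : 0;  // An untouched page stays clean.
}
```
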
semi_space-inl.h
    67  CHECK_ALIGNED(ref, kPageSize);  in MarkObject()

immune_spaces.cc
    52  space_end = RoundUp(reinterpret_cast<uintptr_t>(image_space->GetImageEnd()), kPageSize);  in CreateLargestImmuneRegion()

/art/runtime/

mem_map_test.cc
    56  const size_t page_size = static_cast<size_t>(kPageSize);  in RemapAtEndTest()
   134  uintptr_t random_start = CreateStartPos(i * kPageSize);  in TEST_F()
   160  kPageSize,  in TEST_F()
   173  reinterpret_cast<uint8_t*>(kPageSize),  in TEST_F()
   188  kPageSize,  in TEST_F()
   201  constexpr size_t kMapSize = kPageSize;  in TEST_F()
   223  uint8_t* valid_address = GetValidMapAddress(kPageSize, /*low_4gb*/false);  in TEST_F()
   227  kPageSize,  in TEST_F()
   238  kPageSize,  in TEST_F()
   249  kPageSize,  in TEST_F()
    [all …]

mem_map.cc
   123  constexpr uintptr_t mask = mask_ones & ~(kPageSize - 1);  in CreateStartPos()
   304  size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);  in MapAnonymous()
   391  const size_t page_aligned_byte_count = RoundUp(byte_count, kPageSize);  in MapDummy()
   428  int page_offset = start % kPageSize;  in MapFileAtAddress()
   431  size_t page_aligned_byte_count = RoundUp(byte_count + page_offset, kPageSize);  in MapFileAtAddress()
   439  redzone_size = kPageSize;  in MapFileAtAddress()
   545  DCHECK_ALIGNED(begin_, kPageSize);  in RemapAtEnd()
   546  DCHECK_ALIGNED(base_begin_, kPageSize);  in RemapAtEnd()
   547  DCHECK_ALIGNED(reinterpret_cast<uint8_t*>(base_begin_) + base_size_, kPageSize);  in RemapAtEnd()
   548  DCHECK_ALIGNED(new_end, kPageSize);  in RemapAtEnd()
    [all …]

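Lines 428-431 deal with a POSIX constraint: mmap() requires the file offset to be page aligned, so mapping byte_count bytes from an arbitrary offset means mapping from the page boundary below it and stepping back into the mapping. A standalone sketch of that fix-up (illustrative helper, not the ART function, which also handles protections, fixed addresses, and redzones):

```cpp
#include <sys/mman.h>
#include <sys/types.h>
#include <cstdint>

constexpr size_t kPageSize = 4096;

// Map byte_count bytes of fd starting at an arbitrary file offset `start`.
uint8_t* MapFileBytes(int fd, off_t start, size_t byte_count) {
  size_t page_offset = static_cast<size_t>(start) % kPageSize;
  size_t page_aligned_byte_count =
      (byte_count + page_offset + kPageSize - 1) & ~(kPageSize - 1);
  void* actual = mmap(nullptr, page_aligned_byte_count, PROT_READ, MAP_PRIVATE,
                      fd, start - static_cast<off_t>(page_offset));
  if (actual == MAP_FAILED) {
    return nullptr;
  }
  // The requested bytes begin page_offset bytes into the aligned mapping.
  return static_cast<uint8_t*>(actual) + page_offset;
}
```
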
globals.h
    41  static constexpr int kPageSize = 4096;
    46  return offset < kPageSize;  in CanDoImplicitNullCheckOn()
    52  static constexpr size_t kLargeObjectAlignment = kPageSize;

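This is the definition every other hit in this listing refers back to. It is a compile-time constant, so it is only an assumption about the target kernel's page size; line 46 works because an offset smaller than one page from a null pointer still lands in the unmapped first page, so the access reliably faults. A sketch of the runtime cross-check (the same idea as the PAGE_SIZE check in safe_copy_test.cc above; the function name here is hypothetical):

```cpp
#include <unistd.h>
#include <cassert>

constexpr int kPageSize = 4096;

void CheckPageSizeMatchesKernel() {
  long actual = sysconf(_SC_PAGE_SIZE);  // The kernel's real page size.
  assert(actual == static_cast<long>(kPageSize) &&
         "runtime page size differs from the compile-time constant");
  (void)actual;  // Keep NDEBUG builds warning-free.
}
```
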
image.cc
    67  CHECK_EQ(image_begin, RoundUp(image_begin, kPageSize));  in ImageHeader()
    68  CHECK_EQ(oat_file_begin, RoundUp(oat_file_begin, kPageSize));  in ImageHeader()
    69  CHECK_EQ(oat_data_begin, RoundUp(oat_data_begin, kPageSize));  in ImageHeader()
    81  CHECK_ALIGNED(delta, kPageSize) << " patch delta must be page aligned";  in RelocateImage()
   122  if (!IsAligned<kPageSize>(patch_delta_)) {  in IsValid()

oat.cc
   110  if (!IsAligned<kPageSize>(executable_offset_)) {  in IsValid()
   113  if (!IsAligned<kPageSize>(image_patch_delta_)) {  in IsValid()
   135  if (!IsAligned<kPageSize>(executable_offset_)) {  in GetValidationErrorMessage()
   138  if (!IsAligned<kPageSize>(image_patch_delta_)) {  in GetValidationErrorMessage()
   224  DCHECK_ALIGNED(executable_offset_, kPageSize);  in GetExecutableOffset()
   230  DCHECK_ALIGNED(executable_offset, kPageSize);  in SetExecutableOffset()
   372  CHECK_ALIGNED(delta, kPageSize);  in RelocateOat()
   381  CHECK_ALIGNED(off, kPageSize);  in SetImagePatchDelta()
   402  CHECK_ALIGNED(image_file_location_oat_data_begin, kPageSize);  in SetImageFileLocationOatDataBegin()

thread_pool.cc
    46  stack_size += kPageSize;  in ThreadPoolWorker()
    51  CHECK_ALIGNED(stack_->Begin(), kPageSize);  in ThreadPoolWorker()
    52  int mprotect_result = mprotect(stack_->Begin(), kPageSize, PROT_NONE);  in ThreadPoolWorker()

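These three hits set up a stack guard page: the worker stack is grown by one page, and the page at its low end is made inaccessible so an overflow faults immediately instead of silently corrupting whatever is mapped below. A sketch of the protect step (`stack_begin` stands in for stack_->Begin()):

```cpp
#include <sys/mman.h>
#include <cassert>
#include <cstdint>

constexpr size_t kPageSize = 4096;

// Turn the lowest page of a downward-growing stack into a guard page.
void ProtectStackGuardPage(uint8_t* stack_begin) {
  // mprotect() requires a page-aligned address, hence the CHECK_ALIGNED above.
  assert((reinterpret_cast<uintptr_t>(stack_begin) & (kPageSize - 1)) == 0);
  int mprotect_result = mprotect(stack_begin, kPageSize, PROT_NONE);
  assert(mprotect_result == 0);
  (void)mprotect_result;
}
```
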
monitor_pool.h
   187  static constexpr size_t kChunkCapacity = kPageSize / kAlignedMonitorSize;
   190  static constexpr size_t kChunkSize = kPageSize;

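The pool carves monitors out of page-sized chunks, so capacity per chunk is just integer division. A worked example, assuming (hypothetically, for illustration only) that an aligned Monitor occupies 64 bytes:

```cpp
#include <cstddef>

constexpr size_t kPageSize = 4096;
constexpr size_t kAlignedMonitorSize = 64;             // Assumed value, not from the source.
constexpr size_t kChunkSize = kPageSize;               // One chunk is exactly one page.
constexpr size_t kChunkCapacity = kChunkSize / kAlignedMonitorSize;
static_assert(kChunkCapacity == 64, "a 4 KiB chunk holds 64 such monitors");
```
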
/art/runtime/gc/space/

malloc_space.cc
    92  *growth_limit = RoundUp(*growth_limit, kPageSize);  in CreateMemMap()
    93  *capacity = RoundUp(*capacity, kPageSize);  in CreateMemMap()
   127  growth_limit = RoundUp(growth_limit, kPageSize);  in SetGrowthLimit()
   170  SetEnd(reinterpret_cast<uint8_t*>(RoundUp(reinterpret_cast<uintptr_t>(End()), kPageSize)));  in CreateZygoteSpace()
   173  DCHECK_ALIGNED(begin_, kPageSize);  in CreateZygoteSpace()
   174  DCHECK_ALIGNED(End(), kPageSize);  in CreateZygoteSpace()
   175  size_t size = RoundUp(Size(), kPageSize);  in CreateZygoteSpace()
   187  SetGrowthLimit(RoundUp(size, kPageSize));  in CreateZygoteSpace()

/art/compiler/

elf_builder.h
   136  header_.sh_addralign = kPageSize;  in Start()
   513  rodata_(this, ".rodata", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),  in ElfBuilder()
   514  text_(this, ".text", SHT_PROGBITS, SHF_ALLOC | SHF_EXECINSTR, nullptr, 0, kPageSize, 0),  in ElfBuilder()
   515  bss_(this, ".bss", SHT_NOBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),  in ElfBuilder()
   516  dynstr_(this, ".dynstr", SHF_ALLOC, kPageSize),  in ElfBuilder()
   519  dynamic_(this, ".dynamic", SHT_DYNAMIC, SHF_ALLOC, &dynstr_, 0, kPageSize, sizeof(Elf_Dyn)),  in ElfBuilder()
   520  eh_frame_(this, ".eh_frame", SHT_PROGBITS, SHF_ALLOC, nullptr, 0, kPageSize, 0),  in ElfBuilder()
   528  abiflags_(this, ".MIPS.abiflags", SHT_MIPS_ABIFLAGS, SHF_ALLOC, nullptr, 0, kPageSize, 0,  in ElfBuilder()
   612  CHECK(loaded_size_ == 0 || loaded_size_ == RoundUp(virtual_address_, kPageSize))  in End()
   682  DCHECK_EQ(rodata_.header_.sh_addralign, static_cast<Elf_Word>(kPageSize));  in PrepareDynamicSection()
    [all …]

/art/dexlayout/

dexdiag.cc
   179  size_t first_page_of_section = section.offset / kPageSize;  in FindSectionTypeForPage()
   280  uint64_t start_page = (dex_file_start - vdex_start) / kPageSize;  in ProcessOneDexMapping()
   281  uint64_t start_address = start_page * kPageSize;  in ProcessOneDexMapping()
   282  uint64_t end_page = RoundUp(start_address + dex_file_size, kPageSize) / kPageSize;  in ProcessOneDexMapping()
   286  map_start + start_page * kPageSize,  in ProcessOneDexMapping()
   287  map_start + end_page * kPageSize)  in ProcessOneDexMapping()

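Worked numbers make the page-range arithmetic on lines 280-282 easy to check; the offsets below are made up for illustration:

```cpp
#include <cstdint>

constexpr uint64_t kPageSize = 4096;
constexpr uint64_t dex_file_start_offset = 0x2800;  // Hypothetical: 10 KiB into the vdex mapping.
constexpr uint64_t dex_file_size = 0x5000;          // Hypothetical: 20 KiB long.

constexpr uint64_t start_page = dex_file_start_offset / kPageSize;      // 0x2800 / 0x1000 = 2
constexpr uint64_t start_address = start_page * kPageSize;              // 0x2000
constexpr uint64_t end_page =
    (start_address + dex_file_size + kPageSize - 1) / kPageSize;        // RoundUp(0x7000) / 0x1000 = 7
static_assert(start_page == 2 && end_page == 7, "the dex file is attributed pages [2, 7)");
```
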
dex_visualize.cc
    71  fprintf(out_file_, "\"%s\" %d", s.name.c_str(), s.offset / kPageSize);  in OpenAndPrintHeader()
    99  const uint32_t low_page = from / kPageSize;  in DumpAddressRange()
   100  const uint32_t high_page = (size > 0) ? (from + size - 1) / kPageSize : low_page;  in DumpAddressRange()
   341  RoundUp(bytes, kPageSize) / kPageSize,  in ShowDexSectionStatistics()

/art/compiler/linker/

multi_oat_relative_patcher.cc
    39  DCHECK_ALIGNED(adjustment, kPageSize);  in StartOatFile()

/art/compiler/jit/

jit_logger.cc
   215  marker_address_ = mmap(nullptr, kPageSize, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);  in OpenMarkerFile()
   224  munmap(marker_address_, kPageSize);  in CloseMarkerFile()

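As I understand the perf jitdump convention, perf locates a jitdump file by scanning a process's mappings for an executable mapping of that file, which is why one page of it is mapped PROT_READ | PROT_EXEC and kept alive until logging stops. A sketch of the pair of hits above (standalone helpers, not the JitLogger methods):

```cpp
#include <sys/mman.h>
#include <cstddef>

constexpr size_t kPageSize = 4096;

// Map one executable page of the jitdump file as the perf "marker".
void* OpenMarkerPage(int fd) {
  return mmap(nullptr, kPageSize, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
}

void CloseMarkerPage(void* marker_address) {
  if (marker_address != MAP_FAILED && marker_address != nullptr) {
    munmap(marker_address, kPageSize);
  }
}
```
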
/art/compiler/utils/

swap_space.cc
   164  size_t next_part = std::max(RoundUp(min_size, kPageSize), RoundUp(kMininumMapSize, kPageSize));  in NewFileChunk()

/art/runtime/gc/accounting/

space_bitmap_test.cc
   120  for (uintptr_t i = range.second; i < range.second + kPageSize; i += kObjectAlignment) {  in TEST_F()
   207  RunTest<kPageSize>();  in TEST_F()

bitmap.cc
    48  RoundUp(num_bits, kBitsPerBitmapWord) / kBitsPerBitmapWord * sizeof(uintptr_t), kPageSize);  in AllocateMemMap()