/tools/testing/selftests/vm/ |
D | mlock2-tests.c |
    286  unsigned long page_size = getpagesize();  in lock_check() local
    290  page2_flags = get_pageflags((unsigned long)map + page_size);  in lock_check()
    314  if (!is_vmflag_set((unsigned long)map + page_size, LOCKED)) {  in lock_check()
    324  unsigned long page_size = getpagesize();  in unlock_lock_check() local
    328  page2_flags = get_pageflags((unsigned long)map + page_size);  in unlock_lock_check()
    342  if (is_vmflag_set((unsigned long)map + page_size, LOCKED)) {  in unlock_lock_check()
    354  unsigned long page_size = getpagesize();  in test_mlock_lock() local
    356  map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,  in test_mlock_lock()
    363  if (mlock2_(map, 2 * page_size, 0)) {  in test_mlock_lock()
    376  if (munlock(map, 2 * page_size)) {  in test_mlock_lock()
    [all …]
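test_mlock_lock() maps two pages, locks them with mlock2() and then inspects the per-page VM flags. A minimal sketch of the same flow, assuming plain mlock()/munlock() instead of the test's mlock2_() wrapper and dropping its get_pageflags()/is_vmflag_set() helpers:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned long page_size = getpagesize();

        /* Map two writable pages, as test_mlock_lock() does. */
        char *map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (map == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        memset(map, 1, 2 * page_size);

        /* Lock both pages; the real test uses mlock2(..., 0). */
        if (mlock(map, 2 * page_size)) {
            perror("mlock");
            return 1;
        }
        /* ... the test now checks the LOCKED vmflag on each page ... */
        if (munlock(map, 2 * page_size)) {
            perror("munlock");
            return 1;
        }
        munmap(map, 2 * page_size);
        return 0;
    }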
|
D | userfaultfd.c |
    71   static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;  variable
    87   ((pthread_mutex_t *) ((___area) + (___nr)*page_size))
    94   ((___area) + (___nr)*page_size + \
    167  if (!my_bcmp(area_dst + page_nr * page_size, zeropage,  in locking_thread()
    168  page_size))  in locking_thread()
    179  while (!bcmp(area_dst + page_nr * page_size, zeropage,  in locking_thread()
    180  page_size)) {  in locking_thread()
    190  page_nr, cpu, area_dst + page_nr * page_size,  in locking_thread()
    224  if (offset >= nr_pages * page_size)  in copy_page()
    229  uffdio_copy.len = page_size;  in copy_page()
    [all …]
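userfaultfd.c carves its test areas into nr_pages slots and addresses slot N as area + N * page_size (the macros at lines 87 and 94). A minimal sketch of that per-page layout on an anonymous mapping, with a made-up nr_pages and no userfaultfd handling at all:

    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned long page_size = getpagesize();
        unsigned long nr_pages = 16;     /* the test derives this from nr_cpus */

        char *area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (area == MAP_FAILED) {
            perror("mmap");
            return 1;
        }

        /* Address page_nr as area + page_nr * page_size, as the test does. */
        for (unsigned long page_nr = 0; page_nr < nr_pages; page_nr++) {
            unsigned long long *count =
                (unsigned long long *)(area + page_nr * page_size);
            *count = page_nr;            /* stamp something page-local */
        }

        for (unsigned long page_nr = 0; page_nr < nr_pages; page_nr++)
            printf("page %lu starts at %p\n", page_nr,
                   (void *)(area + page_nr * page_size));

        munmap(area, nr_pages * page_size);
        return 0;
    }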
|
D | compaction_test.c |
    159  size_t page_size, i;  in main() local
    179  page_size = getpagesize();  in main()
    208  for (i = 0; i < MAP_SIZE; i += page_size)  in main()
|
/tools/testing/selftests/powerpc/primitives/ |
D | load_unaligned_zeropad.c |
    42   static int page_size;  variable
    47   if (mprotect(mem_region + page_size, page_size, PROT_NONE)) {  in protect_region()
    57   if (mprotect(mem_region + page_size, page_size, PROT_READ|PROT_WRITE)) {  in unprotect_region()
    132  page_size = getpagesize();  in test_body()
    133  mem_region = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE,  in test_body()
    138  for (i = 0; i < page_size; i++)  in test_body()
    141  memset(mem_region+page_size, 0, page_size);  in test_body()
    145  for (i = 0; i < page_size; i++)  in test_body()
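load_unaligned_zeropad.c maps two pages and turns the second into a fault boundary with mprotect(PROT_NONE), so a load that runs off the end of the first page traps. A minimal sketch of the same guard-page setup and teardown:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int page_size = getpagesize();

        /* Two pages: data in the first, a guard page in the second. */
        char *mem_region = mmap(NULL, page_size * 2, PROT_READ | PROT_WRITE,
                                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mem_region == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        memset(mem_region, 0xaa, page_size);
        memset(mem_region + page_size, 0, page_size);

        /* Make the second page inaccessible, as protect_region() does. */
        if (mprotect(mem_region + page_size, page_size, PROT_NONE)) {
            perror("mprotect");
            return 1;
        }
        /* Any access past mem_region[page_size - 1] now faults. */

        /* Undo it, as unprotect_region() does. */
        if (mprotect(mem_region + page_size, page_size, PROT_READ | PROT_WRITE)) {
            perror("mprotect");
            return 1;
        }
        munmap(mem_region, page_size * 2);
        return 0;
    }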
|
/tools/power/acpi/os_specific/service_layers/ |
D | osunixmap.c |
    101  acpi_size page_size;  in acpi_os_map_memory() local
    112  page_size = acpi_os_get_page_size();  in acpi_os_map_memory()
    113  offset = where % page_size;  in acpi_os_map_memory()
    146  acpi_size page_size;  in acpi_os_unmap_memory() local
    148  page_size = acpi_os_get_page_size();  in acpi_os_unmap_memory()
    149  offset = ACPI_TO_INTEGER(where) % page_size;  in acpi_os_unmap_memory()
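acpi_os_map_memory() has to map addresses that are not page aligned, so it keeps offset = where % page_size, maps from the page boundary below 'where', and adds the offset back to the returned pointer. A minimal sketch of that pattern against a scratch file (the ACPI code does the same against /dev/mem; the file name and sizes here are made up):

    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page_size = sysconf(_SC_PAGESIZE);
        off_t where = 5000;                 /* arbitrary, not page aligned */
        off_t offset = where % page_size;   /* distance into its page */

        /* Stand-in backing object; the ACPI code maps /dev/mem instead. */
        char path[] = "/tmp/pgsz-demo-XXXXXX";
        int fd = mkstemp(path);
        if (fd < 0 || ftruncate(fd, where + 256)) {
            perror("mkstemp/ftruncate");
            return 1;
        }
        unlink(path);

        /* mmap() needs a page-aligned file offset: map from the page that
         * contains 'where', then re-add 'offset' to the returned pointer. */
        char *mapped = mmap(NULL, offset + 256, PROT_READ, MAP_PRIVATE,
                            fd, where - offset);
        if (mapped == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        char *data = mapped + offset;       /* virtual address of offset 5000 */
        printf("byte at file offset %lld: 0x%02x\n",
               (long long)where, (unsigned char)data[0]);

        munmap(mapped, offset + 256);
        close(fd);
        return 0;
    }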
|
/tools/perf/arch/x86/util/ |
D | intel-bts.c |
    155  opts->auxtrace_mmap_pages = MiB(4) / page_size;  in intel_bts_recording_options()
    157  opts->auxtrace_mmap_pages = KiB(128) / page_size;  in intel_bts_recording_options()
    159  opts->mmap_pages = KiB(256) / page_size;  in intel_bts_recording_options()
    163  opts->mmap_pages = KiB(256) / page_size;  in intel_bts_recording_options()
    167  opts->auxtrace_mmap_pages * (size_t)page_size;  in intel_bts_recording_options()
    171  sz = round_up(sz, page_size) / page_size;  in intel_bts_recording_options()
    175  opts->auxtrace_mmap_pages * (size_t)page_size) {  in intel_bts_recording_options()
    178  opts->auxtrace_mmap_pages * (size_t)page_size);  in intel_bts_recording_options()
    192  opts->auxtrace_mmap_pages = MiB(4) / page_size;  in intel_bts_recording_options()
    194  opts->auxtrace_mmap_pages = KiB(128) / page_size;  in intel_bts_recording_options()
    [all …]
|
D | intel-pt.c |
    559  opts->auxtrace_mmap_pages = MiB(4) / page_size;  in intel_pt_recording_options()
    561  opts->auxtrace_mmap_pages = KiB(128) / page_size;  in intel_pt_recording_options()
    563  opts->mmap_pages = KiB(256) / page_size;  in intel_pt_recording_options()
    567  opts->mmap_pages = KiB(256) / page_size;  in intel_pt_recording_options()
    571  opts->auxtrace_mmap_pages * (size_t)page_size;  in intel_pt_recording_options()
    575  sz = round_up(sz, page_size) / page_size;  in intel_pt_recording_options()
    579  opts->auxtrace_mmap_pages * (size_t)page_size) {  in intel_pt_recording_options()
    582  opts->auxtrace_mmap_pages * (size_t)page_size);  in intel_pt_recording_options()
    601  opts->auxtrace_mmap_pages = MiB(4) / page_size;  in intel_pt_recording_options()
    603  opts->auxtrace_mmap_pages = KiB(128) / page_size;  in intel_pt_recording_options()
    [all …]
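Both intel-bts.c and intel-pt.c turn a requested snapshot size into a whole number of AUX pages with sz = round_up(sz, page_size) / page_size. A minimal sketch of that conversion, with round_up() written out as a simplified stand-in for the helper in tools/include/linux/kernel.h:

    #include <stdio.h>
    #include <unistd.h>

    /* Simplified stand-in for round_up(): round x up to a multiple of
     * 'to', which must be a power of two. */
    #define round_up(x, to) (((x) + (to) - 1) & ~((unsigned long long)(to) - 1))

    int main(void)
    {
        unsigned long long page_size = getpagesize();

        /* A requested snapshot size is turned into whole pages the same
         * way intel_bts/intel_pt_recording_options() do it. */
        unsigned long long sz = 300000;     /* bytes, not page aligned */
        unsigned long long pages = round_up(sz, page_size) / page_size;

        printf("%llu bytes -> %llu pages (%llu bytes mapped)\n",
               sz, pages, pages * page_size);
        return 0;
    }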
|
/tools/testing/selftests/futex/functional/ |
D | futex_wait_uninitialized_heap.c |
    72   long page_size;  in main() local
    92   page_size = sysconf(_SC_PAGESIZE);  in main()
    94   buf = mmap(NULL, page_size, PROT_READ|PROT_WRITE,  in main()
|
/tools/perf/tests/ |
D | mmap-thread-lookup.c |
    32   map = mmap(NULL, page_size,  in thread_init()
    69   munmap(td->map, page_size);  in thread_fn()
    116  munmap(td0->map, page_size);  in threads_destroy()
|
D | vmlinux-kallsyms.c | 140 if (llabs(skew) >= page_size) in test__vmlinux_matches_kallsyms()
|
/tools/vm/ |
D | page-types.c |
    178  static int page_size;  variable
    218  return (pages * page_size) >> 20;  in pages2mb()
    799  pg_start[nr_vmas] = vm_start / page_size;  in parse_pid()
    800  pg_end[nr_vmas] = vm_end / page_size;  in parse_pid()
    817  size, (size + page_size - 1) / page_size);  in show_file()
    858  nr_pages = (end - off + page_size - 1) / page_size;  in walk_file()
    861  len = nr_pages * page_size;  in walk_file()
    885  (void)*(volatile int *)(ptr + i * page_size);  in walk_file()
    893  if (pagemap_read(buf, (unsigned long)ptr / page_size,  in walk_file()
    910  add_page(off / page_size + i, pfn, flags, buf[i]);  in walk_file()
    [all …]
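page-types turns a virtual address into a pagemap index by dividing by page_size and then reads 64-bit entries from /proc/pid/pagemap (pagemap_read() at line 893). A minimal sketch of that lookup for one address in the current process; note that the PFN bits read back as zero without CAP_SYS_ADMIN on recent kernels:

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(void)
    {
        long page_size = getpagesize();

        /* Some present page in our own address space. */
        char *ptr = malloc(page_size);
        ptr[0] = 1;

        int fd = open("/proc/self/pagemap", O_RDONLY);
        if (fd < 0) {
            perror("open");
            return 1;
        }

        /* One 64-bit entry per virtual page: index = vaddr / page_size. */
        uint64_t entry;
        off_t idx = (uintptr_t)ptr / page_size;
        if (pread(fd, &entry, sizeof(entry), idx * sizeof(entry)) != sizeof(entry)) {
            perror("pread");
            return 1;
        }

        /* Bit 63: page present; bits 0-54: page frame number. */
        printf("present=%d pfn=0x%llx\n",
               (int)(entry >> 63),
               (unsigned long long)(entry & ((1ULL << 55) - 1)));
        close(fd);
        free(ptr);
        return 0;
    }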
|
D | slabinfo.c |
    94    int page_size;  variable
    339   return s->slabs * (page_size << s->order);  in slab_size()
    540   s->slabs * (page_size << s->order));  in report()
    545   page_size << s->order, s->partial, onoff(s->poison),  in report()
    546   s->slabs * (page_size << s->order) - s->objects * s->object_size);  in report()
    552   ((page_size << s->order) - s->objs_per_slab * s->slab_size) *  in report()
    632   (s->slabs * (page_size << s->order)) : 100,  in slabcache()
    1358  page_size = getpagesize();  in main()
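slabinfo sizes a cache as slabs * (page_size << order), i.e. each slab spans 2^order pages. A tiny worked example of that arithmetic with made-up numbers:

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        int page_size = getpagesize();

        /* Hypothetical cache: 120 slabs of order 3 (8 pages per slab). */
        unsigned long slabs = 120, order = 3;
        unsigned long slab_bytes = (unsigned long)page_size << order;
        unsigned long total = slabs * slab_bytes;

        printf("slab size %lu bytes, cache uses %lu bytes (%lu KiB)\n",
               slab_bytes, total, total / 1024);
        return 0;
    }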
|
/tools/perf/arch/x86/tests/ |
D | rdpmc.c |
    117  addr = mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);  in __test__rdpmc()
    141  munmap(addr, page_size);  in __test__rdpmc()
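__test__rdpmc() mmaps a single page of a perf event file descriptor; that page is struct perf_event_mmap_page, which tells user space whether rdpmc can be used and which counter index to read. A minimal sketch that opens a hardware counter (an arbitrary choice of PERF_COUNT_HW_INSTRUCTIONS) and maps the control page; it will fail where perf_event_open() is restricted:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    int main(void)
    {
        long page_size = sysconf(_SC_PAGESIZE);

        struct perf_event_attr attr;
        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.exclude_kernel = 1;

        int fd = syscall(SYS_perf_event_open, &attr, 0, -1, -1, 0);
        if (fd < 0) {
            perror("perf_event_open");
            return 1;
        }

        /* The first page of a perf event mapping is the control page. */
        struct perf_event_mmap_page *pc =
            mmap(NULL, page_size, PROT_READ, MAP_SHARED, fd, 0);
        if (pc == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        printf("rdpmc usable: %u, counter index: %u\n",
               (unsigned)pc->cap_user_rdpmc, (unsigned)pc->index);

        munmap(pc, page_size);
        close(fd);
        return 0;
    }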
|
D | intel-cqm.c | 102 mmap_len = page_size * 65; in test__intel_cqm_count_nmi_context()
|
/tools/perf/util/ |
D | symbol-elf.c |
    1211  char *buf = malloc(page_size);  in copy_bytes()
    1223  n = page_size;  in copy_bytes()
    1367  .p_align = page_size,  in kcore__add_phdr()
    1523  kci->stext = round_down(kci->stext, page_size);  in kcore_copy__calc_maps()
    1525  kci->stext = round_down(kci->first_symbol, page_size);  in kcore_copy__calc_maps()
    1528  kci->etext = round_up(kci->etext, page_size);  in kcore_copy__calc_maps()
    1530  kci->etext = round_up(kci->last_symbol, page_size);  in kcore_copy__calc_maps()
    1531  kci->etext += page_size;  in kcore_copy__calc_maps()
    1538  kci->first_module = round_down(kci->first_module, page_size);  in kcore_copy__calc_maps()
    1542  page_size);  in kcore_copy__calc_maps()
    [all …]
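kcore_copy__calc_maps() widens the kernel text range to page boundaries with round_down()/round_up() before emitting program headers. A small sketch of that alignment on a made-up address range, with the two macros written out in simplified form:

    #include <stdio.h>
    #include <unistd.h>

    /* Power-of-two alignment helpers, same effect as the perf tree's
     * round_down()/round_up(). */
    #define round_down(x, a) ((x) & ~((unsigned long long)(a) - 1))
    #define round_up(x, a)   (((x) + (a) - 1) & ~((unsigned long long)(a) - 1))

    int main(void)
    {
        unsigned long long page_size = getpagesize();

        /* Hypothetical symbol range, not page aligned. */
        unsigned long long stext = 0xffffffff81000123ULL;
        unsigned long long etext = 0xffffffff81a01fc0ULL;

        unsigned long long start = round_down(stext, page_size);
        unsigned long long end   = round_up(etext, page_size);

        printf("text: %#llx-%#llx -> %#llx-%#llx (%llu pages)\n",
               stext, etext, start, end, (end - start) / page_size);
        return 0;
    }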
|
D | evlist.c |
    717   unsigned char *data = md->base + page_size;  in perf_evlist__mmap_read()
    1032  max -= (page_size / 1024);  in perf_evlist__mmap_size()
    1035  pages = (max * 1024) / page_size;  in perf_evlist__mmap_size()
    1041  return (pages + 1) * page_size;  in perf_evlist__mmap_size()
    1062  pages = PERF_ALIGN(val, page_size) / page_size;  in parse_pages_arg()
    1079  pages * page_size, pages);  in parse_pages_arg()
    1093  if (max > SIZE_MAX / page_size)  in __perf_evlist__parse_mmap_pages()
    1094  max = SIZE_MAX / page_size;  in __perf_evlist__parse_mmap_pages()
    1149  mp.mask = evlist->mmap_len - page_size - 1;  in perf_evlist__mmap_ex()
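The perf mmap ring buffer is one header page followed by a power-of-two number of data pages, which is why perf_evlist__mmap_size() returns (pages + 1) * page_size and the wrap-around mask is set to mmap_len - page_size - 1. A minimal sketch of that arithmetic with a made-up head position:

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        size_t page_size = getpagesize();
        size_t pages = 128;              /* data pages, must be a power of two */

        /* One extra page in front of the data holds the control header
         * (struct perf_event_mmap_page). */
        size_t mmap_len = (pages + 1) * page_size;

        /* Data offsets wrap at the power-of-two data size. */
        size_t mask = mmap_len - page_size - 1;

        unsigned long long head = 5 * (pages * page_size) + 300; /* example */
        printf("mmap_len=%zu mask=%#zx head %llu maps to data offset %llu\n",
               mmap_len, mask, head, head & mask);
        return 0;
    }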
|
D | util.c |
    30   unsigned int page_size;  variable
    149  pgoff = off_in & ~(page_size - 1);  in copyfile_offset()
|
D | util.h | 281 extern unsigned int page_size;
|
D | trace-event-info.c | 477 if (write(output_fd, &page_size, 4) != 4) in tracing_data_header()
|
D | session.c |
    1587  page_offset = page_size * (data_offset / page_size);  in __perf_session__process_events()
    1638  page_offset = page_size * (head / page_size);  in __perf_session__process_events()
|
D | auxtrace.c |
    107  mp->len = auxtrace_pages * (size_t)page_size;  in auxtrace_mmap_params__init()
    774  size_t adj = buffer->data_offset & (page_size - 1);  in auxtrace_buffer__get_data()
|
/tools/lib/traceevent/ |
D | event-parse.h |
    482  int page_size;  member
    739  return pevent->page_size;  in pevent_get_page_size()
    744  pevent->page_size = _page_size;  in pevent_set_page_size()
|
/tools/testing/selftests/seccomp/ |
D | seccomp_bpf.c |
    495  int page_size = sysconf(_SC_PAGESIZE);  in TEST_SIGNAL() local
    497  ASSERT_LT(0, page_size);  in TEST_SIGNAL()
    510  NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size);  in TEST_SIGNAL()
    514  NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE);  in TEST_SIGNAL()
    518  munmap(map1, page_size);  in TEST_SIGNAL()
    519  munmap(map2, page_size);  in TEST_SIGNAL()
|
/tools/perf/ |
D | perf.c | 532 page_size = sysconf(_SC_PAGE_SIZE); in main()
|
/tools/perf/Documentation/ |
D | intel-pt.txt |
    409  The default auxtrace mmap size for Intel PT is 4MiB/page_size for privileged users
    412  reduced from the default 512KiB/page_size to 256KiB/page_size, otherwise the
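As a rough sketch, with the common 4KiB page size these documented defaults work out to the following page counts (the same divisions intel_pt_recording_options() performs):

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        long page_size = sysconf(_SC_PAGESIZE);

        /* Documented Intel PT defaults: 4MiB of AUX area for privileged
         * users, 128KiB otherwise; normal mmap area reduced to 256KiB. */
        printf("auxtrace pages (privileged):   %ld\n", (4L << 20) / page_size);
        printf("auxtrace pages (unprivileged): %ld\n", (128L << 10) / page_size);
        printf("mmap pages:                    %ld\n", (256L << 10) / page_size);
        return 0;
    }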
|