
Searched refs:page_size (Results 1 – 25 of 67) sorted by relevance

/tools/testing/selftests/vm/
mremap_dontunmap.c
22 unsigned long page_size; variable
49 void *source_mapping = mmap(NULL, num_pages * page_size, PROT_NONE, in kernel_support_for_mremap_dontunmap()
56 mremap(source_mapping, num_pages * page_size, num_pages * page_size, in kernel_support_for_mremap_dontunmap()
61 BUG_ON(munmap(dest_mapping, num_pages * page_size) == -1, in kernel_support_for_mremap_dontunmap()
65 BUG_ON(munmap(source_mapping, num_pages * page_size) == -1, in kernel_support_for_mremap_dontunmap()
74 BUG_ON(size & (page_size - 1), in check_region_contains_byte()
76 BUG_ON((unsigned long)addr & (page_size - 1), in check_region_contains_byte()
79 memset(page_buffer, byte, page_size); in check_region_contains_byte()
81 unsigned long num_pages = size / page_size; in check_region_contains_byte()
87 memcmp(addr + (i * page_size), page_buffer, page_size); in check_region_contains_byte()
[all …]
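
mremap_dontunmap.c probes kernel support for MREMAP_DONTUNMAP (Linux 5.7+), which moves a mapping while leaving the source VMA in place. A standalone, illustrative sketch of the same probe (not part of the selftest), with a fallback define in case older headers lack the flag:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #ifndef MREMAP_DONTUNMAP
    #define MREMAP_DONTUNMAP 4   /* assumption: flag not exposed by older headers */
    #endif

    int main(void)
    {
        unsigned long page_size = sysconf(_SC_PAGESIZE);

        /* Private anonymous source mapping of one page. */
        void *src = mmap(NULL, page_size, PROT_NONE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (src == MAP_FAILED)
            return 1;

        /* MREMAP_DONTUNMAP needs MREMAP_MAYMOVE; on success the old
         * mapping stays in place and a moved copy is returned. */
        void *dst = mremap(src, page_size, page_size,
                           MREMAP_MAYMOVE | MREMAP_DONTUNMAP);
        if (dst == MAP_FAILED) {
            printf("MREMAP_DONTUNMAP not supported\n");
        } else {
            printf("MREMAP_DONTUNMAP supported\n");
            munmap(dst, page_size);
        }
        munmap(src, page_size);
        return 0;
    }
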
mlock2-tests.c
195 unsigned long page_size = getpagesize(); in test_mlock_lock() local
197 map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE, in test_mlock_lock()
204 if (mlock2_(map, 2 * page_size, 0)) { in test_mlock_lock()
217 if (munlock(map, 2 * page_size)) { in test_mlock_lock()
225 munmap(map, 2 * page_size); in test_mlock_lock()
243 unsigned long page_size = getpagesize(); in unlock_onfault_check() local
246 is_vma_lock_on_fault((unsigned long)map + page_size)) { in unlock_onfault_check()
258 unsigned long page_size = getpagesize(); in test_mlock_onfault() local
260 map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE, in test_mlock_onfault()
267 if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) { in test_mlock_onfault()
[all …]
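
mlock2-tests.c exercises mlock2() and MLOCK_ONFAULT, which defers locking until a page is actually faulted in. An illustrative sketch (not the selftest itself), assuming the libc headers expose SYS_mlock2; like the test's own mlock2_() helper, it goes through syscall() so it also builds without a glibc mlock2() wrapper:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef MLOCK_ONFAULT
    #define MLOCK_ONFAULT 0x01   /* assumption: uapi value for older headers */
    #endif

    /* Raw-syscall wrapper so this builds even without a glibc mlock2(). */
    static int mlock2_(void *addr, size_t len, int flags)
    {
        return syscall(SYS_mlock2, addr, len, flags);
    }

    int main(void)
    {
        long page_size = getpagesize();
        char *map = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (map == MAP_FAILED)
            return 1;

        /* Lock on fault: only pages that get touched are pinned. */
        if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
            perror("mlock2");
            return 1;
        }
        memset(map, 0, page_size);   /* faults in (and locks) page 0 only */

        munlock(map, 2 * page_size);
        munmap(map, 2 * page_size);
        return 0;
    }
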
map_fixed_noreplace.c
50 unsigned long flags, addr, size, page_size; in main() local
53 page_size = sysconf(_SC_PAGE_SIZE); in main()
56 size = 5 * page_size; in main()
68 size = 5 * page_size; in main()
80 if (munmap((void *)addr, 5 * page_size) != 0) { in main()
88 addr = base_addr + page_size; in main()
89 size = 3 * page_size; in main()
109 size = 5 * page_size; in main()
129 addr = base_addr + (2 * page_size); in main()
130 size = page_size; in main()
[all …]
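
map_fixed_noreplace.c relies on MAP_FIXED_NOREPLACE (Linux 4.17+), which behaves like MAP_FIXED but fails with EEXIST instead of silently replacing an existing mapping. A minimal illustrative sketch; the hint address is arbitrary and not taken from the test:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    #ifndef MAP_FIXED_NOREPLACE
    #define MAP_FIXED_NOREPLACE 0x100000   /* assumption: uapi value, Linux 4.17+ */
    #endif

    int main(void)
    {
        unsigned long page_size = sysconf(_SC_PAGE_SIZE);
        void *hint = (void *)(0x10UL << 30);   /* illustrative, page-aligned hint */

        void *p = mmap(hint, 5 * page_size, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
        if (p == MAP_FAILED)
            return 1;   /* the hint happens to be in use on this system */

        /* A second MAP_FIXED_NOREPLACE over any part of the range must fail
         * with EEXIST rather than clobbering the first mapping. */
        void *q = mmap(hint, page_size, PROT_NONE,
                       MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED_NOREPLACE, -1, 0);
        if (q == MAP_FAILED && errno == EEXIST)
            printf("overlap correctly rejected\n");

        munmap(p, 5 * page_size);
        return 0;
    }
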
khugepaged.c
20 static unsigned long page_size; variable
459 for (i = start / page_size; i < end / page_size; i++) in fill_memory()
460 p[i * page_size / sizeof(*p)] = i + 0xdead0000; in fill_memory()
467 for (i = start / page_size; i < end / page_size; i++) { in validate_memory()
468 if (p[i * page_size / sizeof(*p)] != i + 0xdead0000) { in validate_memory()
470 i, p[i * page_size / sizeof(*p)]); in validate_memory()
526 madvise(p, page_size, MADV_DONTNEED); in alloc_at_fault()
570 fill_memory(p, 0, page_size); in collapse_single_pte_entry()
577 validate_memory(p, 0, page_size); in collapse_single_pte_entry()
592 fill_memory(p, 0, (hpage_pmd_nr - max_ptes_none - 1) * page_size); in collapse_max_ptes_none()
[all …]
userfaultfd.c
64 static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size; variable
107 ((pthread_mutex_t *) ((___area) + (___nr)*page_size))
114 ((___area) + (___nr)*page_size + \
206 if (madvise(rel_area, nr_pages * page_size, MADV_DONTNEED)) in anon_release_pages()
212 *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, in anon_allocate_area()
225 rel_area == huge_fd_off0 ? 0 : nr_pages * page_size, in hugetlb_release_pages()
226 nr_pages * page_size)) in hugetlb_release_pages()
235 *alloc_area = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, in hugetlb_allocate_area()
239 nr_pages * page_size); in hugetlb_allocate_area()
244 area_alias = mmap(NULL, nr_pages * page_size, PROT_READ | PROT_WRITE, in hugetlb_allocate_area()
[all …]
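
anon_release_pages() in userfaultfd.c drops the anonymous test area with madvise(MADV_DONTNEED) so that later accesses fault again. An illustrative, standalone version of that one step (not the selftest itself):

    #define _GNU_SOURCE
    #include <assert.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned long page_size = sysconf(_SC_PAGESIZE);
        char *area = mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (area == MAP_FAILED)
            return 1;

        memset(area, 0xaa, 4 * page_size);   /* populate every page */

        /* MADV_DONTNEED zaps the anonymous pages; the next touch faults in
         * fresh zero-filled pages, which is what the test relies on
         * between runs. */
        if (madvise(area, 4 * page_size, MADV_DONTNEED))
            return 1;
        assert(area[0] == 0 && area[3 * page_size] == 0);

        munmap(area, 4 * page_size);
        return 0;
    }
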
hmm-tests.c
55 unsigned int page_size; in FIXTURE() local
63 unsigned int page_size; in FIXTURE() local
82 self->page_size = sysconf(_SC_PAGE_SIZE); in FIXTURE_SETUP()
83 self->page_shift = ffs(self->page_size) - 1; in FIXTURE_SETUP()
91 self->page_size = sysconf(_SC_PAGE_SIZE); in FIXTURE_SETUP()
92 self->page_shift = ffs(self->page_size) - 1; in FIXTURE_SETUP()
230 npages = ALIGN(HMM_BUFFER_SIZE, self->page_size) >> self->page_shift; in TEST_F()
252 i = 2 * self->page_size / sizeof(*ptr); in TEST_F()
261 val = *(int *)(buffer->ptr + self->page_size); in TEST_F()
272 for (i = 0; i < 2 * self->page_size / sizeof(*ptr); ++i) in TEST_F()
[all …]
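
hmm-tests.c derives page_shift from the page size with ffs() and converts a byte count to a page count by aligning up and shifting. The same arithmetic in isolation; ALIGN and the buffer size below are illustrative stand-ins for the test's own macros:

    #include <stdio.h>
    #include <strings.h>   /* ffs() */
    #include <unistd.h>

    /* Round x up to the next multiple of a power-of-two alignment. */
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((unsigned long)(a) - 1))

    int main(void)
    {
        unsigned long page_size = sysconf(_SC_PAGE_SIZE);
        int page_shift = ffs((int)page_size) - 1;   /* page_size is a power of two */

        unsigned long buffer_size = (1UL << 20) + 123;   /* illustrative size */
        unsigned long npages = ALIGN(buffer_size, page_size) >> page_shift;

        printf("page_size=%lu page_shift=%d npages=%lu\n",
               page_size, page_shift, npages);
        return 0;
    }
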
mlock-random-test.c
144 int page_size = 0; in test_mlock_within_limit() local
184 page_size = get_proc_page_size((unsigned long)p); in test_mlock_within_limit()
185 if (page_size == 0) { in test_mlock_within_limit()
190 if (locked_vm_size > PAGE_ALIGN(alloc_size, page_size) + page_size) { in test_mlock_within_limit()
/tools/testing/selftests/mincore/
mincore_selftest.c
34 int page_size; in TEST() local
38 page_size = sysconf(_SC_PAGESIZE); in TEST()
46 retval = mincore(NULL, page_size, vec); in TEST()
51 addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, in TEST()
59 retval = mincore(addr + 1, page_size, vec); in TEST()
71 retval = mincore(addr, page_size, NULL); in TEST()
74 munmap(addr, page_size); in TEST()
89 int page_size; in TEST() local
91 page_size = sysconf(_SC_PAGESIZE); in TEST()
95 addr = mmap(NULL, page_size, PROT_READ | PROT_WRITE, in TEST()
[all …]
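
mincore_selftest.c walks the error paths (NULL/unmapped address, unaligned pointer, NULL vector) around the normal case. The normal case on its own, as an illustrative sketch: mincore() reports one status byte per page, and bit 0 set means the page is resident:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page_size = sysconf(_SC_PAGESIZE);
        unsigned char vec[4];

        char *addr = mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (addr == MAP_FAILED)
            return 1;

        addr[0] = 1;   /* fault in the first page only */

        if (mincore(addr, 4 * page_size, vec)) {
            perror("mincore");
            return 1;
        }
        for (int i = 0; i < 4; i++)
            printf("page %d resident: %d\n", i, vec[i] & 1);

        munmap(addr, 4 * page_size);
        return 0;
    }
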
/tools/testing/selftests/powerpc/primitives/
load_unaligned_zeropad.c
38 static int page_size; variable
43 if (mprotect(mem_region + page_size, page_size, PROT_NONE)) { in protect_region()
53 if (mprotect(mem_region + page_size, page_size, PROT_READ|PROT_WRITE)) { in unprotect_region()
125 page_size = getpagesize(); in test_body()
126 mem_region = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE, in test_body()
131 for (i = 0; i < page_size; i++) in test_body()
134 memset(mem_region+page_size, 0, page_size); in test_body()
138 for (i = 0; i < page_size; i++) in test_body()
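
load_unaligned_zeropad.c sets up two adjacent pages and flips the second to PROT_NONE so a load that runs past the first page faults; the copyloops exc_validate.c entry further down uses the same two-page setup. The guard-page pattern on its own, as an illustrative sketch:

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int page_size = getpagesize();

        char *region = mmap(NULL, 2 * page_size, PROT_READ | PROT_WRITE,
                            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (region == MAP_FAILED)
            return 1;

        memset(region, 0x0f, page_size);
        /* Make the second page inaccessible: a guard page right after the data. */
        if (mprotect(region + page_size, page_size, PROT_NONE)) {
            perror("mprotect");
            return 1;
        }

        /* region[page_size - 1] is the last readable byte; one past it faults. */
        printf("last byte: 0x%x\n", region[page_size - 1]);

        /* Undo the protection, as unprotect_region() does, before reuse. */
        mprotect(region + page_size, page_size, PROT_READ | PROT_WRITE);
        munmap(region, 2 * page_size);
        return 0;
    }
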
/tools/testing/selftests/bpf/prog_tests/
mmap.c
12 long page_size = sysconf(_SC_PAGE_SIZE); in roundup_page() local
13 return (sz + page_size - 1) / page_size * page_size; in roundup_page()
21 const long page_size = sysconf(_SC_PAGE_SIZE); in test_mmap() local
186 tmp0 = mmap(NULL, 4 * page_size, PROT_READ, MAP_SHARED | MAP_ANONYMOUS, in test_mmap()
192 tmp1 = mmap(tmp0, 3 * page_size, PROT_READ, MAP_SHARED | MAP_FIXED, in test_mmap()
195 munmap(tmp0, 4 * page_size); in test_mmap()
200 err = munmap(tmp1 + page_size, page_size); in test_mmap()
202 munmap(tmp1, 4 * page_size); in test_mmap()
207 tmp2 = mmap(tmp1 + page_size, page_size, PROT_READ, in test_mmap()
210 munmap(tmp1, page_size); in test_mmap()
[all …]
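
The bpf mmap test maps several pages, overlays part of the range with MAP_FIXED, and unmaps single pages out of the middle. The property it leans on, shown as an illustrative sketch, is that munmap() of one page inside a larger mapping is legal and simply splits it:

    #define _GNU_SOURCE
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long page_size = sysconf(_SC_PAGE_SIZE);

        char *p = mmap(NULL, 4 * page_size, PROT_READ | PROT_WRITE,
                       MAP_SHARED | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return 1;

        /* Punch a one-page hole; the pages on either side stay mapped. */
        if (munmap(p + page_size, page_size) != 0)
            return 1;
        p[0] = 1;               /* still accessible */
        p[2 * page_size] = 1;   /* still accessible, past the hole */

        /* Unmapping a range that contains holes is also fine. */
        munmap(p, 4 * page_size);
        return 0;
    }
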
/tools/power/acpi/os_specific/service_layers/
osunixmap.c
67 acpi_size page_size; in acpi_os_map_memory() local
78 page_size = acpi_os_get_page_size(); in acpi_os_map_memory()
79 offset = where % page_size; in acpi_os_map_memory()
112 acpi_size page_size; in acpi_os_unmap_memory() local
114 page_size = acpi_os_get_page_size(); in acpi_os_unmap_memory()
115 offset = ACPI_TO_INTEGER(where) % page_size; in acpi_os_unmap_memory()
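
osunixmap.c has to map addresses that are not page aligned, so it keeps offset = where % page_size, maps from the aligned-down position, and adds the offset back. The same idea against an ordinary file, as an illustrative sketch (the file path is purely illustrative and not from the source):

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    /* mmap() requires a page-aligned file offset: align down, map a little
     * extra, and return a pointer adjusted by the remainder. */
    static void *map_at(int fd, off_t where, size_t length)
    {
        long page_size = sysconf(_SC_PAGESIZE);
        off_t offset = where % page_size;

        char *base = mmap(NULL, length + offset, PROT_READ, MAP_PRIVATE,
                          fd, where - offset);
        return base == MAP_FAILED ? NULL : base + offset;
    }

    int main(void)
    {
        int fd = open("/etc/hostname", O_RDONLY);   /* illustrative file */
        if (fd < 0)
            return 1;

        char *p = map_at(fd, 3, 4);   /* 4 bytes starting at byte 3 */
        if (p)
            printf("%.4s\n", p);
        close(fd);
        return 0;
    }

A real caller keeps the aligned base and padded length around for munmap(), which is what acpi_os_unmap_memory() recomputes in the excerpt above.
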
/tools/testing/selftests/powerpc/copyloops/
exc_validate.c
81 int page_size; in test_copy_exception() local
85 page_size = getpagesize(); in test_copy_exception()
86 p = mmap(NULL, page_size * 2, PROT_READ|PROT_WRITE, in test_copy_exception()
94 memset(p, 0, page_size); in test_copy_exception()
98 if (mprotect(p + page_size, page_size, PROT_NONE)) { in test_copy_exception()
103 q = p + page_size - MAX_LEN; in test_copy_exception()
/tools/testing/selftests/kvm/lib/s390x/
processor.c
22 TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x", in virt_pgd_alloc()
23 vm->page_size); in virt_pgd_alloc()
30 memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size); in virt_pgd_alloc()
47 memset(addr_gpa2hva(vm, taddr), 0xff, PAGES_PER_REGION * vm->page_size); in virt_alloc_region()
60 TEST_ASSERT((gva % vm->page_size) == 0, in virt_pg_map()
63 gva, vm->page_size); in virt_pg_map()
68 TEST_ASSERT((gpa % vm->page_size) == 0, in virt_pg_map()
71 gva, vm->page_size); in virt_pg_map()
75 gva, vm->max_gfn, vm->page_size); in virt_pg_map()
99 TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x", in addr_gva2gpa()
[all …]
/tools/testing/selftests/powerpc/mm/
stack_expansion_ldst.c
157 static void test_one_type(enum access_type type, unsigned long page_size, unsigned long rlim_cur) in test_one_type() argument
162 for (delta = page_size; delta <= rlim_cur; delta += page_size) in test_one_type()
173 unsigned long page_size; in test() local
176 page_size = getpagesize(); in test()
181 test_one_type(LOAD, page_size, rlimit.rlim_cur); in test()
183 test_one_type(STORE, page_size, rlimit.rlim_cur); in test()
bad_accesses.c
69 unsigned long i, j, addr, region_shift, page_shift, page_size; in test() local
82 page_size = sysconf(_SC_PAGESIZE); in test()
83 if (page_size == (64 * 1024)) in test()
88 if (page_size == (64 * 1024) || !hash_mmu) { in test()
93 } else if (page_size == (4 * 1024) && hash_mmu) { in test()
large_vm_fork_separation.c
27 unsigned long page_size; in test() local
30 page_size = sysconf(_SC_PAGESIZE); in test()
31 SKIP_IF(page_size != 65536); in test()
34 p = mmap((void *)(512ul << 40), page_size, PROT_READ | PROT_WRITE, in test()
/tools/perf/arch/x86/util/
intel-bts.c
154 opts->auxtrace_mmap_pages = MiB(4) / page_size; in intel_bts_recording_options()
156 opts->auxtrace_mmap_pages = KiB(128) / page_size; in intel_bts_recording_options()
158 opts->mmap_pages = KiB(256) / page_size; in intel_bts_recording_options()
162 opts->mmap_pages = KiB(256) / page_size; in intel_bts_recording_options()
166 opts->auxtrace_mmap_pages * (size_t)page_size; in intel_bts_recording_options()
170 sz = round_up(sz, page_size) / page_size; in intel_bts_recording_options()
174 opts->auxtrace_mmap_pages * (size_t)page_size) { in intel_bts_recording_options()
177 opts->auxtrace_mmap_pages * (size_t)page_size); in intel_bts_recording_options()
191 opts->auxtrace_mmap_pages = MiB(4) / page_size; in intel_bts_recording_options()
193 opts->auxtrace_mmap_pages = KiB(128) / page_size; in intel_bts_recording_options()
[all …]
/tools/testing/selftests/kvm/lib/
kvm_util.c
148 unsigned int page_size; member
201 vm->page_size = vm_guest_mode_params[mode].page_size; in vm_create()
556 amt = vm->page_size - (ptr1 % vm->page_size); in kvm_memcmp_hva_gva()
558 amt = vm->page_size - (ptr2 % vm->page_size); in kvm_memcmp_hva_gva()
609 size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size; in vm_userspace_mem_region_add()
616 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical " in vm_userspace_mem_region_add()
619 guest_paddr, vm->page_size); in vm_userspace_mem_region_add()
625 guest_paddr, npages, vm->max_gfn, vm->page_size); in vm_userspace_mem_region_add()
632 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1); in vm_userspace_mem_region_add()
639 guest_paddr, npages, vm->page_size, in vm_userspace_mem_region_add()
[all …]
/tools/testing/selftests/kvm/lib/aarch64/
processor.c
21 return (v + vm->page_size) & ~(vm->page_size - 1); in page_align()
81 page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size, in virt_pgd_alloc()
94 TEST_ASSERT((vaddr % vm->page_size) == 0, in _virt_pg_map()
96 " vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size); in _virt_pg_map()
100 TEST_ASSERT((paddr % vm->page_size) == 0, in _virt_pg_map()
102 " paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size); in _virt_pg_map()
106 paddr, vm->max_gfn, vm->page_size); in _virt_pg_map()
179 return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1)); in addr_gva2gpa()
318 size_t stack_size = vm->page_size == 4096 ? in aarch64_vcpu_add_default()
319 DEFAULT_STACK_PGS * vm->page_size : in aarch64_vcpu_add_default()
[all …]
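
Helpers like page_align() above use mask arithmetic, which works because the page size is a power of two. The usual round-down and round-up idioms in isolation, as an illustrative sketch:

    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
        unsigned long page_size = sysconf(_SC_PAGESIZE);
        unsigned long mask = page_size - 1;       /* power of two, so low bits form a mask */
        unsigned long v = 0x12345;                /* illustrative value */

        unsigned long down = v & ~mask;           /* round down to a page boundary */
        unsigned long up   = (v + mask) & ~mask;  /* round up to a page boundary */

        printf("v=%#lx down=%#lx up=%#lx\n", v, down, up);
        return 0;
    }
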
/tools/lib/bpf/
ringbuf.c
37 size_t page_size; member
45 munmap(r->consumer_pos, rb->page_size); in ringbuf_unmap_ring()
49 munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1)); in ringbuf_unmap_ring()
101 tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0); in ring_buffer__add()
114 mmap_sz = rb->page_size + 2 * (__u64)info.max_entries; in ring_buffer__add()
119 tmp = mmap(NULL, (size_t)mmap_sz, PROT_READ, MAP_SHARED, map_fd, rb->page_size); in ring_buffer__add()
128 r->data = tmp + rb->page_size; in ring_buffer__add()
178 rb->page_size = getpagesize(); in ring_buffer__new()
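
ringbuf.c is libbpf's internal side of the BPF ring buffer: one page for the consumer position, then the producer page plus a double-mapped data area of 2 * max_entries. Those mmap()s sit behind the public ring_buffer__new()/ring_buffer__poll() API. A minimal consumer sketch, assuming libbpf >= 0.7 for bpf_map_create() and sufficient privileges to create BPF maps:

    /* Illustrative sketch; link with -lbpf. */
    #include <stdio.h>
    #include <unistd.h>
    #include <bpf/bpf.h>
    #include <bpf/libbpf.h>

    static int handle_event(void *ctx, void *data, size_t size)
    {
        printf("sample of %zu bytes\n", size);
        return 0;
    }

    int main(void)
    {
        long page_size = sysconf(_SC_PAGESIZE);

        /* For ringbuf maps, max_entries is the data size in bytes and must be
         * a power-of-two multiple of the page size. */
        int map_fd = bpf_map_create(BPF_MAP_TYPE_RINGBUF, "rb", 0, 0,
                                    page_size, NULL);
        if (map_fd < 0)
            return 1;

        /* ring_buffer__new() performs the mmap()s shown in the excerpt. */
        struct ring_buffer *rb = ring_buffer__new(map_fd, handle_event, NULL, NULL);
        if (!rb)
            return 1;

        /* Poll once; with no BPF program producing records this just times out. */
        ring_buffer__poll(rb, 100 /* ms */);

        ring_buffer__free(rb);
        close(map_fd);
        return 0;
    }
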
/tools/testing/selftests/futex/functional/
futex_wait_uninitialized_heap.c
69 long page_size; in main() local
89 page_size = sysconf(_SC_PAGESIZE); in main()
91 buf = mmap(NULL, page_size, PROT_READ|PROT_WRITE, in main()
/tools/testing/selftests/arm64/mte/
check_child_memory.c
23 static size_t page_size; variable
149 page_size = getpagesize(); in main()
150 if (!page_size) { in main()
154 sizes[item - 3] = page_size - 1; in main()
155 sizes[item - 2] = page_size; in main()
156 sizes[item - 1] = page_size + 1; in main()
/tools/testing/selftests/kvm/lib/x86_64/
vmx.c
29 uint64_t page_size:1; member
406 TEST_ASSERT((nested_paddr % vm->page_size) == 0, in nested_pg_map()
409 nested_paddr, vm->page_size); in nested_pg_map()
413 paddr, vm->max_gfn, vm->page_size); in nested_pg_map()
414 TEST_ASSERT((paddr % vm->page_size) == 0, in nested_pg_map()
417 paddr, vm->page_size); in nested_pg_map()
421 paddr, vm->max_gfn, vm->page_size); in nested_pg_map()
441 pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size); in nested_pg_map()
453 pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size); in nested_pg_map()
465 pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size); in nested_pg_map()
[all …]
/tools/perf/arch/arm/util/
cs-etm.c
309 opts->auxtrace_mmap_pages = MiB(4) / page_size; in cs_etm_recording_options()
312 KiB(128) / page_size; in cs_etm_recording_options()
314 opts->mmap_pages = KiB(256) / page_size; in cs_etm_recording_options()
318 opts->mmap_pages = KiB(256) / page_size; in cs_etm_recording_options()
327 opts->auxtrace_mmap_pages * (size_t)page_size; in cs_etm_recording_options()
338 sz = round_up(sz, page_size) / page_size; in cs_etm_recording_options()
344 opts->auxtrace_mmap_pages * (size_t)page_size) { in cs_etm_recording_options()
347 opts->auxtrace_mmap_pages * (size_t)page_size); in cs_etm_recording_options()
362 opts->auxtrace_mmap_pages = MiB(4) / page_size; in cs_etm_recording_options()
364 opts->auxtrace_mmap_pages = KiB(128) / page_size; in cs_etm_recording_options()
[all …]
/tools/perf/arch/arm64/util/
arm-spe.c
96 opts->auxtrace_mmap_pages = MiB(4) / page_size; in arm_spe_recording_options()
98 opts->auxtrace_mmap_pages = KiB(128) / page_size; in arm_spe_recording_options()
100 opts->mmap_pages = KiB(256) / page_size; in arm_spe_recording_options()
106 size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size; in arm_spe_recording_options()
