Searched refs:pages (Results 1 – 25 of 27) sorted by relevance

/kernel/dma/
remap.c
17 return area->pages; in dma_common_find_pages()
24 void *dma_common_pages_remap(struct page **pages, size_t size, in dma_common_pages_remap() argument
29 vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, in dma_common_pages_remap()
32 find_vm_area(vaddr)->pages = pages; in dma_common_pages_remap()
44 struct page **pages; in dma_common_contiguous_remap() local
48 pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); in dma_common_contiguous_remap()
49 if (!pages) in dma_common_contiguous_remap()
52 pages[i] = nth_page(page, i); in dma_common_contiguous_remap()
53 vaddr = vmap(pages, count, VM_DMA_COHERENT, prot); in dma_common_contiguous_remap()
54 kvfree(pages); in dma_common_contiguous_remap()
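The dma_common_contiguous_remap() hits above show a common idiom: vmap() only accepts an array of struct page pointers, so even a physically contiguous allocation needs a temporary page array before it can be remapped. A minimal sketch of that idiom, with an illustrative function name and error handling trimmed:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* Build a page array covering one contiguous allocation, vmap() it,
     * then free the array: vmap() copies the pointers it needs. */
    static void *contiguous_remap_sketch(struct page *page, size_t size,
                                         pgprot_t prot)
    {
            int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
            struct page **pages;
            void *vaddr;
            int i;

            pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
            if (!pages)
                    return NULL;
            for (i = 0; i < count; i++)
                    pages[i] = nth_page(page, i);
            vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);
            kvfree(pages);
            return vaddr;
    }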
pool.c
196 unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K); in dma_atomic_pool_init() local
197 pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES); in dma_atomic_pool_init()
198 atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K); in dma_atomic_pool_init()
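The sizing at pool.c:196-198 works out to roughly 128 KiB of atomic pool per GiB of RAM. For example, with 4 KiB pages a 4 GiB machine has totalram_pages() = 1048576 and SZ_1G / SZ_128K = 8192, giving pages = 128 and atomic_pool_size = 128 << 12 = 512 KiB, which clears the SZ_128K floor and the MAX_ORDER_NR_PAGES cap.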
coherent.c
42 int pages = size >> PAGE_SHIFT; in dma_init_coherent_memory() local
55 dma_mem->bitmap = bitmap_zalloc(pages, GFP_KERNEL); in dma_init_coherent_memory()
62 dma_mem->size = pages; in dma_init_coherent_memory()
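coherent.c tracks its reserved region with a bitmap allocator, one bit per page. A minimal sketch of the bookkeeping set up in dma_init_coherent_memory(), with illustrative struct and function names:

    #include <linux/bitmap.h>
    #include <linux/gfp.h>

    struct coherent_region_sketch {
            unsigned long *bitmap;  /* one bit per page; set = allocated */
            int size;               /* region size in pages */
    };

    static int coherent_region_init_sketch(struct coherent_region_sketch *mem,
                                           size_t bytes)
    {
            int pages = bytes >> PAGE_SHIFT;

            mem->bitmap = bitmap_zalloc(pages, GFP_KERNEL);
            if (!mem->bitmap)
                    return -ENOMEM;
            mem->size = pages;
            return 0;
    }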
contiguous.c
328 bool dma_release_from_contiguous(struct device *dev, struct page *pages, in dma_release_from_contiguous() argument
331 return cma_release(dev_get_cma_area(dev), pages, count); in dma_release_from_contiguous()
Kconfig
242 For example, if your system defaults to 4KiB pages, the order value
273 dma_map_sg() API is used for general bulk mapping of pages rather than
/kernel/
kexec_core.c
269 struct page *pages; in kimage_alloc_pages() local
273 pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order); in kimage_alloc_pages()
274 if (pages) { in kimage_alloc_pages()
277 pages->mapping = NULL; in kimage_alloc_pages()
278 set_page_private(pages, order); in kimage_alloc_pages()
281 SetPageReserved(pages + i); in kimage_alloc_pages()
283 arch_kexec_post_alloc_pages(page_address(pages), count, in kimage_alloc_pages()
288 clear_highpage(pages + i); in kimage_alloc_pages()
291 return pages; in kimage_alloc_pages()
335 struct page *pages; in kimage_alloc_normal_control_pages() local
[all …]
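kimage_alloc_pages() is the usual higher-order allocation pattern: grab 2^order pages in one call, record the order in page_private() so the free path knows how much to give back, and mark every constituent page Reserved. A condensed sketch (the arch_kexec_post_alloc_pages() hook is omitted):

    #include <linux/mm.h>
    #include <linux/highmem.h>

    static struct page *kimage_alloc_sketch(gfp_t gfp_mask, unsigned int order)
    {
            unsigned int i, count = 1 << order;
            struct page *pages;

            pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
            if (!pages)
                    return NULL;
            pages->mapping = NULL;          /* not backed by any mapping */
            set_page_private(pages, order); /* remembered by the free path */
            for (i = 0; i < count; i++)
                    SetPageReserved(pages + i);
            if (gfp_mask & __GFP_ZERO)      /* zero manually, after arch hooks */
                    for (i = 0; i < count; i++)
                            clear_highpage(pages + i);
            return pages;
    }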
watch_queue.c
241 struct page **pages; in watch_queue_set_size() local
282 pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL); in watch_queue_set_size()
283 if (!pages) in watch_queue_set_size()
287 pages[i] = alloc_page(GFP_KERNEL); in watch_queue_set_size()
288 if (!pages[i]) in watch_queue_set_size()
290 pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE; in watch_queue_set_size()
298 wqueue->notes = pages; in watch_queue_set_size()
306 __free_page(pages[i]); in watch_queue_set_size()
307 kfree(pages); in watch_queue_set_size()
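The watch_queue hits are the canonical alloc-loop-with-unwind idiom for page arrays: on failure, free exactly the pages allocated so far, then the array itself. Condensed into one illustrative helper:

    #include <linux/gfp.h>
    #include <linux/slab.h>

    static struct page **alloc_page_array_sketch(int nr_pages)
    {
            struct page **pages;
            int i;

            pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
            if (!pages)
                    return NULL;
            for (i = 0; i < nr_pages; i++) {
                    pages[i] = alloc_page(GFP_KERNEL);
                    if (!pages[i])
                            goto error;
            }
            return pages;
    error:
            while (--i >= 0)        /* unwind only what was allocated */
                    __free_page(pages[i]);
            kfree(pages);
            return NULL;
    }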
kprobes.c
134 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
153 list_for_each_entry_rcu(kip, &c->pages, list) { in __get_insn_slot()
193 list_add_rcu(&kip->list, &c->pages); in __get_insn_slot()
241 list_for_each_entry_safe(kip, next, &c->pages, list) { in collect_garbage_slots()
264 list_for_each_entry_rcu(kip, &c->pages, list) { in __free_insn_slot()
302 list_for_each_entry_rcu(kip, &c->pages, list) { in __is_insn_slot_addr()
321 list_for_each_entry_rcu(kip, &c->pages, list) { in kprobe_cache_get_kallsym()
352 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
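Every kprobes hit above is the same traversal: instruction-slot pages hang off c->pages, readers walk the list with list_for_each_entry_rcu(), and writers use list_add_rcu() under the cache mutex. The shape of a lookup, with illustrative types:

    #include <linux/rculist.h>

    struct insn_page_sketch {
            struct list_head list;
            void *insns;            /* one page of instruction slots */
    };

    /* Call under rcu_read_lock() (or the cache mutex, for writers). */
    static bool addr_in_cache_sketch(struct list_head *pages, unsigned long addr)
    {
            struct insn_page_sketch *kip;

            list_for_each_entry_rcu(kip, pages, list) {
                    if (addr >= (unsigned long)kip->insns &&
                        addr < (unsigned long)kip->insns + PAGE_SIZE)
                            return true;
            }
            return false;
    }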
fork.c
273 ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, 0); in memcg_charge_kernel_stack()
281 memcg_kmem_uncharge_page(vm->pages[i], 0); in memcg_charge_kernel_stack()
530 mod_lruvec_page_state(vm->pages[i], NR_KERNEL_STACK_KB, in account_kernel_stack()
551 memcg_kmem_uncharge_page(vm->pages[i], 0); in exit_task_stack_account()
/kernel/module/
decompress.c
22 sizeof(info->pages), GFP_KERNEL); in module_extend_max_pages()
26 memcpy(new_pages, info->pages, info->max_pages * sizeof(info->pages)); in module_extend_max_pages()
27 kvfree(info->pages); in module_extend_max_pages()
28 info->pages = new_pages; in module_extend_max_pages()
49 info->pages[info->used_pages++] = page; in module_get_next_page()
317 info->hdr = vmap(info->pages, info->used_pages, VM_MAP, PAGE_KERNEL); in module_decompress()
339 __free_page(info->pages[i]); in module_decompress_cleanup()
341 kvfree(info->pages); in module_decompress_cleanup()
343 info->pages = NULL; in module_decompress_cleanup()
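module_extend_max_pages() grows its page array by hand: allocate the larger array, copy the old pointers across, free the old array, and swap the pointer in. The idiom, sketched with illustrative parameters:

    #include <linux/mm.h>
    #include <linux/string.h>

    static int extend_page_array_sketch(struct page ***pagesp,
                                        unsigned int *max_pages,
                                        unsigned int extra)
    {
            struct page **new_pages;

            new_pages = kvmalloc_array(*max_pages + extra, sizeof(*new_pages),
                                       GFP_KERNEL);
            if (!new_pages)
                    return -ENOMEM;
            memcpy(new_pages, *pagesp, *max_pages * sizeof(*new_pages));
            kvfree(*pagesp);
            *pagesp = new_pages;
            *max_pages += extra;
            return 0;
    }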
internal.h
78 struct page **pages; member
/kernel/bpf/
ringbuf.c
30 struct page **pages; member
96 struct page **pages, *page; in bpf_ringbuf_area_alloc() local
118 array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages); in bpf_ringbuf_area_alloc()
119 pages = bpf_map_area_alloc(array_size, numa_node); in bpf_ringbuf_area_alloc()
120 if (!pages) in bpf_ringbuf_area_alloc()
129 pages[i] = page; in bpf_ringbuf_area_alloc()
131 pages[nr_data_pages + i] = page; in bpf_ringbuf_area_alloc()
134 rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages, in bpf_ringbuf_area_alloc()
137 kmemleak_not_leak(pages); in bpf_ringbuf_area_alloc()
138 rb->pages = pages; in bpf_ringbuf_area_alloc()
[all …]
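The ringbuf.c lines implement the classic double-mapping trick: each data page is stored in the page array twice (at i and again at nr_data_pages + i), so vmap() lays the data area out back-to-back twice and a record that wraps past the end of the buffer is still virtually contiguous. A simplified sketch of the layout step:

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    static void *ringbuf_vmap_sketch(int nr_meta, int nr_data,
                                     struct page ***pagesp)
    {
            int i, nr_pages = nr_meta + nr_data;
            struct page **pages, *page;
            void *addr;

            /* One slot per page, plus a second slot for each data page. */
            pages = kvcalloc(nr_meta + 2 * nr_data, sizeof(*pages), GFP_KERNEL);
            if (!pages)
                    return NULL;
            for (i = 0; i < nr_pages; i++) {
                    page = alloc_page(GFP_KERNEL | __GFP_ZERO);
                    if (!page)
                            goto error;
                    pages[i] = page;
                    if (i >= nr_meta)
                            pages[nr_data + i] = page;      /* aliased slot */
            }
            addr = vmap(pages, nr_meta + 2 * nr_data, VM_MAP, PAGE_KERNEL);
            if (addr) {
                    *pagesp = pages;        /* caller frees these at teardown */
                    return addr;
            }
    error:
            while (--i >= 0)
                    __free_page(pages[i]);
            kvfree(pages);
            return NULL;
    }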
arena.c
423 struct page **pages; in arena_alloc_pages() local
441 pages = kvcalloc(page_cnt, sizeof(struct page *), GFP_KERNEL); in arena_alloc_pages()
442 if (!pages) in arena_alloc_pages()
457 node_id, page_cnt, pages); in arena_alloc_pages()
470 kern_vm_start + uaddr32 + page_cnt * PAGE_SIZE, pages); in arena_alloc_pages()
473 __free_page(pages[i]); in arena_alloc_pages()
476 kvfree(pages); in arena_alloc_pages()
481 kvfree(pages); in arena_alloc_pages()
local_storage.c
482 static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages) in bpf_cgroup_storage_calculate_size() argument
488 *pages = round_up(sizeof(struct bpf_cgroup_storage) + size, in bpf_cgroup_storage_calculate_size()
492 *pages = round_up(round_up(size, 8) * num_possible_cpus(), in bpf_cgroup_storage_calculate_size()
506 u32 pages; in bpf_cgroup_storage_alloc() local
512 size = bpf_cgroup_storage_calculate_size(map, &pages); in bpf_cgroup_storage_alloc()
core.c
129 fp->pages = size / PAGE_SIZE; in bpf_prog_alloc_no_stats()
261 u32 pages; in bpf_prog_realloc() local
264 pages = size / PAGE_SIZE; in bpf_prog_realloc()
265 if (pages <= fp_old->pages) in bpf_prog_realloc()
270 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE); in bpf_prog_realloc()
271 fp->pages = pages; in bpf_prog_realloc()
1447 fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags); in bpf_prog_clone_create()
1453 memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE); in bpf_prog_clone_create()
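core.c sizes program images in whole pages, which makes the grow path in bpf_prog_realloc() cheap in the common case: if the rounded-up page count does not increase, the old buffer is reused as-is; only genuine growth pays for a fresh __vmalloc() plus a copy. A simplified sketch (the real function also transfers fp->aux ownership and frees the old image):

    #include <linux/filter.h>
    #include <linux/vmalloc.h>

    static struct bpf_prog *prog_realloc_sketch(struct bpf_prog *fp_old,
                                                unsigned int size, gfp_t gfp)
    {
            struct bpf_prog *fp;
            u32 pages;

            size = round_up(size, PAGE_SIZE);
            pages = size / PAGE_SIZE;
            if (pages <= fp_old->pages)
                    return fp_old;          /* still fits, reuse in place */

            fp = __vmalloc(size, gfp | __GFP_ZERO);
            if (!fp)
                    return NULL;
            memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);
            fp->pages = pages;
            return fp;
    }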
syscall.c
486 unsigned long nr_pages, struct page **pages) in bpf_map_alloc_pages() argument
501 pages[i] = pg; in bpf_map_alloc_pages()
505 __free_page(pages[j]); in bpf_map_alloc_pages()
2334 prog->pages * 1ULL << PAGE_SHIFT, in bpf_prog_show_fdinfo()
/kernel/power/
snapshot.c
546 unsigned long pages; in create_zone_bm_rtree() local
548 pages = end - start; in create_zone_bm_rtree()
557 nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK); in create_zone_bm_rtree()
931 unsigned long bits, pfn, pages; in memory_bm_next_pfn() local
935 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn; in memory_bm_next_pfn()
936 bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK); in memory_bm_next_pfn()
1832 unsigned long saveable, size, max_size, count, highmem, pages = 0; in hibernate_preallocate_memory() local
1898 pages = preallocate_image_highmem(save_highmem); in hibernate_preallocate_memory()
1899 pages += preallocate_image_memory(saveable - pages, avail_normal); in hibernate_preallocate_memory()
1904 pages = minimum_image_size(saveable); in hibernate_preallocate_memory()
[all …]
swap.c
954 unsigned long pages; in swsusp_write() local
957 pages = snapshot_get_image_size(); in swsusp_write()
963 trace_android_vh_hibernated_do_mem_alloc(pages, flags, &error); in swsusp_write()
976 if (!enough_swap(pages)) { in swsusp_write()
994 save_image(&handle, &snapshot, pages - 1) : in swsusp_write()
995 save_compressed_image(&handle, &snapshot, pages - 1); in swsusp_write()
1568 load_image(&handle, &snapshot, header->pages - 1) : in swsusp_read()
1569 load_compressed_image(&handle, &snapshot, header->pages - 1); in swsusp_read()
power.h
17 unsigned long pages; member
/kernel/trace/
tracing_map.c
291 if (!a->pages) in tracing_map_array_clear()
295 memset(a->pages[i], 0, PAGE_SIZE); in tracing_map_array_clear()
305 if (!a->pages) in tracing_map_array_free()
309 if (!a->pages[i]) in tracing_map_array_free()
311 kmemleak_free(a->pages[i]); in tracing_map_array_free()
312 free_page((unsigned long)a->pages[i]); in tracing_map_array_free()
315 kfree(a->pages); in tracing_map_array_free()
339 a->pages = kcalloc(a->n_pages, sizeof(void *), GFP_KERNEL); in tracing_map_array_alloc()
340 if (!a->pages) in tracing_map_array_alloc()
344 a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL); in tracing_map_array_alloc()
[all …]
ring_buffer.c
449 struct list_head *pages; member
1264 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1266 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1339 list = cpu_buffer->pages; in rb_set_head_page()
1501 head = rb_list_head(cpu_buffer->pages); in rb_check_pages()
2031 long nr_pages, struct list_head *pages) in __rb_allocate_pages() argument
2087 list_add_tail(&bpage->list, pages); in __rb_allocate_pages()
2119 list_for_each_entry_safe(bpage, tmp, pages, list) { in __rb_allocate_pages()
2158 LIST_HEAD(pages); in rb_allocate_pages()
2162 if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages)) in rb_allocate_pages()
[all …]
tracing_map.h
170 void **pages; member
174 (array->pages[idx >> array->entry_shift] + \
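The tracing_map macro indexes this page array with a two-level scheme: the high bits of idx (idx >> entry_shift) select the page, and the low bits select the entry within it. For example, with 32-byte entries on 4 KiB pages there are 128 entries per page, so entry_shift is 7 and idx = 300 resolves to pages[2] at entry 300 & 127 = 44, byte offset 44 * 32 = 1408.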
ftrace.c
435 struct ftrace_profile_page *pages; member
600 pg = stat->pages = stat->start; in ftrace_profile_reset()
616 int pages; in ftrace_profile_pages_init() local
620 if (stat->pages) in ftrace_profile_pages_init()
623 stat->pages = (void *)get_zeroed_page(GFP_KERNEL); in ftrace_profile_pages_init()
624 if (!stat->pages) in ftrace_profile_pages_init()
640 pg = stat->start = stat->pages; in ftrace_profile_pages_init()
642 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); in ftrace_profile_pages_init()
644 for (i = 1; i < pages; i++) { in ftrace_profile_pages_init()
662 stat->pages = NULL; in ftrace_profile_pages_init()
[all …]
trace.c
6591 __free_page(spd->pages[idx]); in tracing_spd_release_pipe()
6651 .pages = pages_def, in tracing_splice_read_pipe()
6688 spd.pages[i] = alloc_page(GFP_KERNEL); in tracing_splice_read_pipe()
6689 if (!spd.pages[i]) in tracing_splice_read_pipe()
6696 page_address(spd.pages[i]), in tracing_splice_read_pipe()
6700 __free_page(spd.pages[i]); in tracing_splice_read_pipe()
8088 .pages = pages_def, in tracing_buffers_splice_read()
8154 spd.pages[i] = page; in tracing_buffers_splice_read()
9128 int pages; in buffer_subbuf_size_write() local
9137 pages = DIV_ROUND_UP(val, PAGE_SIZE); in buffer_subbuf_size_write()
[all …]
/kernel/sched/
fair.c
3221 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) in task_numa_fault() argument
3293 p->numa_pages_migrated += pages; in task_numa_fault()
3295 p->numa_faults_locality[2] += pages; in task_numa_fault()
3297 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; in task_numa_fault()
3298 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; in task_numa_fault()
3299 p->numa_faults_locality[local] += pages; in task_numa_fault()
3368 long pages, virtpages; in task_numa_work() local
3414 pages = sysctl_numa_balancing_scan_size; in task_numa_work()
3415 pages <<= 20 - PAGE_SHIFT; /* MB in pages */ in task_numa_work()
3416 virtpages = pages * 8; /* Scan up to this much virtual space */ in task_numa_work()
[all …]
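The shift at fair.c:3415 converts the scan-size sysctl from MB to pages: pages <<= 20 - PAGE_SHIFT. With the default sysctl_numa_balancing_scan_size of 256 MB and 4 KiB pages, that is 256 << 8 = 65536 pages, and virtpages = 8 * 65536 = 524288 pages, i.e. up to 2 GiB of virtual address space considered per scan pass.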
