
Searched refs:pages (Results 1 – 25 of 25) sorted by relevance

/kernel/dma/
remap.c
15 return area->pages; in dma_common_find_pages()
22 void *dma_common_pages_remap(struct page **pages, size_t size, in dma_common_pages_remap() argument
27 vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, in dma_common_pages_remap()
30 find_vm_area(vaddr)->pages = pages; in dma_common_pages_remap()
42 struct page **pages; in dma_common_contiguous_remap() local
46 pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); in dma_common_contiguous_remap()
47 if (!pages) in dma_common_contiguous_remap()
50 pages[i] = nth_page(page, i); in dma_common_contiguous_remap()
51 vaddr = vmap(pages, count, VM_DMA_COHERENT, prot); in dma_common_contiguous_remap()
52 kvfree(pages); in dma_common_contiguous_remap()
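The remap.c hits above show the contiguous-remap pattern: dma_common_contiguous_remap() builds a temporary struct page * array covering a physically contiguous region, hands it to vmap() for a fresh virtual mapping, then frees the array (the page-table entries, not the array, carry the mapping afterwards). A minimal sketch of the idea, with illustrative names and generic VM_MAP in place of the DMA-specific flag (remap_contiguous() is not a real kernel symbol):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static void *remap_contiguous(struct page *page, size_t size, pgprot_t prot)
    {
        size_t i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page **pages;
        void *vaddr;

        pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
            return NULL;
        for (i = 0; i < count; i++)
            pages[i] = nth_page(page, i);   /* page + i, highmem-safe */
        vaddr = vmap(pages, count, VM_MAP, prot);
        kvfree(pages);  /* array only needed while building the mapping */
        return vaddr;
    }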
pool.c
199 unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K); in dma_atomic_pool_init() local
200 pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES); in dma_atomic_pool_init()
201 atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K); in dma_atomic_pool_init()
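The pool.c hits size the atomic DMA pool proportionally to RAM, roughly 128 KiB per 1 GiB, clamped below by SZ_128K and above by MAX_ORDER_NR_PAGES. A worked example, assuming 4 KiB pages and 8 GiB of RAM:

    totalram_pages() = 8 GiB / 4 KiB = 2097152 pages
    pages            = 2097152 / (SZ_1G / SZ_128K)
                     = 2097152 / 8192             = 256 pages
    atomic_pool_size = max(256 << PAGE_SHIFT, SZ_128K)
                     = max(1 MiB, 128 KiB)        = 1 MiB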
coherent.c
46 int pages = size >> PAGE_SHIFT; in dma_init_coherent_memory() local
47 int bitmap_size = BITS_TO_LONGS(pages) * sizeof(long); in dma_init_coherent_memory()
74 dma_mem->size = pages; in dma_init_coherent_memory()
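The coherent.c hits size a per-device allocation bitmap at one bit per page of the declared region. A worked example, assuming a 1 MiB region, 4 KiB pages, and 64-bit longs:

    pages       = SZ_1M >> PAGE_SHIFT             = 256
    bitmap_size = BITS_TO_LONGS(256) * sizeof(long)
                = DIV_ROUND_UP(256, 64) * 8       = 32 bytes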
contiguous.c
279 bool dma_release_from_contiguous(struct device *dev, struct page *pages, in dma_release_from_contiguous() argument
282 return cma_release(dev_get_cma_area(dev), pages, count); in dma_release_from_contiguous()
Kconfig
191 For example, if your system defaults to 4KiB pages, the order value
222 dma_map_sg() API is used for general bulk mapping of pages rather than
debug.c
1070 if (page != stack_vm_area->pages[i]) in check_for_stack()
/kernel/
kexec_core.c
301 struct page *pages; in kimage_alloc_pages() local
305 pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order); in kimage_alloc_pages()
306 if (pages) { in kimage_alloc_pages()
309 pages->mapping = NULL; in kimage_alloc_pages()
310 set_page_private(pages, order); in kimage_alloc_pages()
313 SetPageReserved(pages + i); in kimage_alloc_pages()
315 arch_kexec_post_alloc_pages(page_address(pages), count, in kimage_alloc_pages()
320 clear_highpage(pages + i); in kimage_alloc_pages()
323 return pages; in kimage_alloc_pages()
367 struct page *pages; in kimage_alloc_normal_control_pages() local
[all …]
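The kexec_core.c hits show the higher-order allocation pattern used for kimage pages: allocate without __GFP_ZERO, record the order in page_private() for the free path, mark every constituent page Reserved, then zero by hand if the caller asked for it. A hedged sketch with illustrative names (alloc_reserved_block() is not a real kernel symbol):

    #include <linux/gfp.h>
    #include <linux/highmem.h>
    #include <linux/mm.h>

    static struct page *alloc_reserved_block(gfp_t gfp_mask, unsigned int order)
    {
        unsigned int i, count = 1U << order;
        struct page *pages;

        pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
        if (!pages)
            return NULL;
        pages->mapping = NULL;
        set_page_private(pages, order); /* read back at free time */
        for (i = 0; i < count; i++)
            SetPageReserved(pages + i);
        if (gfp_mask & __GFP_ZERO)
            for (i = 0; i < count; i++)
                clear_highpage(pages + i);
        return pages;
    }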
watch_queue.c
245 struct page **pages; in watch_queue_set_size() local
278 pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL); in watch_queue_set_size()
279 if (!pages) in watch_queue_set_size()
283 pages[i] = alloc_page(GFP_KERNEL); in watch_queue_set_size()
284 if (!pages[i]) in watch_queue_set_size()
286 pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE; in watch_queue_set_size()
296 wqueue->notes = pages; in watch_queue_set_size()
304 __free_page(pages[i]); in watch_queue_set_size()
305 kfree(pages); in watch_queue_set_size()
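The watch_queue.c hits allocate a pointer array plus one page per slot, unwinding the already-allocated pages on partial failure (hits 304 and 305 above are that error path). Note that kcalloc() is conventionally called as (n, size, flags); the swapped order in hit 278 is harmless only because kcalloc multiplies the two. A sketch of the pattern, names illustrative:

    #include <linux/gfp.h>
    #include <linux/slab.h>

    static struct page **alloc_page_array(unsigned int nr_pages)
    {
        struct page **pages;
        unsigned int i;

        pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
            return NULL;
        for (i = 0; i < nr_pages; i++) {
            pages[i] = alloc_page(GFP_KERNEL);
            if (!pages[i])
                goto err;
        }
        return pages;
    err:
        while (i-- > 0) /* free only what was allocated */
            __free_page(pages[i]);
        kfree(pages);
        return NULL;
    }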
kprobes.c
129 .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
148 list_for_each_entry_rcu(kip, &c->pages, list) { in __get_insn_slot()
192 list_add_rcu(&kip->list, &c->pages); in __get_insn_slot()
240 list_for_each_entry_safe(kip, next, &c->pages, list) { in collect_garbage_slots()
262 list_for_each_entry_rcu(kip, &c->pages, list) { in __free_insn_slot()
300 list_for_each_entry_rcu(kip, &c->pages, list) { in __is_insn_slot_addr()
319 list_for_each_entry_rcu(kip, &c->pages, list) { in kprobe_cache_get_kallsym()
340 .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
relay.c
1212 struct page *pages[PIPE_DEF_BUFFERS]; in subbuf_splice_actor() local
1215 .pages = pages, in subbuf_splice_actor()
1250 spd.pages[spd.nr_pages] = rbuf->page_array[pidx]; in subbuf_splice_actor()
fork.c
287 memcg_kmem_uncharge_page(vm->pages[i], 0); in free_thread_stack()
391 mod_lruvec_page_state(vm->pages[0], NR_KERNEL_STACK_KB, in account_kernel_stack()
417 ret = memcg_kmem_charge_page(vm->pages[i], GFP_KERNEL, in memcg_charge_kernel_stack()
/kernel/bpf/
ringbuf.c
37 struct page **pages; member
69 struct page **pages, *page; in bpf_ringbuf_area_alloc() local
91 array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages); in bpf_ringbuf_area_alloc()
93 pages = vmalloc_node(array_size, numa_node); in bpf_ringbuf_area_alloc()
95 pages = kmalloc_node(array_size, flags, numa_node); in bpf_ringbuf_area_alloc()
96 if (!pages) in bpf_ringbuf_area_alloc()
105 pages[i] = page; in bpf_ringbuf_area_alloc()
107 pages[nr_data_pages + i] = page; in bpf_ringbuf_area_alloc()
110 rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages, in bpf_ringbuf_area_alloc()
113 kmemleak_not_leak(pages); in bpf_ringbuf_area_alloc()
[all …]
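The ringbuf.c hits show why the vmap array holds nr_meta_pages + 2 * nr_data_pages entries: each data page is inserted twice (hits 105 and 107 above), so a record that wraps past the end of the ring stays virtually contiguous through the aliased second mapping. A sketch of just that step, with VM_MAP standing in for the real flags and illustrative names (ringbuf_map() is not a real kernel symbol):

    #include <linux/mm.h>
    #include <linux/vmalloc.h>

    /* pages[] has nr_meta + 2 * nr_data slots; the first
     * nr_meta + nr_data already hold freshly allocated pages. */
    static void *ringbuf_map(struct page **pages, int nr_meta, int nr_data)
    {
        int i;

        for (i = 0; i < nr_data; i++)   /* alias each data page once more */
            pages[nr_meta + nr_data + i] = pages[nr_meta + i];
        return vmap(pages, nr_meta + 2 * nr_data, VM_MAP, PAGE_KERNEL);
    }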
local_storage.c
480 static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages) in bpf_cgroup_storage_calculate_size() argument
486 *pages = round_up(sizeof(struct bpf_cgroup_storage) + size, in bpf_cgroup_storage_calculate_size()
490 *pages = round_up(round_up(size, 8) * num_possible_cpus(), in bpf_cgroup_storage_calculate_size()
504 u32 pages; in bpf_cgroup_storage_alloc() local
510 size = bpf_cgroup_storage_calculate_size(map, &pages); in bpf_cgroup_storage_alloc()
512 if (bpf_map_charge_memlock(map, pages)) in bpf_cgroup_storage_alloc()
538 bpf_map_uncharge_memlock(map, pages); in bpf_cgroup_storage_alloc()
565 u32 pages; in bpf_cgroup_storage_free() local
572 bpf_cgroup_storage_calculate_size(map, &pages); in bpf_cgroup_storage_free()
573 bpf_map_uncharge_memlock(map, pages); in bpf_cgroup_storage_free()
core.c
102 fp->pages = size / PAGE_SIZE; in bpf_prog_alloc_no_stats()
231 u32 pages, delta; in bpf_prog_realloc() local
235 pages = size / PAGE_SIZE; in bpf_prog_realloc()
236 if (pages <= fp_old->pages) in bpf_prog_realloc()
239 delta = pages - fp_old->pages; in bpf_prog_realloc()
248 memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE); in bpf_prog_realloc()
249 fp->pages = pages; in bpf_prog_realloc()
547 prog->aux->ksym.end = addr + hdr->pages * PAGE_SIZE; in bpf_prog_ksym_set_addr()
841 int bpf_jit_charge_modmem(u32 pages) in bpf_jit_charge_modmem() argument
843 if (atomic_long_add_return(pages, &bpf_jit_current) > in bpf_jit_charge_modmem()
[all …]
syscall.c
362 static int bpf_charge_memlock(struct user_struct *user, u32 pages) in bpf_charge_memlock() argument
366 if (atomic_long_add_return(pages, &user->locked_vm) > memlock_limit) { in bpf_charge_memlock()
367 atomic_long_sub(pages, &user->locked_vm); in bpf_charge_memlock()
373 static void bpf_uncharge_memlock(struct user_struct *user, u32 pages) in bpf_uncharge_memlock() argument
376 atomic_long_sub(pages, &user->locked_vm); in bpf_uncharge_memlock()
381 u32 pages = round_up(size, PAGE_SIZE) >> PAGE_SHIFT; in bpf_map_charge_init() local
389 ret = bpf_charge_memlock(user, pages); in bpf_map_charge_init()
395 mem->pages = pages; in bpf_map_charge_init()
403 bpf_uncharge_memlock(mem->user, mem->pages); in bpf_map_charge_finish()
416 int bpf_map_charge_memlock(struct bpf_map *map, u32 pages) in bpf_map_charge_memlock() argument
[all …]
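The syscall.c hits charge locked memory optimistically: add the page count first, compare the new total against the RLIMIT_MEMLOCK-derived limit, and subtract it back out on overflow. A minimal sketch of the pattern (charge_pages() is not a real kernel symbol):

    #include <linux/atomic.h>
    #include <linux/errno.h>

    static int charge_pages(atomic_long_t *locked, u32 pages,
                            unsigned long limit)
    {
        if (atomic_long_add_return(pages, locked) > limit) {
            atomic_long_sub(pages, locked); /* undo the charge */
            return -EPERM;
        }
        return 0;
    }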
/kernel/power/
snapshot.c
505 unsigned long pages; in create_zone_bm_rtree() local
507 pages = end - start; in create_zone_bm_rtree()
516 nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK); in create_zone_bm_rtree()
883 unsigned long bits, pfn, pages; in memory_bm_next_pfn() local
887 pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn; in memory_bm_next_pfn()
888 bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK); in memory_bm_next_pfn()
1701 unsigned long saveable, size, max_size, count, highmem, pages = 0; in hibernate_preallocate_memory() local
1760 pages = preallocate_image_highmem(save_highmem); in hibernate_preallocate_memory()
1761 pages += preallocate_image_memory(saveable - pages, avail_normal); in hibernate_preallocate_memory()
1766 pages = minimum_image_size(saveable); in hibernate_preallocate_memory()
[all …]
swap.c
916 unsigned long pages; in swsusp_write() local
919 pages = snapshot_get_image_size(); in swsusp_write()
926 if (!enough_swap(pages)) { in swsusp_write()
944 save_image(&handle, &snapshot, pages - 1) : in swsusp_write()
945 save_image_lzo(&handle, &snapshot, pages - 1); in swsusp_write()
1505 load_image(&handle, &snapshot, header->pages - 1) : in swsusp_read()
1506 load_image_lzo(&handle, &snapshot, header->pages - 1); in swsusp_read()
power.h
14 unsigned long pages; member
/kernel/trace/
tracing_map.c
291 if (!a->pages) in tracing_map_array_clear()
295 memset(a->pages[i], 0, PAGE_SIZE); in tracing_map_array_clear()
305 if (!a->pages) in tracing_map_array_free()
309 if (!a->pages[i]) in tracing_map_array_free()
311 kmemleak_free(a->pages[i]); in tracing_map_array_free()
312 free_page((unsigned long)a->pages[i]); in tracing_map_array_free()
315 kfree(a->pages); in tracing_map_array_free()
339 a->pages = kcalloc(a->n_pages, sizeof(void *), GFP_KERNEL); in tracing_map_array_alloc()
340 if (!a->pages) in tracing_map_array_alloc()
344 a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL); in tracing_map_array_alloc()
[all …]
ring_buffer.c
505 struct list_head *pages; member
1276 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1278 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1352 list = cpu_buffer->pages; in rb_set_head_page()
1477 struct list_head *head = rb_list_head(cpu_buffer->pages); in rb_check_pages()
1501 static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu) in __rb_allocate_pages() argument
1545 list_add(&bpage->list, pages); in __rb_allocate_pages()
1562 list_for_each_entry_safe(bpage, tmp, pages, list) { in __rb_allocate_pages()
1575 LIST_HEAD(pages); in rb_allocate_pages()
1579 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu)) in rb_allocate_pages()
[all …]
tracing_map.h
170 void **pages; member
174 (array->pages[idx >> array->entry_shift] + \
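The tracing_map hits implement a simple two-level array: entries live in individually allocated zeroed pages (tracing_map.c hit 344), and the macro at tracing_map.h line 174 locates element idx by using its high bits as a page index. A sketch of the lookup, assuming entry_shift is log2 of the entries per page and entry_mask selects the in-page slot (both inferred from the truncated macro):

    static void *map_array_elt(void **pages, unsigned int idx,
                               unsigned int entry_shift,
                               unsigned int entry_mask,
                               unsigned int entry_size)
    {
        /* high bits pick the page, low bits the slot within it */
        return pages[idx >> entry_shift] +
               (idx & entry_mask) * entry_size;
    }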
ftrace.c
404 struct ftrace_profile_page *pages; member
572 pg = stat->pages = stat->start; in ftrace_profile_reset()
588 int pages; in ftrace_profile_pages_init() local
592 if (stat->pages) in ftrace_profile_pages_init()
595 stat->pages = (void *)get_zeroed_page(GFP_KERNEL); in ftrace_profile_pages_init()
596 if (!stat->pages) in ftrace_profile_pages_init()
612 pg = stat->start = stat->pages; in ftrace_profile_pages_init()
614 pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); in ftrace_profile_pages_init()
616 for (i = 1; i < pages; i++) { in ftrace_profile_pages_init()
634 stat->pages = NULL; in ftrace_profile_pages_init()
[all …]
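The ftrace.c hits size the profiling buffer with DIV_ROUND_UP(functions, PROFILES_PER_PAGE). A worked example with hypothetical numbers, since the real record size depends on struct ftrace_profile and the architecture:

    PROFILES_PER_PAGE = PAGE_SIZE / sizeof(struct ftrace_profile)
                      = 4096 / 40 (hypothetical)   = 102
    pages             = DIV_ROUND_UP(30000, 102)   = 295 zeroed pages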
trace.c
6466 __free_page(spd->pages[idx]); in tracing_spd_release_pipe()
6526 .pages = pages_def, in tracing_splice_read_pipe()
6563 spd.pages[i] = alloc_page(GFP_KERNEL); in tracing_splice_read_pipe()
6564 if (!spd.pages[i]) in tracing_splice_read_pipe()
6571 page_address(spd.pages[i]), in tracing_splice_read_pipe()
6574 __free_page(spd.pages[i]); in tracing_splice_read_pipe()
7778 .pages = pages_def, in tracing_buffers_splice_read()
7841 spd.pages[i] = page; in tracing_buffers_splice_read()
/kernel/events/
uprobes.c
104 struct page *pages[2]; member
1499 area->xol_mapping.pages = area->pages; in __create_xol_area()
1500 area->pages[0] = alloc_page(GFP_HIGHUSER); in __create_xol_area()
1501 if (!area->pages[0]) in __create_xol_area()
1503 area->pages[1] = NULL; in __create_xol_area()
1510 arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE); in __create_xol_area()
1515 __free_page(area->pages[0]); in __create_xol_area()
1557 put_page(area->pages[0]); in uprobe_clear_state()
1624 arch_uprobe_copy_ixol(area->pages[0], xol_vaddr, in xol_get_insn_slot()
/kernel/sched/
fair.c
2646 void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags) in task_numa_fault() argument
2709 p->numa_pages_migrated += pages; in task_numa_fault()
2711 p->numa_faults_locality[2] += pages; in task_numa_fault()
2713 p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages; in task_numa_fault()
2714 p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages; in task_numa_fault()
2715 p->numa_faults_locality[local] += pages; in task_numa_fault()
2745 long pages, virtpages; in task_numa_work() local
2789 pages = sysctl_numa_balancing_scan_size; in task_numa_work()
2790 pages <<= 20 - PAGE_SHIFT; /* MB in pages */ in task_numa_work()
2791 virtpages = pages * 8; /* Scan up to this much virtual space */ in task_numa_work()
[all …]
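The fair.c hits convert the NUMA balancing scan size from megabytes to pages, then derive a virtual-address budget eight times larger. A worked example, assuming 4 KiB pages (PAGE_SHIFT = 12) and a scan size of 256 MB:

    pages     = 256 << (20 - PAGE_SHIFT)
              = 256 << 8                 = 65536 pages (256 MB)
    virtpages = 65536 * 8                = 524288 pages of virtual space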