/kernel/dma/
D | remap.c
      15  return area->pages;  in dma_common_find_pages()
      22  void *dma_common_pages_remap(struct page **pages, size_t size,  in dma_common_pages_remap() argument
      27  vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,  in dma_common_pages_remap()
      30  find_vm_area(vaddr)->pages = pages;  in dma_common_pages_remap()
      42  struct page **pages;  in dma_common_contiguous_remap() local
      46  pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);  in dma_common_contiguous_remap()
      47  if (!pages)  in dma_common_contiguous_remap()
      50  pages[i] = nth_page(page, i);  in dma_common_contiguous_remap()
      51  vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);  in dma_common_contiguous_remap()
      52  kvfree(pages);  in dma_common_contiguous_remap()
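The dma_common_contiguous_remap() lines above show a recurring idiom: to give a physically contiguous allocation a kernel virtual mapping with non-default protections, build a temporary struct page * array covering the region, vmap() it, then free the array. A minimal sketch of the pattern, assuming kernel context (remap_contiguous() is an illustrative name, and VM_MAP stands in for the VM_DMA_COHERENT flag the real code uses so the area can be recognized later via find_vm_area()):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* Illustrative sketch, not the actual dma_common_contiguous_remap(). */
    static void *remap_contiguous(struct page *page, size_t size, pgprot_t prot)
    {
        size_t count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page **pages;
        void *vaddr;
        size_t i;

        /* One struct page pointer per page in the region. */
        pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
            return NULL;
        for (i = 0; i < count; i++)
            pages[i] = nth_page(page, i);   /* safe across sparse memmaps */

        /* Populate page tables in vmalloc space with the requested prot. */
        vaddr = vmap(pages, count, VM_MAP, prot);

        /* vmap() does not keep this array, so it can be freed at once. */
        kvfree(pages);
        return vaddr;   /* undone later with vunmap(vaddr) */
    }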
D | pool.c
     196  unsigned long pages = totalram_pages() / (SZ_1G / SZ_128K);  in dma_atomic_pool_init() local
     197  pages = min_t(unsigned long, pages, MAX_ORDER_NR_PAGES);  in dma_atomic_pool_init()
     198  atomic_pool_size = max_t(size_t, pages << PAGE_SHIFT, SZ_128K);  in dma_atomic_pool_init()
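The pool.c lines size the atomic pool proportionally to RAM: 128 KiB of pool per 1 GiB of memory (totalram_pages() / (SZ_1G / SZ_128K)), capped at one maximum-order allocation and floored at 128 KiB. A small userspace re-creation of the arithmetic; the RAM size and the MAX_ORDER_NR_PAGES value are example assumptions:

    #include <stdio.h>

    int main(void)
    {
        unsigned long page_shift = 12;                           /* 4 KiB pages */
        unsigned long totalram_pages = 4UL << (30 - page_shift); /* 4 GiB box */
        unsigned long sz_1g = 1UL << 30, sz_128k = 128UL << 10;
        unsigned long max_order_nr_pages = 1UL << 10;            /* order-10 cap */

        /* 128 KiB of pool per 1 GiB of RAM ... */
        unsigned long pages = totalram_pages / (sz_1g / sz_128k);
        /* ... at most one maximum-order allocation ... */
        if (pages > max_order_nr_pages)
            pages = max_order_nr_pages;
        /* ... and never less than 128 KiB. */
        unsigned long pool = pages << page_shift;
        if (pool < sz_128k)
            pool = sz_128k;

        printf("atomic pool: %lu KiB\n", pool >> 10);   /* 512 KiB for 4 GiB */
        return 0;
    }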
D | coherent.c
      42  int pages = size >> PAGE_SHIFT;  in dma_init_coherent_memory() local
      55  dma_mem->bitmap = bitmap_zalloc(pages, GFP_KERNEL);  in dma_init_coherent_memory()
      62  dma_mem->size = pages;  in dma_init_coherent_memory()
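dma_init_coherent_memory() sizes its allocation bitmap in pages: one bit tracks one page of the coherent region, and dma_mem->size is a page count, not bytes. A toy pool in the same spirit, assuming kernel context; struct page_pool and its helpers are made-up names, and the real coherent.c hands out power-of-two blocks with bitmap_find_free_region() rather than this arbitrary-length variant:

    #include <linux/bitmap.h>
    #include <linux/errno.h>
    #include <linux/mm.h>
    #include <linux/slab.h>

    struct page_pool {
        unsigned long *bitmap;   /* one bit per page */
        int nr_pages;
    };

    static int pool_init(struct page_pool *p, size_t size)
    {
        p->nr_pages = size >> PAGE_SHIFT;
        p->bitmap = bitmap_zalloc(p->nr_pages, GFP_KERNEL);
        return p->bitmap ? 0 : -ENOMEM;
    }

    /* Claim @nr consecutive free pages; returns the first page index. */
    static int pool_alloc(struct page_pool *p, int nr)
    {
        unsigned long pos = bitmap_find_next_zero_area(p->bitmap, p->nr_pages,
                                                       0, nr, 0);
        if (pos >= p->nr_pages)
            return -ENOMEM;
        bitmap_set(p->bitmap, pos, nr);
        return pos;
    }

    static void pool_free(struct page_pool *p, int pos, int nr)
    {
        bitmap_clear(p->bitmap, pos, nr);
    }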
D | contiguous.c
     277  bool dma_release_from_contiguous(struct device *dev, struct page *pages,  in dma_release_from_contiguous() argument
     280  return cma_release(dev_get_cma_area(dev), pages, count);  in dma_release_from_contiguous()
D | Kconfig
     199  For example, if your system defaults to 4KiB pages, the order value
     230  dma_map_sg() API is used for general bulk mapping of pages rather than
D | mapping.c
     686  return vmap(sgt_handle(sgt)->pages, count, VM_MAP, PAGE_KERNEL);  in dma_vmap_noncontiguous()
     711  return vm_map_pages(vma, sgt_handle(sgt)->pages, count);  in dma_mmap_noncontiguous()
/kernel/
D | kexec_core.c
     302  struct page *pages;  in kimage_alloc_pages() local
     306  pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);  in kimage_alloc_pages()
     307  if (pages) {  in kimage_alloc_pages()
     310  pages->mapping = NULL;  in kimage_alloc_pages()
     311  set_page_private(pages, order);  in kimage_alloc_pages()
     314  SetPageReserved(pages + i);  in kimage_alloc_pages()
     316  arch_kexec_post_alloc_pages(page_address(pages), count,  in kimage_alloc_pages()
     321  clear_highpage(pages + i);  in kimage_alloc_pages()
     324  return pages;  in kimage_alloc_pages()
     368  struct page *pages;  in kimage_alloc_normal_control_pages() local
      [all …]
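The kimage_alloc_pages() lines illustrate how a single struct page * names an order-sized block: alloc_pages(order) returns the head page, and pages + i (for i up to 2^order - 1) addresses the constituent pages. A hedged sketch of the same shape, assuming kernel context (grab_reserved_block() is an illustrative name, and the arch_kexec_post_alloc_pages() step is omitted):

    #include <linux/gfp.h>
    #include <linux/highmem.h>
    #include <linux/mm.h>

    static struct page *grab_reserved_block(gfp_t gfp_mask, unsigned int order)
    {
        /* Strip __GFP_ZERO: zeroing is done by hand below. */
        struct page *pages = alloc_pages(gfp_mask & ~__GFP_ZERO, order);
        unsigned int i, count = 1 << order;

        if (!pages)
            return NULL;

        pages->mapping = NULL;            /* not owned by any address_space */
        set_page_private(pages, order);   /* remember the order for freeing */
        for (i = 0; i < count; i++)
            SetPageReserved(pages + i);   /* hide the block from the rest of mm */
        if (gfp_mask & __GFP_ZERO)
            for (i = 0; i < count; i++)
                clear_highpage(pages + i);
        return pages;
    }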
D | watch_queue.c
     242  struct page **pages;  in watch_queue_set_size() local
     274  pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL);  in watch_queue_set_size()
     275  if (!pages)  in watch_queue_set_size()
     279  pages[i] = alloc_page(GFP_KERNEL);  in watch_queue_set_size()
     280  if (!pages[i])  in watch_queue_set_size()
     282  pages[i]->index = i * WATCH_QUEUE_NOTES_PER_PAGE;  in watch_queue_set_size()
     290  wqueue->notes = pages;  in watch_queue_set_size()
     298  __free_page(pages[i]);  in watch_queue_set_size()
     299  kfree(pages);  in watch_queue_set_size()
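Two details stand out in the watch_queue_set_size() lines: the quoted kcalloc() call passes its element size before the element count, the reverse of the conventional (n, size) order, which is harmless because the two are only multiplied under an overflow check; and the tail of the function unwinds every page taken so far when one allocation fails. A sketch of that allocate-or-unwind idiom, assuming kernel context (alloc_note_pages() is an illustrative name):

    #include <linux/gfp.h>
    #include <linux/slab.h>

    static struct page **alloc_note_pages(int nr_pages)
    {
        struct page **pages;
        int i;

        pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
            return NULL;

        for (i = 0; i < nr_pages; i++) {
            pages[i] = alloc_page(GFP_KERNEL);
            if (!pages[i])
                goto unwind;
        }
        return pages;

    unwind:
        while (--i >= 0)          /* free only what was actually taken */
            __free_page(pages[i]);
        kfree(pages);
        return NULL;
    }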
D | kprobes.c
     134  .pages = LIST_HEAD_INIT(kprobe_insn_slots.pages),
     153  list_for_each_entry_rcu(kip, &c->pages, list) {  in __get_insn_slot()
     193  list_add_rcu(&kip->list, &c->pages);  in __get_insn_slot()
     241  list_for_each_entry_safe(kip, next, &c->pages, list) {  in collect_garbage_slots()
     264  list_for_each_entry_rcu(kip, &c->pages, list) {  in __free_insn_slot()
     302  list_for_each_entry_rcu(kip, &c->pages, list) {  in __is_insn_slot_addr()
     321  list_for_each_entry_rcu(kip, &c->pages, list) {  in kprobe_cache_get_kallsym()
     352  .pages = LIST_HEAD_INIT(kprobe_optinsn_slots.pages),
D | relay.c
    1124  struct page *pages[PIPE_DEF_BUFFERS];  in subbuf_splice_actor() local
    1127  .pages = pages,  in subbuf_splice_actor()
    1162  spd.pages[spd.nr_pages] = rbuf->page_array[pidx];  in subbuf_splice_actor()
/kernel/module/
D | decompress.c
      22  sizeof(info->pages), GFP_KERNEL);  in module_extend_max_pages()
      26  memcpy(new_pages, info->pages, info->max_pages * sizeof(info->pages));  in module_extend_max_pages()
      27  kvfree(info->pages);  in module_extend_max_pages()
      28  info->pages = new_pages;  in module_extend_max_pages()
      49  info->pages[info->used_pages++] = page;  in module_get_next_page()
     225  info->hdr = vmap(info->pages, info->used_pages, VM_MAP, PAGE_KERNEL);  in module_decompress()
     247  __free_page(info->pages[i]);  in module_decompress_cleanup()
     249  kvfree(info->pages);  in module_decompress_cleanup()
     251  info->pages = NULL;  in module_decompress_cleanup()
D | internal.h
      72  struct page **pages;  member
/kernel/bpf/
D | ringbuf.c
      38  struct page **pages;  member
     102  struct page **pages, *page;  in bpf_ringbuf_area_alloc() local
     124  array_size = (nr_meta_pages + 2 * nr_data_pages) * sizeof(*pages);  in bpf_ringbuf_area_alloc()
     125  pages = bpf_map_area_alloc(array_size, numa_node);  in bpf_ringbuf_area_alloc()
     126  if (!pages)  in bpf_ringbuf_area_alloc()
     135  pages[i] = page;  in bpf_ringbuf_area_alloc()
     137  pages[nr_data_pages + i] = page;  in bpf_ringbuf_area_alloc()
     140  rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,  in bpf_ringbuf_area_alloc()
     143  kmemleak_not_leak(pages);  in bpf_ringbuf_area_alloc()
     144  rb->pages = pages;  in bpf_ringbuf_area_alloc()
      [all …]
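The bpf_ringbuf_area_alloc() lines show the double-mapping trick: each data page goes into the pages[] array twice, at index i and again at nr_data_pages + i, so the vmap() that follows maps the data area into virtual memory two times back to back. A record that wraps past the end of the ring is therefore still virtually contiguous and never needs a split copy. A sketch of just the doubling step, assuming kernel context (illustrative names; the real code allocates with bpf_map_area_alloc() and also maps the metadata pages in front):

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    /* Map @nr pages twice in a row: addr + pos stays valid even when a
     * ring position wraps past the end of the data area. */
    static void *ringbuf_double_map(struct page **data_pages, int nr)
    {
        struct page **pages;
        void *addr;
        int i;

        pages = kvmalloc_array(2 * nr, sizeof(*pages), GFP_KERNEL);
        if (!pages)
            return NULL;

        for (i = 0; i < nr; i++) {
            pages[i] = data_pages[i];        /* first mapping */
            pages[nr + i] = data_pages[i];   /* alias right behind it */
        }

        addr = vmap(pages, 2 * nr, VM_MAP, PAGE_KERNEL);
        kvfree(pages);
        return addr;
    }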
D | local_storage.c
     475  static size_t bpf_cgroup_storage_calculate_size(struct bpf_map *map, u32 *pages)  in bpf_cgroup_storage_calculate_size() argument
     481  *pages = round_up(sizeof(struct bpf_cgroup_storage) + size,  in bpf_cgroup_storage_calculate_size()
     485  *pages = round_up(round_up(size, 8) * num_possible_cpus(),  in bpf_cgroup_storage_calculate_size()
     499  u32 pages;  in bpf_cgroup_storage_alloc() local
     505  size = bpf_cgroup_storage_calculate_size(map, &pages);  in bpf_cgroup_storage_alloc()
D | core.c
     108  fp->pages = size / PAGE_SIZE;  in bpf_prog_alloc_no_stats()
     236  u32 pages;  in bpf_prog_realloc() local
     239  pages = size / PAGE_SIZE;  in bpf_prog_realloc()
     240  if (pages <= fp_old->pages)  in bpf_prog_realloc()
     245  memcpy(fp, fp_old, fp_old->pages * PAGE_SIZE);  in bpf_prog_realloc()
     246  fp->pages = pages;  in bpf_prog_realloc()
    1360  fp = __vmalloc(fp_other->pages * PAGE_SIZE, gfp_flags);  in bpf_prog_clone_create()
    1366  memcpy(fp, fp_other, fp_other->pages * PAGE_SIZE);  in bpf_prog_clone_create()
/kernel/power/
D | snapshot.c
     544  unsigned long pages;  in create_zone_bm_rtree() local
     546  pages = end - start;  in create_zone_bm_rtree()
     555  nr_blocks = DIV_ROUND_UP(pages, BM_BITS_PER_BLOCK);  in create_zone_bm_rtree()
     929  unsigned long bits, pfn, pages;  in memory_bm_next_pfn() local
     933  pages = bm->cur.zone->end_pfn - bm->cur.zone->start_pfn;  in memory_bm_next_pfn()
     934  bits = min(pages - bm->cur.node_pfn, BM_BITS_PER_BLOCK);  in memory_bm_next_pfn()
    1783  unsigned long saveable, size, max_size, count, highmem, pages = 0;  in hibernate_preallocate_memory() local
    1849  pages = preallocate_image_highmem(save_highmem);  in hibernate_preallocate_memory()
    1850  pages += preallocate_image_memory(saveable - pages, avail_normal);  in hibernate_preallocate_memory()
    1855  pages = minimum_image_size(saveable);  in hibernate_preallocate_memory()
      [all …]
D | swap.c
     923  unsigned long pages;  in swsusp_write() local
     926  pages = snapshot_get_image_size();  in swsusp_write()
     934  if (!enough_swap(pages)) {  in swsusp_write()
     952  save_image(&handle, &snapshot, pages - 1) :  in swsusp_write()
     953  save_image_lzo(&handle, &snapshot, pages - 1);  in swsusp_write()
    1510  load_image(&handle, &snapshot, header->pages - 1) :  in swsusp_read()
    1511  load_image_lzo(&handle, &snapshot, header->pages - 1);  in swsusp_read()
D | power.h
      16  unsigned long pages;  member
/kernel/trace/
D | tracing_map.c
     291  if (!a->pages)  in tracing_map_array_clear()
     295  memset(a->pages[i], 0, PAGE_SIZE);  in tracing_map_array_clear()
     305  if (!a->pages)  in tracing_map_array_free()
     309  if (!a->pages[i])  in tracing_map_array_free()
     311  kmemleak_free(a->pages[i]);  in tracing_map_array_free()
     312  free_page((unsigned long)a->pages[i]);  in tracing_map_array_free()
     315  kfree(a->pages);  in tracing_map_array_free()
     339  a->pages = kcalloc(a->n_pages, sizeof(void *), GFP_KERNEL);  in tracing_map_array_alloc()
     340  if (!a->pages)  in tracing_map_array_alloc()
     344  a->pages[i] = (void *)get_zeroed_page(GFP_KERNEL);  in tracing_map_array_alloc()
      [all …]
D | trace_events_user.c
      85  struct page *pages;  member
     158  static void set_page_reservations(char *pages, bool set)  in set_page_reservations() argument
     163  void *addr = pages + (PAGE_SIZE * page);  in set_page_reservations()
     177  if (group->pages)  in user_event_group_destroy()
     178  __free_pages(group->pages, MAX_PAGE_ORDER);  in user_event_group_destroy()
     250  group->pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, MAX_PAGE_ORDER);  in user_event_group_create()
     252  if (!group->pages)  in user_event_group_create()
     255  group->register_page_data = page_address(group->pages);  in user_event_group_create()
    1750  char *pages;  in user_status_mmap() local
    1760  pages = group->register_page_data;  in user_status_mmap()
      [all …]
D | tracing_map.h
     170  void **pages;  member
     174  (array->pages[idx >> array->entry_shift] + \
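Line 174 of tracing_map.h is the lookup half of a chunked array: entries are stored in page-sized chunks so no large contiguous allocation is ever needed, and an index splits into a chunk number (idx >> entry_shift) and a slot within the chunk (idx & entry_mask). A userspace re-creation of the addressing, with illustrative names; with 4096-byte chunks and 64-byte entries, entry_shift would be 6 and entry_mask 63:

    #include <stddef.h>

    struct chunked_array {
        void **pages;        /* one pointer per page-sized chunk */
        size_t entry_size;   /* bytes per entry */
        size_t entry_shift;  /* log2(entries per chunk) */
        size_t entry_mask;   /* entries per chunk - 1 */
    };

    /* Entry @idx lives in chunk idx >> entry_shift, slot idx & entry_mask. */
    static void *chunked_array_elt(struct chunked_array *a, size_t idx)
    {
        return (char *)a->pages[idx >> a->entry_shift] +
               (idx & a->entry_mask) * a->entry_size;
    }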
D | ring_buffer.c
     469  struct list_head *pages;  member
    1332  rb_list_head_clear(cpu_buffer->pages);  in rb_head_page_deactivate()
    1334  list_for_each(hd, cpu_buffer->pages)  in rb_head_page_deactivate()
    1407  list = cpu_buffer->pages;  in rb_set_head_page()
    1532  struct list_head *head = rb_list_head(cpu_buffer->pages);  in rb_check_pages()
    1557  long nr_pages, struct list_head *pages)  in __rb_allocate_pages() argument
    1603  list_add(&bpage->list, pages);  in __rb_allocate_pages()
    1620  list_for_each_entry_safe(bpage, tmp, pages, list) {  in __rb_allocate_pages()
    1633  LIST_HEAD(pages);  in rb_allocate_pages()
    1637  if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages))  in rb_allocate_pages()
      [all …]
D | ftrace.c
     401  struct ftrace_profile_page *pages;  member
     569  pg = stat->pages = stat->start;  in ftrace_profile_reset()
     585  int pages;  in ftrace_profile_pages_init() local
     589  if (stat->pages)  in ftrace_profile_pages_init()
     592  stat->pages = (void *)get_zeroed_page(GFP_KERNEL);  in ftrace_profile_pages_init()
     593  if (!stat->pages)  in ftrace_profile_pages_init()
     609  pg = stat->start = stat->pages;  in ftrace_profile_pages_init()
     611  pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);  in ftrace_profile_pages_init()
     613  for (i = 1; i < pages; i++) {  in ftrace_profile_pages_init()
     631  stat->pages = NULL;  in ftrace_profile_pages_init()
      [all …]
/kernel/events/
D | uprobes.c
     104  struct page *pages[2];  member
    1497  area->xol_mapping.pages = area->pages;  in __create_xol_area()
    1498  area->pages[0] = alloc_page(GFP_HIGHUSER);  in __create_xol_area()
    1499  if (!area->pages[0])  in __create_xol_area()
    1501  area->pages[1] = NULL;  in __create_xol_area()
    1508  arch_uprobe_copy_ixol(area->pages[0], 0, &insn, UPROBE_SWBP_INSN_SIZE);  in __create_xol_area()
    1513  __free_page(area->pages[0]);  in __create_xol_area()
    1555  put_page(area->pages[0]);  in uprobe_clear_state()
    1622  arch_uprobe_copy_ixol(area->pages[0], xol_vaddr,  in xol_get_insn_slot()
/kernel/sched/
D | fair.c
    2836  void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)  in task_numa_fault() argument
    2908  p->numa_pages_migrated += pages;  in task_numa_fault()
    2910  p->numa_faults_locality[2] += pages;  in task_numa_fault()
    2912  p->numa_faults[task_faults_idx(NUMA_MEMBUF, mem_node, priv)] += pages;  in task_numa_fault()
    2913  p->numa_faults[task_faults_idx(NUMA_CPUBUF, cpu_node, priv)] += pages;  in task_numa_fault()
    2914  p->numa_faults_locality[local] += pages;  in task_numa_fault()
    2945  long pages, virtpages;  in task_numa_work() local
    2989  pages = sysctl_numa_balancing_scan_size;  in task_numa_work()
    2990  pages <<= 20 - PAGE_SHIFT; /* MB in pages */  in task_numa_work()
    2991  virtpages = pages * 8; /* Scan up to this much virtual space */  in task_numa_work()
      [all …]
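The task_numa_work() lines end with a unit conversion worth spelling out: sysctl_numa_balancing_scan_size is expressed in megabytes, and shifting left by 20 - PAGE_SHIFT turns megabytes into pages; virtpages then caps the scan at eight times that much virtual address space. A userspace re-creation with example values (the 256 MB figure is an example, not necessarily the sysctl default on a given kernel):

    #include <stdio.h>

    int main(void)
    {
        unsigned int page_shift = 12;  /* 4 KiB pages */
        long pages = 256;              /* scan size in MB */

        pages <<= 20 - page_shift;     /* MB -> pages: 256 << 8 = 65536 */
        long virtpages = pages * 8;    /* virtual-space cap, in pages */

        printf("%ld pages, up to %ld virtual pages (%ld MiB)\n",
               pages, virtpages, virtpages >> (20 - page_shift));
        return 0;
    }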