/kernel/dma/
remap.c:
    14    struct page **dma_common_find_pages(void *cpu_addr)    in dma_common_find_pages()
    23    static struct vm_struct *__dma_common_pages_remap(struct page **pages,    in __dma_common_pages_remap()
    44    void *dma_common_pages_remap(struct page **pages, size_t size,    in dma_common_pages_remap()
    62    void *dma_common_contiguous_remap(struct page *page, size_t size,    in dma_common_contiguous_remap() argument
    66    struct page **pages;    in dma_common_contiguous_remap()
    69    pages = kmalloc(sizeof(struct page *) << get_order(size), GFP_KERNEL);    in dma_common_contiguous_remap()
    74    pages[i] = nth_page(page, i);    in dma_common_contiguous_remap()
    127   struct page *page;    in dma_atomic_pool_init() local
    132   page = dma_alloc_from_contiguous(NULL, nr_pages,    in dma_atomic_pool_init()
    135   page = alloc_pages(dma_atomic_pool_gfp(), pool_size_order);    in dma_atomic_pool_init()
    [all …]
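The remap.c hits above all belong to one pattern: a physically contiguous allocation is described by a temporary array of struct page pointers (one per page, filled with nth_page()) and then given a virtually contiguous kernel mapping. The sketch below is a simplified illustration of that pattern, not the kernel's exact code; the name contiguous_remap_sketch is made up.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Simplified sketch: build a page-pointer array for a contiguous block and
 * map it with vmap(). The array is only needed while the mapping is built. */
static void *contiguous_remap_sketch(struct page *page, size_t size, pgprot_t prot)
{
        unsigned int count = size >> PAGE_SHIFT;
        struct page **pages;
        void *vaddr;
        unsigned int i;

        pages = kmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
        if (!pages)
                return NULL;

        for (i = 0; i < count; i++)
                pages[i] = nth_page(page, i);   /* i-th page of the contiguous block */

        vaddr = vmap(pages, count, VM_MAP, prot);
        kfree(pages);           /* PTEs are installed; the temporary array is no longer needed */
        return vaddr;
}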
direct.c:
    85    struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,    in __dma_direct_alloc_pages()
    90    struct page *page = NULL;    in __dma_direct_alloc_pages() local
    100   page = dma_alloc_contiguous(dev, alloc_size, gfp);    in __dma_direct_alloc_pages()
    101   if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {    in __dma_direct_alloc_pages()
    102   dma_free_contiguous(dev, page, alloc_size);    in __dma_direct_alloc_pages()
    103   page = NULL;    in __dma_direct_alloc_pages()
    106   if (!page)    in __dma_direct_alloc_pages()
    107   page = alloc_pages_node(node, gfp, get_order(alloc_size));    in __dma_direct_alloc_pages()
    108   if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {    in __dma_direct_alloc_pages()
    109   dma_free_contiguous(dev, page, size);    in __dma_direct_alloc_pages()
    [all …]
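Read together, the direct.c lines above show a two-step fallback: try the contiguous (CMA) allocator first, verify the physical address is reachable by the device, and otherwise fall back to the normal buddy allocator. The condensed sketch below mirrors that logic for illustration only; dma_coherent_ok() is a static helper inside kernel/dma/direct.c, so this is not code that could be built outside that file, and the name direct_alloc_sketch is made up.

/* Condensed sketch of the fallback visible above: CMA first, buddy second,
 * each result checked against the device's addressing limits. */
static struct page *direct_alloc_sketch(struct device *dev, size_t size, gfp_t gfp)
{
        size_t alloc_size = PAGE_ALIGN(size);
        struct page *page;

        page = dma_alloc_contiguous(dev, alloc_size, gfp);
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                dma_free_contiguous(dev, page, alloc_size);
                page = NULL;
        }
        if (!page)
                page = alloc_pages_node(dev_to_node(dev), gfp, get_order(alloc_size));
        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                dma_free_contiguous(dev, page, alloc_size);
                page = NULL;    /* caller retries with a more restrictive GFP mask */
        }
        return page;
}

In the real function a failure at this point leads to retries with more restrictive zones (GFP_DMA32 and then GFP_DMA) before giving up.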
contiguous.c:
    192   struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,    in dma_alloc_from_contiguous()
    211   bool dma_release_from_contiguous(struct device *dev, struct page *pages,    in dma_release_from_contiguous()
    231   struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)    in dma_alloc_contiguous()
    234   struct page *page = NULL;    in dma_alloc_contiguous() local
    247   page = cma_alloc(cma, count, cma_align, gfp & __GFP_NOWARN);    in dma_alloc_contiguous()
    250   return page;    in dma_alloc_contiguous()
    264   void dma_free_contiguous(struct device *dev, struct page *page, size_t size)    in dma_free_contiguous() argument
    266   if (!cma_release(dev_get_cma_area(dev), page,    in dma_free_contiguous()
    268   __free_pages(page, get_order(size));    in dma_free_contiguous()
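dma_alloc_contiguous() and dma_free_contiguous() in the listing above wrap the CMA allocator with a plain-page fallback: cma_alloc() is tried for the device's CMA area, and on the free side cma_release() returns false for pages that did not come from CMA, in which case __free_pages() is used. The fragment below is a hedged usage sketch of that pair from the caller's point of view; my_dev and BUF_SIZE are made-up names, and these helpers are internal to the DMA core, while drivers normally go through dma_alloc_coherent().

/* Hypothetical caller of the pair shown above (names my_dev/BUF_SIZE invented). */
struct page *page = dma_alloc_contiguous(my_dev, BUF_SIZE, GFP_KERNEL);

if (page) {
        void *vaddr = page_address(page);       /* valid for lowmem pages */

        /* ... fill or DMA into the physically contiguous buffer ... */

        dma_free_contiguous(my_dev, page, BUF_SIZE);    /* falls back to __free_pages() if not CMA */
}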
virt.c:
    29    static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page,    in dma_virt_map_page() argument
    34    return (uintptr_t)(page_address(page) + offset);    in dma_virt_map_page()
debug.c:
    561   void debug_dma_assert_idle(struct page *page)    in debug_dma_assert_idle() argument
    573   if (!page)    in debug_dma_assert_idle()
    576   cln = (phys_addr_t) page_to_pfn(page) << CACHELINE_PER_PAGE_SHIFT;    in debug_dma_assert_idle()
    1091  struct page *page, size_t offset)    in check_for_stack() argument
    1098  if (PageHighMem(page))    in check_for_stack()
    1100  addr = page_address(page) + offset;    in check_for_stack()
    1108  if (page != stack_vm_area->pages[i])    in check_for_stack()
    1252  void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,    in debug_dma_map_page() argument
    1269  entry->pfn = page_to_pfn(page);    in debug_dma_map_page()
    1276  check_for_stack(dev, page, offset);    in debug_dma_map_page()
    [all …]
mapping.c:
    115   struct page *page;    in dma_common_get_sgtable() local
    128   page = pfn_to_page(pfn);    in dma_common_get_sgtable()
    130   page = virt_to_page(cpu_addr);    in dma_common_get_sgtable()
    135   sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);    in dma_common_get_sgtable()
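The mapping.c lines show dma_common_get_sgtable() resolving a coherent buffer back to its struct page (pfn_to_page() for remapped buffers, virt_to_page() otherwise) and describing it with a single scatterlist entry. A minimal sketch of that construction, covering only the virt_to_page() case, follows; get_sgtable_sketch is an invented name.

#include <linux/mm.h>
#include <linux/scatterlist.h>

/* Minimal sketch: describe one linearly-mapped buffer with a one-entry sg table. */
static int get_sgtable_sketch(struct sg_table *sgt, void *cpu_addr, size_t size)
{
        struct page *page = virt_to_page(cpu_addr);     /* non-vmalloc/non-remapped case */
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
        return ret;
}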
dummy.c:
    14    static dma_addr_t dma_dummy_map_page(struct device *dev, struct page *page,    in dma_dummy_map_page() argument
coherent.c:
    198   int page = (vaddr - mem->virt_base) >> PAGE_SHIFT;    in __dma_release_from_coherent() local
    202   bitmap_release_region(mem->bitmap, page, order);    in __dma_release_from_coherent()
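The two coherent.c lines are the release half of a per-device bitmap allocator: the page index is recovered from the virtual offset into the pool and the corresponding 2^order-page region is returned to the bitmap. The sketch below shows both halves of that scheme under the assumption that the pool is managed with the bitmap_*_region() helpers (which is how coherent.c allocates); the _sketch names are invented.

#include <linux/bitmap.h>
#include <linux/mm.h>

/* Allocate a 2^order-page region from a pool described by a bitmap (one bit per page). */
static void *coherent_alloc_sketch(unsigned long *bitmap, unsigned int pool_pages,
                                   void *virt_base, int order)
{
        int pageno = bitmap_find_free_region(bitmap, pool_pages, order);

        if (pageno < 0)
                return NULL;
        return virt_base + ((unsigned long)pageno << PAGE_SHIFT);
}

/* Release path, mirroring the excerpt: offset -> page index -> bitmap region. */
static void coherent_release_sketch(unsigned long *bitmap, void *virt_base,
                                    void *vaddr, int order)
{
        int pageno = (vaddr - virt_base) >> PAGE_SHIFT;

        bitmap_release_region(bitmap, pageno, order);
}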
/kernel/
kexec_core.c:
    144   static struct page *kimage_alloc_page(struct kimage *image,
    299   static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)    in kimage_alloc_pages()
    301   struct page *pages;    in kimage_alloc_pages()
    326   static void kimage_free_pages(struct page *page)    in kimage_free_pages() argument
    330   order = page_private(page);    in kimage_free_pages()
    333   arch_kexec_pre_free_pages(page_address(page), count);    in kimage_free_pages()
    336   ClearPageReserved(page + i);    in kimage_free_pages()
    337   __free_pages(page, order);    in kimage_free_pages()
    342   struct page *page, *next;    in kimage_free_page_list() local
    344   list_for_each_entry_safe(page, next, list, lru) {    in kimage_free_page_list()
    [all …]
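kimage_alloc_pages() and kimage_free_pages() above rely on two conventions: the allocation order is stashed in page_private() so the free path can recover it, and every constituent page is marked reserved while kexec owns it. A hedged sketch of that pairing (omitting the kimage bookkeeping and the arch hooks) follows; the _sketch names are invented.

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *kexec_pages_alloc_sketch(gfp_t gfp_mask, unsigned int order)
{
        struct page *pages = alloc_pages(gfp_mask, order);
        unsigned int i;

        if (!pages)
                return NULL;
        set_page_private(pages, order);         /* remember the order for the free path */
        for (i = 0; i < (1U << order); i++)
                SetPageReserved(pages + i);
        return pages;
}

static void kexec_pages_free_sketch(struct page *page)
{
        unsigned int order = page_private(page);
        unsigned int i;

        for (i = 0; i < (1U << order); i++)
                ClearPageReserved(page + i);
        __free_pages(page, order);
}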
crash_core.c:
    417   VMCOREINFO_STRUCT_SIZE(page);    in crash_save_vmcoreinfo_init()
    423   VMCOREINFO_OFFSET(page, flags);    in crash_save_vmcoreinfo_init()
    424   VMCOREINFO_OFFSET(page, _refcount);    in crash_save_vmcoreinfo_init()
    425   VMCOREINFO_OFFSET(page, mapping);    in crash_save_vmcoreinfo_init()
    426   VMCOREINFO_OFFSET(page, lru);    in crash_save_vmcoreinfo_init()
    427   VMCOREINFO_OFFSET(page, _mapcount);    in crash_save_vmcoreinfo_init()
    428   VMCOREINFO_OFFSET(page, private);    in crash_save_vmcoreinfo_init()
    429   VMCOREINFO_OFFSET(page, compound_dtor);    in crash_save_vmcoreinfo_init()
    430   VMCOREINFO_OFFSET(page, compound_order);    in crash_save_vmcoreinfo_init()
    431   VMCOREINFO_OFFSET(page, compound_head);    in crash_save_vmcoreinfo_init()
profile.c:
    336   struct page *page;    in profile_dead_cpu() local
    344   page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);    in profile_dead_cpu()
    346   __free_page(page);    in profile_dead_cpu()
    355   struct page *page;    in profile_prepare_cpu() local
    363   page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);    in profile_prepare_cpu()
    364   if (!page) {    in profile_prepare_cpu()
    368   per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);    in profile_prepare_cpu()
relay.c:
    44    struct page *page;    in relay_buf_fault() local
    51    page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT));    in relay_buf_fault()
    52    if (!page)    in relay_buf_fault()
    54    get_page(page);    in relay_buf_fault()
    55    vmf->page = page;    in relay_buf_fault()
    71    static struct page **relay_alloc_page_array(unsigned int n_pages)    in relay_alloc_page_array()
    73    const size_t pa_size = n_pages * sizeof(struct page *);    in relay_alloc_page_array()
    82    static void relay_free_page_array(struct page **array)    in relay_free_page_array()
    1175  rbuf = (struct rchan_buf *)page_private(buf->page);    in relay_pipe_buf_release()
    1209  struct page *pages[PIPE_DEF_BUFFERS];    in subbuf_splice_actor()
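relay_buf_fault() above is the standard recipe for exposing a vmap()/vmalloc()-backed buffer to user space: translate the faulting offset to the backing struct page with vmalloc_to_page(), take a reference, and hand the page back through vmf->page. A self-contained sketch of that handler shape follows; buf_start and buf_pages are invented parameters standing in for the relay buffer fields.

#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Fault-handler sketch for a buffer that lives in vmalloc/vmap space. */
static vm_fault_t vmalloc_buf_fault_sketch(struct vm_fault *vmf,
                                           void *buf_start, unsigned long buf_pages)
{
        struct page *page;

        if (vmf->pgoff >= buf_pages)
                return VM_FAULT_SIGBUS;

        page = vmalloc_to_page(buf_start + (vmf->pgoff << PAGE_SHIFT));
        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);                 /* reference dropped when the mapping goes away */
        vmf->page = page;
        return 0;
}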
futex.c:
    531   struct page *page, *tail;    in get_futex_key() local
    568   err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);    in get_futex_key()
    574   err = get_user_pages_fast(address, 1, 0, &page);    in get_futex_key()
    600   tail = page;    in get_futex_key()
    601   page = compound_head(page);    in get_futex_key()
    602   mapping = READ_ONCE(page->mapping);    in get_futex_key()
    627   lock_page(page);    in get_futex_key()
    628   shmem_swizzled = PageSwapCache(page) || page->mapping;    in get_futex_key()
    629   unlock_page(page);    in get_futex_key()
    630   put_page(page);    in get_futex_key()
    [all …]
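The futex.c hits trace get_futex_key() pinning the user page that holds the futex word: get_user_pages_fast() with FOLL_WRITE, a read-only retry when the mapping is not writable, then compound_head() so the key is stable for compound/THP pages, with ->mapping inspected (under lock_page() in the tricky shmem case) to tell anonymous from file-backed memory. The sketch below covers only the pinning step and is deliberately simpler than the real function, which gates the read-only fallback and rechecks the mapping; futex_pin_sketch is an invented name.

#include <linux/errno.h>
#include <linux/mm.h>

static int futex_pin_sketch(unsigned long address, struct page **pagep)
{
        int ret = get_user_pages_fast(address, 1, FOLL_WRITE, pagep);

        if (ret == -EFAULT)             /* mapping may be read-only (e.g. FUTEX_WAIT) */
                ret = get_user_pages_fast(address, 1, 0, pagep);
        if (ret < 0)
                return ret;
        if (ret != 1)
                return -EFAULT;         /* nothing was pinned */

        *pagep = compound_head(*pagep); /* key off the head page of a compound page */
        return 0;
}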
cfi.c:
    64    unsigned long page = ptr >> PAGE_SHIFT;    in ptr_to_shadow() local
    66    if (unlikely(page < s->r.min_page))    in ptr_to_shadow()
    69    index = page - s->r.min_page;    in ptr_to_shadow()
scs.c:
    93    static struct page *__scs_page(struct task_struct *tsk)    in __scs_page()
    144   static struct page *__scs_page(struct task_struct *tsk)    in __scs_page()
kcov.c:
    448   struct page *page;    in kcov_mmap() local
    466   page = vmalloc_to_page(kcov->area + off);    in kcov_mmap()
    467   if (vm_insert_page(vma, vma->vm_start + off, page))    in kcov_mmap()
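kcov_mmap() above inserts each page of the vmalloc()ed coverage buffer into the user VMA with vm_insert_page(). The loop below sketches that shape on its own; mmap_vmalloc_area_sketch is an invented name and error handling is reduced to returning the first failure.

#include <linux/mm.h>
#include <linux/vmalloc.h>

static int mmap_vmalloc_area_sketch(struct vm_area_struct *vma, void *area,
                                    unsigned long size)
{
        unsigned long off;

        for (off = 0; off < size; off += PAGE_SIZE) {
                struct page *page = vmalloc_to_page(area + off);
                int ret = vm_insert_page(vma, vma->vm_start + off, page);

                if (ret)
                        return ret;
        }
        return 0;
}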
/kernel/power/
snapshot.c:
    80    static int swsusp_page_is_free(struct page *);
    81    static void swsusp_set_page_forbidden(struct page *);
    82    static void swsusp_unset_page_forbidden(struct page *);
    192   static struct page *alloc_image_page(gfp_t gfp_mask)    in alloc_image_page()
    194   struct page *page;    in alloc_image_page() local
    196   page = alloc_page(gfp_mask);    in alloc_image_page()
    197   if (page) {    in alloc_image_page()
    198   swsusp_set_page_forbidden(page);    in alloc_image_page()
    199   swsusp_set_page_free(page);    in alloc_image_page()
    201   return page;    in alloc_image_page()
    [all …]
swap.c:
    241   struct page *page = bio_first_page_all(bio);    in hib_end_io() local
    250   put_page(page);    in hib_end_io()
    252   flush_icache_range((unsigned long)page_address(page),    in hib_end_io()
    253   (unsigned long)page_address(page) + PAGE_SIZE);    in hib_end_io()
    266   struct page *page = virt_to_page(addr);    in hib_submit_io() local
    275   if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {    in hib_submit_io()
    679   unsigned char *page = NULL;    in save_image_lzo() local
    692   page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);    in save_image_lzo()
    693   if (!page) {    in save_image_lzo()
    836   memcpy(page, data[thr].cmp + off, PAGE_SIZE);    in save_image_lzo()
    [all …]
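In hib_submit_io() the hibernation code does I/O one lowmem page at a time: the buffer's virtual address is turned back into its struct page with virt_to_page() and attached to a bio, bailing out if the block layer will not take the full PAGE_SIZE in a single vector. The fragment below sketches just that attach-and-submit step against an already prepared bio; hib_add_page_sketch is an invented name and the real function also handles synchronous completion and error accounting.

#include <linux/bio.h>
#include <linux/errno.h>
#include <linux/mm.h>

static int hib_add_page_sketch(struct bio *bio, void *addr)
{
        struct page *page = virt_to_page(addr);         /* addr must be a lowmem address */

        if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
                bio_put(bio);
                return -EFAULT;         /* block layer refused the whole page */
        }
        submit_bio(bio);
        return 0;
}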
/kernel/events/
ring_buffer.c:
    234   handle->page = (offset >> page_shift) & (rb->nr_pages - 1);    in __perf_output_begin()
    236   handle->addr = rb->data_pages[handle->page] + offset;    in __perf_output_begin()
    567   static struct page *rb_alloc_aux_page(int node, int order)    in rb_alloc_aux_page()
    569   struct page *page;    in rb_alloc_aux_page() local
    575   page = alloc_pages_node(node, PERF_AUX_GFP, order);    in rb_alloc_aux_page()
    576   } while (!page && order--);    in rb_alloc_aux_page()
    578   if (page && order) {    in rb_alloc_aux_page()
    585   split_page(page, order);    in rb_alloc_aux_page()
    586   SetPagePrivate(page);    in rb_alloc_aux_page()
    587   set_page_private(page, order);    in rb_alloc_aux_page()
    [all …]
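rb_alloc_aux_page() above shows the usual trick for opportunistic high-order allocations: try the requested order and walk it down on failure, then split a multi-page block into order-0 pages while recording the original order in page_private() so the free side can undo the split. A hedged sketch of that allocator follows; aux_page_alloc_sketch is an invented name and the GFP flags stand in for the file's PERF_AUX_GFP definition.

#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *aux_page_alloc_sketch(int node, int order)
{
        struct page *page;

        do {
                page = alloc_pages_node(node,
                                        GFP_KERNEL | __GFP_ZERO | __GFP_NOWARN | __GFP_NORETRY,
                                        order);
        } while (!page && order--);     /* retry with a smaller order on failure */

        if (page && order > 0) {
                split_page(page, order);        /* expose the block as order-0 pages */
                SetPagePrivate(page);
                set_page_private(page, order);  /* the free path reads this back */
        }
        return page;
}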
uprobes.c:
    104   struct page *pages[2];
    155   struct page *old_page, struct page *new_page)    in __replace_page()
    159   .page = compound_head(old_page),    in __replace_page()
    250   static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)    in copy_from_page() argument
    252   void *kaddr = kmap_atomic(page);    in copy_from_page()
    257   static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)    in copy_to_page() argument
    259   void *kaddr = kmap_atomic(page);    in copy_to_page()
    264   static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)    in verify_opcode() argument
    278   copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);    in verify_opcode()
    377   struct page *page;    in __update_ref_ctr() local
    [all …]
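copy_from_page() and copy_to_page() above are thin kmap_atomic() wrappers: the page backing the probed user address is mapped temporarily and the bytes at the in-page offset of vaddr are copied out or in. Since the excerpt only shows their first lines, here is a hedged reconstruction of that helper shape (the _sketch suffix marks the names as invented):

#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

static void copy_from_page_sketch(struct page *page, unsigned long vaddr,
                                  void *dst, int len)
{
        void *kaddr = kmap_atomic(page);

        memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
        kunmap_atomic(kaddr);
}

static void copy_to_page_sketch(struct page *page, unsigned long vaddr,
                                const void *src, int len)
{
        void *kaddr = kmap_atomic(page);

        memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
        kunmap_atomic(kaddr);
}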
internal.h:
    95    extern struct page *
    145   handle->page++; \
    146   handle->page &= rb->nr_pages - 1; \
    147   handle->addr = rb->data_pages[handle->page]; \
/kernel/trace/
ring_buffer.c:
    325   struct buffer_data_page *page; /* Actual data page */    member
    354   free_page((unsigned long)bpage->page);    in free_buffer_page()
    870   struct buffer_page *page, struct list_head *list)    in rb_is_head_page() argument
    876   if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)    in rb_is_head_page()
    889   static bool rb_is_reader_page(struct buffer_page *page)    in rb_is_reader_page() argument
    891   struct list_head *list = page->list.prev;    in rb_is_reader_page()
    893   return rb_list_head(list->next) != &page->list;    in rb_is_reader_page()
    1010  struct buffer_page *page;    in rb_set_head_page() local
    1022  page = head = cpu_buffer->head_page;    in rb_set_head_page()
    1031  if (rb_is_head_page(cpu_buffer, page, page->list.prev)) {    in rb_set_head_page()
    [all …]
/kernel/bpf/
stackmap.c:
    248   struct page *page;    in stack_map_get_build_id() local
    256   page = find_get_page(vma->vm_file->f_mapping, 0);    in stack_map_get_build_id()
    257   if (!page)    in stack_map_get_build_id()
    261   page_addr = kmap_atomic(page);    in stack_map_get_build_id()
    278   put_page(page);    in stack_map_get_build_id()
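stack_map_get_build_id() above peeks at the first page of the mapped ELF file without faulting, which matters because it can run from a perf/NMI-like context: find_get_page() only succeeds if the page is already in the page cache, and the header bytes are then read through a kmap_atomic() mapping before the reference is dropped. The sketch below shows that lookup in isolation; peek_first_page_sketch is an invented name.

#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/string.h>

static int peek_first_page_sketch(struct vm_area_struct *vma, void *buf, size_t len)
{
        struct page *page;
        void *page_addr;

        if (!vma->vm_file)
                return -EINVAL;

        page = find_get_page(vma->vm_file->f_mapping, 0);
        if (!page)
                return -EFAULT;         /* not resident; cannot fault it in from this context */

        page_addr = kmap_atomic(page);
        memcpy(buf, page_addr, min_t(size_t, len, PAGE_SIZE));
        kunmap_atomic(page_addr);
        put_page(page);
        return 0;
}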
cpumap.c:
    290   struct page *page = virt_to_page(f);    in cpu_map_kthread_run() local
    296   prefetchw(page);    in cpu_map_kthread_run()
/kernel/debug/kdb/
kdb_support.c:
    381   struct page *page;    in kdb_getphys() local
    386   page = pfn_to_page(pfn);    in kdb_getphys()
    387   vaddr = kmap_atomic(page);    in kdb_getphys()