
Searched for refs:page (results 1 – 25 of 43), sorted by relevance

/kernel/dma/
direct.c
33 static inline struct page *dma_direct_to_page(struct device *dev, in dma_direct_to_page()
99 static void __dma_direct_free_pages(struct device *dev, struct page *page, in __dma_direct_free_pages() argument
102 if (swiotlb_free(dev, page, size)) in __dma_direct_free_pages()
104 dma_free_contiguous(dev, page, size); in __dma_direct_free_pages()
107 static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size) in dma_direct_alloc_swiotlb()
109 struct page *page = swiotlb_alloc(dev, size); in dma_direct_alloc_swiotlb() local
111 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in dma_direct_alloc_swiotlb()
112 swiotlb_free(dev, page, size); in dma_direct_alloc_swiotlb()
116 return page; in dma_direct_alloc_swiotlb()
119 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, in __dma_direct_alloc_pages()
[all …]
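
The direct.c hits above sketch the swiotlb-backed allocation path: swiotlb_alloc() provides the pages and swiotlb_free() takes them back when the physical address fails the device's coherence check. A minimal restatement of that pattern follows; dma_coherent_ok() is a helper internal to kernel/dma/direct.c and the wrapper name here is illustrative only.

/* Sketch of the allocate-then-verify pattern in dma_direct_alloc_swiotlb():
 * take pages from the software IOTLB and hand them back if the device
 * cannot reach them coherently. */
static struct page *swiotlb_page_for_dev(struct device *dev, size_t size)
{
        struct page *page = swiotlb_alloc(dev, size);

        if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
                swiotlb_free(dev, page, size);
                return NULL;
        }
        return page;
}
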
ops_helpers.c
8 static struct page *dma_common_vaddr_to_page(void *cpu_addr) in dma_common_vaddr_to_page()
22 struct page *page = dma_common_vaddr_to_page(cpu_addr); in dma_common_get_sgtable() local
27 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in dma_common_get_sgtable()
43 struct page *page = dma_common_vaddr_to_page(cpu_addr); in dma_common_mmap() local
55 page_to_pfn(page) + vma->vm_pgoff, in dma_common_mmap()
63 struct page *dma_common_alloc_pages(struct device *dev, size_t size, in dma_common_alloc_pages()
67 struct page *page; in dma_common_alloc_pages() local
69 page = dma_alloc_contiguous(dev, size, gfp); in dma_common_alloc_pages()
70 if (!page) in dma_common_alloc_pages()
71 page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size)); in dma_common_alloc_pages()
[all …]
pool.c
83 struct page *page = NULL; in atomic_pool_expand() local
93 page = dma_alloc_from_contiguous(NULL, 1 << order, in atomic_pool_expand()
95 if (!page) in atomic_pool_expand()
96 page = alloc_pages(gfp, order); in atomic_pool_expand()
97 } while (!page && order-- > 0); in atomic_pool_expand()
98 if (!page) in atomic_pool_expand()
101 arch_dma_prep_coherent(page, pool_size); in atomic_pool_expand()
104 addr = dma_common_contiguous_remap(page, pool_size, in atomic_pool_expand()
110 addr = page_to_virt(page); in atomic_pool_expand()
116 ret = set_memory_decrypted((unsigned long)page_to_virt(page), in atomic_pool_expand()
[all …]
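
pool.c's atomic_pool_expand() (lines 83–116 above) tries CMA first, then the buddy allocator, and keeps shrinking the order until an allocation succeeds before preparing the memory with arch_dma_prep_coherent(). A hedged sketch of that shrinking-order loop, with an illustrative function name:

/* Sketch of the order-backoff loop seen in atomic_pool_expand(): prefer
 * CMA, fall back to alloc_pages(), and retry at smaller orders. */
static struct page *pool_alloc_backoff(gfp_t gfp, unsigned int order)
{
        struct page *page = NULL;

        do {
                page = dma_alloc_from_contiguous(NULL, 1 << order, order, false);
                if (!page)
                        page = alloc_pages(gfp, order);
        } while (!page && order-- > 0);

        if (page)
                /* 'order' still holds the order that actually succeeded. */
                arch_dma_prep_coherent(page, PAGE_SIZE << order);
        return page;
}
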
contiguous.c
258 struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, in dma_alloc_from_contiguous()
277 bool dma_release_from_contiguous(struct device *dev, struct page *pages, in dma_release_from_contiguous()
283 static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp) in cma_alloc_aligned()
305 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) in dma_alloc_contiguous()
322 struct page *page; in dma_alloc_contiguous() local
325 page = cma_alloc_aligned(cma, size, gfp); in dma_alloc_contiguous()
326 if (page) in dma_alloc_contiguous()
327 return page; in dma_alloc_contiguous()
348 void dma_free_contiguous(struct device *dev, struct page *page, size_t size) in dma_free_contiguous() argument
354 if (cma_release(dev->cma_area, page, count)) in dma_free_contiguous()
[all …]
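
contiguous.c shows the public pair: dma_alloc_contiguous() prefers the device's CMA area via cma_alloc_aligned(), and dma_free_contiguous() hands pages back with cma_release() when they came from CMA. An illustrative round trip, assuming a DMA-capable 'dev' owned by the caller:

/* Illustrative caller of the contiguous helpers shown above; error handling
 * beyond the NULL check is omitted. */
static int contiguous_roundtrip(struct device *dev, size_t size)
{
        struct page *page = dma_alloc_contiguous(dev, size, GFP_KERNEL);

        if (!page)
                return -ENOMEM;

        /* ... use the pages as a device buffer ... */

        dma_free_contiguous(dev, page, size);
        return 0;
}
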
remap.c
9 struct page **dma_common_find_pages(void *cpu_addr) in dma_common_find_pages()
22 void *dma_common_pages_remap(struct page **pages, size_t size, in dma_common_pages_remap()
38 void *dma_common_contiguous_remap(struct page *page, size_t size, in dma_common_contiguous_remap() argument
42 struct page **pages; in dma_common_contiguous_remap()
46 pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); in dma_common_contiguous_remap()
50 pages[i] = nth_page(page, i); in dma_common_contiguous_remap()
mapping.c
143 dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, in dma_map_page_attrs() argument
156 arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size)) in dma_map_page_attrs()
157 addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); in dma_map_page_attrs()
159 addr = ops->map_page(dev, page, offset, size, dir, attrs); in dma_map_page_attrs()
160 kmsan_handle_dma(page, offset, size, dir); in dma_map_page_attrs()
161 debug_dma_map_page(dev, page, offset, size, dir, addr, attrs); in dma_map_page_attrs()
549 static struct page *__dma_alloc_pages(struct device *dev, size_t size, in __dma_alloc_pages()
567 struct page *dma_alloc_pages(struct device *dev, size_t size, in dma_alloc_pages()
570 struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp); in dma_alloc_pages() local
572 if (page) in dma_alloc_pages()
[all …]
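
mapping.c's dma_map_page_attrs() either takes the direct path (dma_direct_map_page()) or dispatches to the device's ops->map_page, then reports the mapping to KMSAN and dma-debug. A caller-side sketch of the streaming API, with illustrative wrapper names and a fixed direction:

#include <linux/dma-mapping.h>

/* Map one page for device reads (DMA_TO_DEVICE) and check the result. */
static dma_addr_t map_one_page(struct device *dev, struct page *page, size_t len)
{
        dma_addr_t addr = dma_map_page_attrs(dev, page, 0, len,
                                             DMA_TO_DEVICE, 0);

        if (dma_mapping_error(dev, addr))
                return DMA_MAPPING_ERROR;
        return addr;
}

static void unmap_one_page(struct device *dev, dma_addr_t addr, size_t len)
{
        dma_unmap_page_attrs(dev, addr, len, DMA_TO_DEVICE, 0);
}
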
direct.h
85 struct page *page, unsigned long offset, size_t size, in dma_direct_map_page() argument
88 phys_addr_t phys = page_to_phys(page) + offset; in dma_direct_map_page()
92 if (is_pci_p2pdma_page(page)) in dma_direct_map_page()
98 if (is_pci_p2pdma_page(page)) in dma_direct_map_page()
debug.h
12 extern void debug_dma_map_page(struct device *dev, struct page *page,
58 static inline void debug_dma_map_page(struct device *dev, struct page *page, in debug_dma_map_page() argument
dummy.c
14 static dma_addr_t dma_dummy_map_page(struct device *dev, struct page *page, in dma_dummy_map_page() argument
swiotlb.c
594 struct page *page; in swiotlb_bounce() local
602 page = pfn_to_page(pfn); in swiotlb_bounce()
604 memcpy_from_page(vaddr, page, offset, sz); in swiotlb_bounce()
606 memcpy_to_page(page, offset, vaddr, sz); in swiotlb_bounce()
993 struct page *swiotlb_alloc(struct device *dev, size_t size) in swiotlb_alloc()
1011 bool swiotlb_free(struct device *dev, struct page *page, size_t size) in swiotlb_free() argument
1013 phys_addr_t tlb_addr = page_to_phys(page); in swiotlb_free()
/kernel/
kexec_core.c
145 static struct page *kimage_alloc_page(struct kimage *image,
300 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) in kimage_alloc_pages()
302 struct page *pages; in kimage_alloc_pages()
327 static void kimage_free_pages(struct page *page) in kimage_free_pages() argument
331 order = page_private(page); in kimage_free_pages()
334 arch_kexec_pre_free_pages(page_address(page), count); in kimage_free_pages()
337 ClearPageReserved(page + i); in kimage_free_pages()
338 __free_pages(page, order); in kimage_free_pages()
343 struct page *page, *next; in kimage_free_page_list() local
345 list_for_each_entry_safe(page, next, list, lru) { in kimage_free_page_list()
[all …]
crash_core.c
443 VMCOREINFO_STRUCT_SIZE(page); in crash_save_vmcoreinfo_init()
449 VMCOREINFO_OFFSET(page, flags); in crash_save_vmcoreinfo_init()
450 VMCOREINFO_OFFSET(page, _refcount); in crash_save_vmcoreinfo_init()
451 VMCOREINFO_OFFSET(page, mapping); in crash_save_vmcoreinfo_init()
452 VMCOREINFO_OFFSET(page, lru); in crash_save_vmcoreinfo_init()
453 VMCOREINFO_OFFSET(page, _mapcount); in crash_save_vmcoreinfo_init()
454 VMCOREINFO_OFFSET(page, private); in crash_save_vmcoreinfo_init()
455 VMCOREINFO_OFFSET(page, compound_dtor); in crash_save_vmcoreinfo_init()
456 VMCOREINFO_OFFSET(page, compound_order); in crash_save_vmcoreinfo_init()
457 VMCOREINFO_OFFSET(page, compound_head); in crash_save_vmcoreinfo_init()
watch_queue.c
62 struct page *page; in watch_queue_pipe_buf_release() local
74 page = buf->page; in watch_queue_pipe_buf_release()
75 bit += page->index; in watch_queue_pipe_buf_release()
104 struct page *page; in post_one_notification() local
120 page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE]; in post_one_notification()
122 get_page(page); in post_one_notification()
124 p = kmap_atomic(page); in post_one_notification()
129 buf->page = page; in post_one_notification()
242 struct page **pages; in watch_queue_set_size()
274 pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL); in watch_queue_set_size()
profile.c
320 struct page *page; in profile_dead_cpu() local
328 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]); in profile_dead_cpu()
330 __free_page(page); in profile_dead_cpu()
339 struct page *page; in profile_prepare_cpu() local
347 page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); in profile_prepare_cpu()
348 if (!page) { in profile_prepare_cpu()
352 per_cpu(cpu_profile_hits, cpu)[i] = page_address(page); in profile_prepare_cpu()
relay.c
35 struct page *page; in relay_buf_fault() local
42 page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT)); in relay_buf_fault()
43 if (!page) in relay_buf_fault()
45 get_page(page); in relay_buf_fault()
46 vmf->page = page; in relay_buf_fault()
61 static struct page **relay_alloc_page_array(unsigned int n_pages) in relay_alloc_page_array()
63 return kvcalloc(n_pages, sizeof(struct page *), GFP_KERNEL); in relay_alloc_page_array()
69 static void relay_free_page_array(struct page **array) in relay_free_page_array()
1091 rbuf = (struct rchan_buf *)page_private(buf->page); in relay_pipe_buf_release()
1124 struct page *pages[PIPE_DEF_BUFFERS]; in subbuf_splice_actor()
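
relay.c's relay_buf_fault() resolves a fault on the vmalloc-backed channel buffer by translating the offset with vmalloc_to_page(), taking a page reference, and returning it through vmf->page. A minimal sketch of that handler shape, with 'start' standing in for buf->start:

/* Sketch of the vmalloc-backed fault handler pattern in relay_buf_fault(). */
static vm_fault_t vmalloc_buf_fault(struct vm_fault *vmf, void *start)
{
        struct page *page;

        page = vmalloc_to_page(start + (vmf->pgoff << PAGE_SHIFT));
        if (!page)
                return VM_FAULT_SIGBUS;

        get_page(page);         /* reference dropped by the core fault code */
        vmf->page = page;
        return 0;
}
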
/kernel/power/
snapshot.c
86 static inline void hibernate_map_page(struct page *page) in hibernate_map_page() argument
89 int ret = set_direct_map_default_noflush(page); in hibernate_map_page()
94 debug_pagealloc_map_pages(page, 1); in hibernate_map_page()
98 static inline void hibernate_unmap_page(struct page *page) in hibernate_unmap_page() argument
101 unsigned long addr = (unsigned long)page_address(page); in hibernate_unmap_page()
102 int ret = set_direct_map_invalid_noflush(page); in hibernate_unmap_page()
109 debug_pagealloc_unmap_pages(page, 1); in hibernate_unmap_page()
113 static int swsusp_page_is_free(struct page *);
114 static void swsusp_set_page_forbidden(struct page *);
115 static void swsusp_unset_page_forbidden(struct page *);
[all …]
swap.c
252 struct page *page = bio_first_page_all(bio); in hib_end_io() local
261 put_page(page); in hib_end_io()
263 flush_icache_range((unsigned long)page_address(page), in hib_end_io()
264 (unsigned long)page_address(page) + PAGE_SIZE); in hib_end_io()
277 struct page *page = virt_to_page(addr); in hib_submit_io() local
284 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { in hib_submit_io()
698 unsigned char *page = NULL; in save_image_lzo() local
711 page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH); in save_image_lzo()
712 if (!page) { in save_image_lzo()
852 memcpy(page, data[thr].cmp + off, PAGE_SIZE); in save_image_lzo()
[all …]
/kernel/events/
ring_buffer.c
237 handle->page = (offset >> page_shift) & (rb->nr_pages - 1); in __perf_output_begin()
239 handle->addr = rb->data_pages[handle->page] + offset; in __perf_output_begin()
608 static struct page *rb_alloc_aux_page(int node, int order) in rb_alloc_aux_page()
610 struct page *page; in rb_alloc_aux_page() local
616 page = alloc_pages_node(node, PERF_AUX_GFP, order); in rb_alloc_aux_page()
617 } while (!page && order--); in rb_alloc_aux_page()
619 if (page && order) { in rb_alloc_aux_page()
626 split_page(page, order); in rb_alloc_aux_page()
627 SetPagePrivate(page); in rb_alloc_aux_page()
628 set_page_private(page, order); in rb_alloc_aux_page()
[all …]
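
rb_alloc_aux_page() above backs off to smaller orders until alloc_pages_node() succeeds, then split_page()s the result and records the original order in page_private so the free path can undo it. A hedged sketch of that backoff-and-split pattern (the real code uses PERF_AUX_GFP rather than the mask shown here):

/* Sketch of the order-backoff plus split_page() pattern in rb_alloc_aux_page(). */
static struct page *alloc_aux_like(int node, int order)
{
        struct page *page;

        do {
                page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, order);
        } while (!page && order--);

        if (page && order) {
                /* Hand out order-0 pages but remember the original order. */
                split_page(page, order);
                SetPagePrivate(page);
                set_page_private(page, order);
        }
        return page;
}
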
uprobes.c
104 struct page *pages[2];
155 struct page *old_page, struct page *new_page) in __replace_page()
240 static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len) in copy_from_page() argument
242 void *kaddr = kmap_atomic(page); in copy_from_page()
247 static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len) in copy_to_page() argument
249 void *kaddr = kmap_atomic(page); in copy_to_page()
254 static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode) in verify_opcode() argument
268 copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE); in verify_opcode()
368 struct page *page; in __update_ref_ctr() local
377 FOLL_WRITE, &page, &vma, NULL); in __update_ref_ctr()
[all …]
internal.h
96 extern struct page *
151 handle->page++; \
152 handle->page &= rb->nr_pages - 1; \
153 handle->addr = rb->data_pages[handle->page]; \
/kernel/module/
decompress.c
19 struct page **new_pages; in module_extend_max_pages()
34 static struct page *module_get_next_page(struct load_info *info) in module_get_next_page()
36 struct page *page; in module_get_next_page() local
45 page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM); in module_get_next_page()
46 if (!page) in module_get_next_page()
49 info->pages[info->used_pages++] = page; in module_get_next_page()
50 return page; in module_get_next_page()
115 struct page *page = module_get_next_page(info); in module_gzip_decompress() local
117 if (IS_ERR(page)) { in module_gzip_decompress()
118 retval = PTR_ERR(page); in module_gzip_decompress()
[all …]
/kernel/futex/
core.c
226 struct page *page, *tail; in get_futex_key() local
272 err = get_user_pages_fast(address, 1, FOLL_WRITE, &page); in get_futex_key()
278 err = get_user_pages_fast(address, 1, 0, &page); in get_futex_key()
304 tail = page; in get_futex_key()
305 page = compound_head(page); in get_futex_key()
306 mapping = READ_ONCE(page->mapping); in get_futex_key()
331 lock_page(page); in get_futex_key()
332 shmem_swizzled = PageSwapCache(page) || page->mapping; in get_futex_key()
333 unlock_page(page); in get_futex_key()
334 put_page(page); in get_futex_key()
[all …]
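
core.c's get_futex_key() pins the user page with get_user_pages_fast() and then works on the compound head and its mapping to decide whether the futex is private or shared. A simplified sketch of just the pin-and-inspect step; the helper name is illustrative, the shmem/swapcache retry is omitted, and the caller is expected to put_page() the result:

/* Pin one user page for a futex word and return its compound head. */
static int pin_futex_page(unsigned long address, struct page **headp)
{
        struct page *page;
        int err;

        err = get_user_pages_fast(address, 1, FOLL_WRITE, &page);
        if (err != 1)
                return err < 0 ? err : -EFAULT;

        /* Tail pages share the head's mapping; classify via the head. */
        *headp = compound_head(page);
        return 0;
}
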
/kernel/trace/
ring_buffer.c
308 struct buffer_data_page *page; /* Actual data page */ member
333 return local_read(&bpage->page->commit); in rb_page_commit()
338 free_page((unsigned long)bpage->page); in free_buffer_page()
705 struct buffer_page *page = cpu_buffer->commit_page; in verify_event() local
715 if (page == tail_page || WARN_ON_ONCE(stop++ > 100)) in verify_event()
717 commit = local_read(&page->page->commit); in verify_event()
718 write = local_read(&page->write); in verify_event()
719 if (addr >= (unsigned long)&page->page->data[commit] && in verify_event()
720 addr < (unsigned long)&page->page->data[write]) in verify_event()
723 next = rb_list_head(page->list.next); in verify_event()
[all …]
/kernel/bpf/
ringbuf.c
38 struct page **pages;
102 struct page **pages, *page; in bpf_ringbuf_area_alloc() local
130 page = alloc_pages_node(numa_node, flags, 0); in bpf_ringbuf_area_alloc()
131 if (!page) { in bpf_ringbuf_area_alloc()
135 pages[i] = page; in bpf_ringbuf_area_alloc()
137 pages[nr_data_pages + i] = page; in bpf_ringbuf_area_alloc()
221 struct page **pages = rb->pages; in bpf_ringbuf_free()
/kernel/debug/kdb/
kdb_support.c
321 struct page *page; in kdb_getphys() local
326 page = pfn_to_page(pfn); in kdb_getphys()
327 vaddr = kmap_atomic(page); in kdb_getphys()
