Searched refs:page (Results 1 – 25 of 41) sorted by relevance


/kernel/dma/
direct.c
33 static inline struct page *dma_direct_to_page(struct device *dev, in dma_direct_to_page()
79 static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size, in __dma_direct_alloc_pages()
83 struct page *page = NULL; in __dma_direct_alloc_pages() local
90 page = dma_alloc_contiguous(dev, size, gfp); in __dma_direct_alloc_pages()
91 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in __dma_direct_alloc_pages()
92 dma_free_contiguous(dev, page, size); in __dma_direct_alloc_pages()
93 page = NULL; in __dma_direct_alloc_pages()
96 if (!page) in __dma_direct_alloc_pages()
97 page = alloc_pages_node(node, gfp, get_order(size)); in __dma_direct_alloc_pages()
98 if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) { in __dma_direct_alloc_pages()
[all …]
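
The __dma_direct_alloc_pages() hits above show the allocator's fallback chain: try the device's contiguous (CMA) area first, throw the result away if the device cannot address it, then fall back to the buddy allocator. A minimal sketch of that pattern, assuming kernel context; my_alloc_dma_pages() is a hypothetical name, and the exact headers providing the DMA helpers vary between kernel versions.

#include <linux/dma-direct.h>	/* dma_coherent_ok() */
#include <linux/dma-map-ops.h>	/* dma_alloc_contiguous(), dma_free_contiguous() */
#include <linux/gfp.h>		/* alloc_pages_node(), get_order() */

/* Hypothetical helper mirroring the shape of __dma_direct_alloc_pages(). */
static struct page *my_alloc_dma_pages(struct device *dev, size_t size,
				       gfp_t gfp, int node)
{
	struct page *page;

	/* Prefer the contiguous area reserved for the device. */
	page = dma_alloc_contiguous(dev, size, gfp);
	if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {
		dma_free_contiguous(dev, page, size);
		page = NULL;
	}

	/* Fall back to a plain buddy allocation of matching order. */
	if (!page)
		page = alloc_pages_node(node, gfp, get_order(size));

	return page;
}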
ops_helpers.c
8 static struct page *dma_common_vaddr_to_page(void *cpu_addr) in dma_common_vaddr_to_page()
22 struct page *page = dma_common_vaddr_to_page(cpu_addr); in dma_common_get_sgtable() local
27 sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0); in dma_common_get_sgtable()
42 struct page *page = dma_common_vaddr_to_page(cpu_addr); in dma_common_mmap() local
54 page_to_pfn(page) + vma->vm_pgoff, in dma_common_mmap()
61 struct page *dma_common_alloc_pages(struct device *dev, size_t size, in dma_common_alloc_pages()
65 struct page *page; in dma_common_alloc_pages() local
67 page = dma_alloc_contiguous(dev, size, gfp); in dma_common_alloc_pages()
68 if (!page) in dma_common_alloc_pages()
69 page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size)); in dma_common_alloc_pages()
[all …]
pool.c
86 struct page *page = NULL; in atomic_pool_expand() local
96 page = dma_alloc_from_contiguous(NULL, 1 << order, in atomic_pool_expand()
98 if (!page) in atomic_pool_expand()
99 page = alloc_pages(gfp, order); in atomic_pool_expand()
100 } while (!page && order-- > 0); in atomic_pool_expand()
101 if (!page) in atomic_pool_expand()
104 arch_dma_prep_coherent(page, pool_size); in atomic_pool_expand()
107 addr = dma_common_contiguous_remap(page, pool_size, in atomic_pool_expand()
113 addr = page_to_virt(page); in atomic_pool_expand()
119 ret = set_memory_decrypted((unsigned long)page_to_virt(page), in atomic_pool_expand()
[all …]
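
The atomic_pool_expand() hits show another recurring idiom: request a high-order block and step the order down until something succeeds. A sketch of just that retry loop under a hypothetical name; note that && short-circuits, so the order is not decremented on the successful attempt.

#include <linux/gfp.h>

/* Keep lowering the order until the allocator cooperates. */
static struct page *alloc_highest_order(gfp_t gfp, unsigned int *order)
{
	struct page *page;

	do {
		page = alloc_pages(gfp, *order);
	} while (!page && (*order)-- > 0);

	/* On success, *order holds the order actually obtained, which
	 * the caller needs for the matching __free_pages(). */
	return page;
}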
contiguous.c
259 struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, in dma_alloc_from_contiguous()
279 bool dma_release_from_contiguous(struct device *dev, struct page *pages, in dma_release_from_contiguous()
285 static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp) in cma_alloc_aligned()
308 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) in dma_alloc_contiguous()
330 struct page *page; in dma_alloc_contiguous() local
333 page = cma_alloc_aligned(cma, size, gfp); in dma_alloc_contiguous()
334 if (page) in dma_alloc_contiguous()
335 return page; in dma_alloc_contiguous()
356 void dma_free_contiguous(struct device *dev, struct page *page, size_t size) in dma_free_contiguous() argument
362 if (cma_release(dev->cma_area, page, count)) in dma_free_contiguous()
[all …]
remap.c
9 struct page **dma_common_find_pages(void *cpu_addr) in dma_common_find_pages()
22 void *dma_common_pages_remap(struct page **pages, size_t size, in dma_common_pages_remap()
38 void *dma_common_contiguous_remap(struct page *page, size_t size, in dma_common_contiguous_remap() argument
42 struct page **pages; in dma_common_contiguous_remap()
46 pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL); in dma_common_contiguous_remap()
50 pages[i] = nth_page(page, i); in dma_common_contiguous_remap()
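
dma_common_contiguous_remap() demonstrates how one physically contiguous allocation is expanded into a struct page array so vmap() can give it a fresh virtual mapping. A sketch of the technique with a hypothetical name; the real function routes through dma_common_pages_remap() and uses a DMA-specific vm flag.

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static void *remap_contiguous(struct page *page, size_t size, pgprot_t prot)
{
	int count = size >> PAGE_SHIFT;
	struct page **pages;
	void *vaddr;
	int i;

	pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return NULL;

	/* nth_page() is required: with sparsemem, page + i is not safe
	 * across memory section boundaries. */
	for (i = 0; i < count; i++)
		pages[i] = nth_page(page, i);

	vaddr = vmap(pages, count, VM_MAP, prot);
	kvfree(pages);	/* the mapping lives in page tables; array no longer needed */
	return vaddr;
}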
mapping.c
140 dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page, in dma_map_page_attrs() argument
153 addr = dma_direct_map_page(dev, page, offset, size, dir, attrs); in dma_map_page_attrs()
155 addr = ops->map_page(dev, page, offset, size, dir, attrs); in dma_map_page_attrs()
156 debug_dma_map_page(dev, page, offset, size, dir, addr); in dma_map_page_attrs()
477 struct page *dma_alloc_pages(struct device *dev, size_t size, in dma_alloc_pages()
481 struct page *page; in dma_alloc_pages() local
490 page = dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp); in dma_alloc_pages()
492 page = ops->alloc_pages(dev, size, dma_handle, dir, gfp); in dma_alloc_pages()
496 debug_dma_map_page(dev, page, 0, size, dir, *dma_handle); in dma_alloc_pages()
498 return page; in dma_alloc_pages()
[all …]
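
The dma_map_page_attrs() hits show the central dispatch of the mapping layer: a device using the direct mapping bypasses the dma_map_ops table entirely. A simplified sketch of that shape; it assumes it is built inside kernel/dma/ (dma_direct_map_page() comes from the internal direct.h listed below), and the real function additionally consults dma_map_direct() and calls the debug hooks.

#include <linux/dma-map-ops.h>	/* get_dma_ops(), struct dma_map_ops */
#include "direct.h"		/* dma_direct_map_page(), internal to kernel/dma/ */

static dma_addr_t my_map_page(struct device *dev, struct page *page,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)	/* no ops installed: take the direct path */
		return dma_direct_map_page(dev, page, offset, size, dir, attrs);
	return ops->map_page(dev, page, offset, size, dir, attrs);
}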
debug.h
12 extern void debug_dma_map_page(struct device *dev, struct page *page,
54 static inline void debug_dma_map_page(struct device *dev, struct page *page, in debug_dma_map_page() argument
virt.c
29 static dma_addr_t dma_virt_map_page(struct device *dev, struct page *page, in dma_virt_map_page() argument
34 return (uintptr_t)(page_address(page) + offset); in dma_virt_map_page()
dummy.c
14 static dma_addr_t dma_dummy_map_page(struct device *dev, struct page *page, in dma_dummy_map_page() argument
direct.h
84 struct page *page, unsigned long offset, size_t size, in dma_direct_map_page() argument
87 phys_addr_t phys = page_to_phys(page) + offset; in dma_direct_map_page()
debug.c
1053 struct page *page, size_t offset) in check_for_stack() argument
1060 if (PageHighMem(page)) in check_for_stack()
1062 addr = page_address(page) + offset; in check_for_stack()
1070 if (page != stack_vm_area->pages[i]) in check_for_stack()
1215 void debug_dma_map_page(struct device *dev, struct page *page, size_t offset, in debug_dma_map_page() argument
1232 entry->pfn = page_to_pfn(page); in debug_dma_map_page()
1239 check_for_stack(dev, page, offset); in debug_dma_map_page()
1241 if (!PageHighMem(page)) { in debug_dma_map_page()
1242 void *addr = page_address(page) + offset; in debug_dma_map_page()
/kernel/
kexec_core.c
144 static struct page *kimage_alloc_page(struct kimage *image,
299 static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order) in kimage_alloc_pages()
301 struct page *pages; in kimage_alloc_pages()
326 static void kimage_free_pages(struct page *page) in kimage_free_pages() argument
330 order = page_private(page); in kimage_free_pages()
333 arch_kexec_pre_free_pages(page_address(page), count); in kimage_free_pages()
336 ClearPageReserved(page + i); in kimage_free_pages()
337 __free_pages(page, order); in kimage_free_pages()
342 struct page *page, *next; in kimage_free_page_list() local
344 list_for_each_entry_safe(page, next, list, lru) { in kimage_free_page_list()
[all …]
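
The kexec hits show pages carried on a list threaded through page->lru and freed at the order stashed in page_private() when they were allocated. A sketch of that free path under a hypothetical name; the real kimage_free_pages() also clears PageReserved on every constituent page first.

#include <linux/list.h>
#include <linux/mm.h>

static void free_page_list(struct list_head *list)
{
	struct page *page, *next;

	/* The _safe variant tolerates list_del() during iteration. */
	list_for_each_entry_safe(page, next, list, lru) {
		list_del(&page->lru);
		__free_pages(page, page_private(page));
	}
}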
watch_queue.c
62 struct page *page; in watch_queue_pipe_buf_release() local
74 page = buf->page; in watch_queue_pipe_buf_release()
75 bit += page->index; in watch_queue_pipe_buf_release()
104 struct page *page; in post_one_notification() local
123 page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE]; in post_one_notification()
125 get_page(page); in post_one_notification()
127 p = kmap_atomic(page); in post_one_notification()
132 buf->page = page; in post_one_notification()
245 struct page **pages; in watch_queue_set_size()
278 pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL); in watch_queue_set_size()
crash_core.c
469 VMCOREINFO_STRUCT_SIZE(page); in crash_save_vmcoreinfo_init()
475 VMCOREINFO_OFFSET(page, flags); in crash_save_vmcoreinfo_init()
476 VMCOREINFO_OFFSET(page, _refcount); in crash_save_vmcoreinfo_init()
477 VMCOREINFO_OFFSET(page, mapping); in crash_save_vmcoreinfo_init()
478 VMCOREINFO_OFFSET(page, lru); in crash_save_vmcoreinfo_init()
479 VMCOREINFO_OFFSET(page, _mapcount); in crash_save_vmcoreinfo_init()
480 VMCOREINFO_OFFSET(page, private); in crash_save_vmcoreinfo_init()
481 VMCOREINFO_OFFSET(page, compound_dtor); in crash_save_vmcoreinfo_init()
482 VMCOREINFO_OFFSET(page, compound_order); in crash_save_vmcoreinfo_init()
483 VMCOREINFO_OFFSET(page, compound_head); in crash_save_vmcoreinfo_init()
profile.c
344 struct page *page; in profile_dead_cpu() local
352 page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]); in profile_dead_cpu()
354 __free_page(page); in profile_dead_cpu()
363 struct page *page; in profile_prepare_cpu() local
371 page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0); in profile_prepare_cpu()
372 if (!page) { in profile_prepare_cpu()
376 per_cpu(cpu_profile_hits, cpu)[i] = page_address(page); in profile_prepare_cpu()
relay.c
44 struct page *page; in relay_buf_fault() local
51 page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT)); in relay_buf_fault()
52 if (!page) in relay_buf_fault()
54 get_page(page); in relay_buf_fault()
55 vmf->page = page; in relay_buf_fault()
71 static struct page **relay_alloc_page_array(unsigned int n_pages) in relay_alloc_page_array()
73 const size_t pa_size = n_pages * sizeof(struct page *); in relay_alloc_page_array()
82 static void relay_free_page_array(struct page **array) in relay_free_page_array()
1179 rbuf = (struct rchan_buf *)page_private(buf->page); in relay_pipe_buf_release()
1212 struct page *pages[PIPE_DEF_BUFFERS]; in subbuf_splice_actor()
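
relay_buf_fault() is the classic recipe for mmap()ing a vmalloc'ed buffer: translate the faulting offset back to its backing page with vmalloc_to_page() and hand out a reference. A sketch with hypothetical names (buf_start stands in for the relay buffer's base address; bounds checking against the buffer size is omitted):

#include <linux/mm.h>
#include <linux/vmalloc.h>

static vm_fault_t my_buf_fault(struct vm_fault *vmf, void *buf_start)
{
	struct page *page;

	page = vmalloc_to_page(buf_start + (vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);		/* the core MM drops this reference later */
	vmf->page = page;
	return 0;
}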
cfi.c
74 unsigned long page = ptr >> PAGE_SHIFT; in ptr_to_shadow() local
76 if (unlikely(page < s->base)) in ptr_to_shadow()
79 index = page - s->base; in ptr_to_shadow()
/kernel/power/
snapshot.c
79 static int swsusp_page_is_free(struct page *);
80 static void swsusp_set_page_forbidden(struct page *);
81 static void swsusp_unset_page_forbidden(struct page *);
191 static struct page *alloc_image_page(gfp_t gfp_mask) in alloc_image_page()
193 struct page *page; in alloc_image_page() local
195 page = alloc_page(gfp_mask); in alloc_image_page()
196 if (page) { in alloc_image_page()
197 swsusp_set_page_forbidden(page); in alloc_image_page()
198 swsusp_set_page_free(page); in alloc_image_page()
200 return page; in alloc_image_page()
[all …]
swap.c
248 struct page *page = bio_first_page_all(bio); in hib_end_io() local
257 put_page(page); in hib_end_io()
259 flush_icache_range((unsigned long)page_address(page), in hib_end_io()
260 (unsigned long)page_address(page) + PAGE_SIZE); in hib_end_io()
273 struct page *page = virt_to_page(addr); in hib_submit_io() local
282 if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { in hib_submit_io()
688 unsigned char *page = NULL; in save_image_lzo() local
701 page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH); in save_image_lzo()
702 if (!page) { in save_image_lzo()
845 memcpy(page, data[thr].cmp + off, PAGE_SIZE); in save_image_lzo()
[all …]
/kernel/events/
ring_buffer.c
235 handle->page = (offset >> page_shift) & (rb->nr_pages - 1); in __perf_output_begin()
237 handle->addr = rb->data_pages[handle->page] + offset; in __perf_output_begin()
605 static struct page *rb_alloc_aux_page(int node, int order) in rb_alloc_aux_page()
607 struct page *page; in rb_alloc_aux_page() local
613 page = alloc_pages_node(node, PERF_AUX_GFP, order); in rb_alloc_aux_page()
614 } while (!page && order--); in rb_alloc_aux_page()
616 if (page && order) { in rb_alloc_aux_page()
623 split_page(page, order); in rb_alloc_aux_page()
624 SetPagePrivate(page); in rb_alloc_aux_page()
625 set_page_private(page, order); in rb_alloc_aux_page()
[all …]
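
rb_alloc_aux_page() combines the decreasing-order retry seen earlier with split_page(): once a high-order block is obtained, it is split into independent single pages, and the original order is parked in page_private so the free path knows how many pages belong together. A hedged sketch (alloc_aux_page() is a stand-in name; the real code uses the PERF_AUX_GFP mask):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/page-flags.h>

static struct page *alloc_aux_page(int node, int order, gfp_t gfp)
{
	struct page *page;

	do {
		page = alloc_pages_node(node, gfp, order);
	} while (!page && order--);

	if (page && order) {
		/* Make each constituent page individually freeable. */
		split_page(page, order);
		SetPagePrivate(page);
		set_page_private(page, order);	/* remember the span size */
	}

	return page;
}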
uprobes.c
104 struct page *pages[2];
155 struct page *old_page, struct page *new_page) in __replace_page()
159 .page = compound_head(old_page), in __replace_page()
244 static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len) in copy_from_page() argument
246 void *kaddr = kmap_atomic(page); in copy_from_page()
251 static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len) in copy_to_page() argument
253 void *kaddr = kmap_atomic(page); in copy_to_page()
258 static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode) in verify_opcode() argument
272 copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE); in verify_opcode()
371 struct page *page; in __update_ref_ctr() local
[all …]
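
copy_from_page()/copy_to_page() are the standard pattern for touching page contents that may live in highmem: map the page temporarily and memcpy through the kernel address. A sketch of the read direction under a hypothetical name; kmap_atomic() disables preemption, so the mapped section must stay short and must not sleep.

#include <linux/highmem.h>
#include <linux/string.h>

static void page_copy_from(struct page *page, unsigned long vaddr,
			   void *dst, int len)
{
	void *kaddr = kmap_atomic(page);

	/* vaddr & ~PAGE_MASK isolates the offset within the page. */
	memcpy(dst, kaddr + (vaddr & ~PAGE_MASK), len);
	kunmap_atomic(kaddr);
}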
internal.h
96 extern struct page *
151 handle->page++; \
152 handle->page &= rb->nr_pages - 1; \
153 handle->addr = rb->data_pages[handle->page]; \
/kernel/trace/
ring_buffer.c
335 struct buffer_data_page *page; /* Actual data page */ member
360 return local_read(&bpage->page->commit); in rb_page_commit()
365 free_page((unsigned long)bpage->page); in free_buffer_page()
1204 struct buffer_page *page, struct list_head *list) in rb_is_head_page() argument
1210 if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list) in rb_is_head_page()
1223 static bool rb_is_reader_page(struct buffer_page *page) in rb_is_reader_page() argument
1225 struct list_head *list = page->list.prev; in rb_is_reader_page()
1227 return rb_list_head(list->next) != &page->list; in rb_is_reader_page()
1344 struct buffer_page *page; in rb_set_head_page() local
1356 page = head = cpu_buffer->head_page; in rb_set_head_page()
[all …]
/kernel/bpf/
ringbuf.c
37 struct page **pages;
69 struct page **pages, *page; in bpf_ringbuf_area_alloc() local
100 page = alloc_pages_node(numa_node, flags, 0); in bpf_ringbuf_area_alloc()
101 if (!page) { in bpf_ringbuf_area_alloc()
105 pages[i] = page; in bpf_ringbuf_area_alloc()
107 pages[nr_data_pages + i] = page; in bpf_ringbuf_area_alloc()
205 struct page **pages = rb->pages; in bpf_ringbuf_free()
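
bpf_ringbuf_area_alloc() plays a neat trick visible in the hits above: every data page is inserted into the pages[] array twice, so the vmap()'ed region repeats the data area back to back and a record that wraps past the end of the buffer stays virtually contiguous. A hedged sketch of just that double-mapping step; error unwinding and the surrounding bookkeeping are omitted, and the array must have room for nr_meta_pages + 2 * nr_data_pages entries.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static void *map_ringbuf_doubled(struct page **pages, int nr_meta_pages,
				 int nr_data_pages, int node, gfp_t gfp)
{
	int nr_pages = nr_meta_pages + nr_data_pages;
	int i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = alloc_pages_node(node, gfp, 0);

		if (!page)
			return NULL;	/* real code frees what it got so far */
		pages[i] = page;
		if (i >= nr_meta_pages)		/* data pages appear twice */
			pages[nr_data_pages + i] = page;
	}

	return vmap(pages, nr_meta_pages + 2 * nr_data_pages,
		    VM_MAP | VM_USERMAP, PAGE_KERNEL);
}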
/kernel/futex/
core.c
492 struct page *page, *tail; in get_futex_key() local
538 err = get_user_pages_fast(address, 1, FOLL_WRITE, &page); in get_futex_key()
544 err = get_user_pages_fast(address, 1, 0, &page); in get_futex_key()
570 tail = page; in get_futex_key()
571 page = compound_head(page); in get_futex_key()
572 mapping = READ_ONCE(page->mapping); in get_futex_key()
597 lock_page(page); in get_futex_key()
598 shmem_swizzled = PageSwapCache(page) || page->mapping; in get_futex_key()
599 unlock_page(page); in get_futex_key()
600 put_user_page(page); in get_futex_key()
[all …]
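
get_futex_key() shows the canonical way to reason about a pinned user page that might be a compound (THP/hugetlb) tail: pin with get_user_pages_fast(), then hop to compound_head() before inspecting page->mapping, since only the head page carries it. A sketch of that idiom with a hypothetical name; the real code retries without FOLL_WRITE for read-only private mappings and revalidates the mapping under the page lock.

#include <linux/mm.h>
#include <linux/pagemap.h>

static struct address_space *pin_and_get_mapping(unsigned long address,
						 struct page **pinned)
{
	struct page *page;

	if (get_user_pages_fast(address, 1, FOLL_WRITE, &page) != 1)
		return NULL;

	page = compound_head(page);	/* tail pages carry no real mapping */
	*pinned = page;
	return READ_ONCE(page->mapping);
}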
