| /kernel/dma/ |
| D | direct.c |
      34: static inline struct page *dma_direct_to_page(struct device *dev,    in dma_direct_to_page()
     100: static void __dma_direct_free_pages(struct device *dev, struct page *page,    in __dma_direct_free_pages() argument
     103: if (swiotlb_free(dev, page, size))    in __dma_direct_free_pages()
     105: dma_free_contiguous(dev, page, size);    in __dma_direct_free_pages()
     108: static struct page *dma_direct_alloc_swiotlb(struct device *dev, size_t size)    in dma_direct_alloc_swiotlb()
     110: struct page *page = swiotlb_alloc(dev, size);    in dma_direct_alloc_swiotlb() local
     112: if (page && !dma_coherent_ok(dev, page_to_phys(page), size)) {    in dma_direct_alloc_swiotlb()
     113: swiotlb_free(dev, page, size);    in dma_direct_alloc_swiotlb()
     117: return page;    in dma_direct_alloc_swiotlb()
     120: static struct page *__dma_direct_alloc_pages(struct device *dev, size_t size,    in __dma_direct_alloc_pages()
     [all …]
|
| D | ops_helpers.c |
       9: static struct page *dma_common_vaddr_to_page(void *cpu_addr)    in dma_common_vaddr_to_page()
      23: struct page *page = dma_common_vaddr_to_page(cpu_addr);    in dma_common_get_sgtable() local
      28: sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);    in dma_common_get_sgtable()
      43: struct page *page = dma_common_vaddr_to_page(cpu_addr);    in dma_common_mmap() local
      55: page_to_pfn(page) + vma->vm_pgoff,    in dma_common_mmap()
      62: struct page *dma_common_alloc_pages(struct device *dev, size_t size,    in dma_common_alloc_pages()
      66: struct page *page;    in dma_common_alloc_pages() local
      68: page = dma_alloc_contiguous(dev, size, gfp);    in dma_common_alloc_pages()
      69: if (!page)    in dma_common_alloc_pages()
      70: page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));    in dma_common_alloc_pages()
     [all …]
|
| D | pool.c |
      83: struct page *page = NULL;    in atomic_pool_expand() local
      93: page = dma_alloc_from_contiguous(NULL, 1 << order,    in atomic_pool_expand()
      95: if (!page)    in atomic_pool_expand()
      96: page = alloc_pages(gfp, order);    in atomic_pool_expand()
      97: } while (!page && order-- > 0);    in atomic_pool_expand()
      98: if (!page)    in atomic_pool_expand()
     101: arch_dma_prep_coherent(page, pool_size);    in atomic_pool_expand()
     104: addr = dma_common_contiguous_remap(page, pool_size,    in atomic_pool_expand()
     110: addr = page_to_virt(page);    in atomic_pool_expand()
     116: ret = set_memory_decrypted((unsigned long)page_to_virt(page),    in atomic_pool_expand()
     [all …]
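
The pool.c matches show atomic_pool_expand() retrying the allocation at progressively smaller orders (try CMA first, then the buddy allocator, shrink the order on failure). The following is a minimal sketch of just the order-fallback step; the helper name is illustrative and not part of the file above.

```c
#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch: try a high-order allocation, falling back to smaller orders. */
static struct page *sketch_alloc_high_order(gfp_t gfp, int order)
{
	struct page *page = NULL;

	do {
		/* Suppress warnings: failure at large orders is expected. */
		page = alloc_pages(gfp | __GFP_NOWARN, order);
	} while (!page && order-- > 0);

	return page;
}
```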
|
| D | contiguous.c |
     309: struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,    in dma_alloc_from_contiguous()
     328: bool dma_release_from_contiguous(struct device *dev, struct page *pages,    in dma_release_from_contiguous()
     334: static struct page *cma_alloc_aligned(struct cma *cma, size_t size, gfp_t gfp)    in cma_alloc_aligned()
     356: struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)    in dma_alloc_contiguous()
     373: struct page *page;    in dma_alloc_contiguous() local
     376: page = cma_alloc_aligned(cma, size, gfp);    in dma_alloc_contiguous()
     377: if (page)    in dma_alloc_contiguous()
     378: return page;    in dma_alloc_contiguous()
     383: page = cma_alloc_aligned(cma, size, gfp);    in dma_alloc_contiguous()
     384: if (page)    in dma_alloc_contiguous()
     [all …]
|
| D | debug.h |
      12: extern void debug_dma_map_page(struct device *dev, struct page *page,
      57: extern void debug_dma_alloc_pages(struct device *dev, struct page *page,
      61: extern void debug_dma_free_pages(struct device *dev, struct page *page,
      65: static inline void debug_dma_map_page(struct device *dev, struct page *page,    in debug_dma_map_page() argument
     137: static inline void debug_dma_alloc_pages(struct device *dev, struct page *page,    in debug_dma_alloc_pages() argument
     144: static inline void debug_dma_free_pages(struct device *dev, struct page *page,    in debug_dma_free_pages() argument
|
| D | mapping.c |
     155: dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,    in dma_map_page_attrs() argument
     168: arch_dma_map_page_direct(dev, page_to_phys(page) + offset + size))    in dma_map_page_attrs()
     169: addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);    in dma_map_page_attrs()
     171: addr = iommu_dma_map_page(dev, page, offset, size, dir, attrs);    in dma_map_page_attrs()
     173: addr = ops->map_page(dev, page, offset, size, dir, attrs);    in dma_map_page_attrs()
     174: kmsan_handle_dma(page, offset, size, dir);    in dma_map_page_attrs()
     175: trace_dma_map_page(dev, page_to_phys(page) + offset, addr, size, dir,    in dma_map_page_attrs()
     177: debug_dma_map_page(dev, page, offset, size, dir, addr, attrs);    in dma_map_page_attrs()
     685: static struct page *__dma_alloc_pages(struct device *dev, size_t size,    in __dma_alloc_pages()
     707: struct page *dma_alloc_pages(struct device *dev, size_t size,    in dma_alloc_pages()
     [all …]
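
dma_map_page_attrs() above is the backend behind the dma_map_page() driver API. As a point of reference, here is a hedged sketch of typical driver-side usage of that API; the helper name is illustrative and not taken from the file.

```c
#include <linux/dma-mapping.h>
#include <linux/mm.h>

/* Illustrative helper: map one page for device-to-CPU DMA and check the result. */
static dma_addr_t example_map_rx_page(struct device *dev, struct page *page)
{
	dma_addr_t addr = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);

	if (dma_mapping_error(dev, addr))
		return DMA_MAPPING_ERROR;

	/* Hand addr to the hardware; later undo with dma_unmap_page(). */
	return addr;
}
```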
|
| D | remap.c |
       9: struct page **dma_common_find_pages(void *cpu_addr)    in dma_common_find_pages()
      24: void *dma_common_pages_remap(struct page **pages, size_t size,    in dma_common_pages_remap()
      40: void *dma_common_contiguous_remap(struct page *page, size_t size,    in dma_common_contiguous_remap() argument
      44: struct page **pages;    in dma_common_contiguous_remap()
      48: pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);    in dma_common_contiguous_remap()
      52: pages[i] = nth_page(page, i);    in dma_common_contiguous_remap()
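
dma_common_contiguous_remap() builds a temporary struct page * array with nth_page() and remaps the physically contiguous block into kernel virtual space. A condensed sketch of the same idea using vmap() follows; the helper name and the VM_MAP flag choice are illustrative, and error handling is trimmed.

```c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Sketch: give a physically contiguous allocation a new virtual mapping. */
static void *sketch_contiguous_remap(struct page *page, size_t size, pgprot_t prot)
{
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page **pages;
	void *vaddr;

	pages = kvmalloc_array(count, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return NULL;
	for (i = 0; i < count; i++)
		pages[i] = nth_page(page, i);	/* i-th page of the contiguous block */

	vaddr = vmap(pages, count, VM_MAP, prot);
	kvfree(pages);	/* page tables are populated; the array is no longer needed */
	return vaddr;
}
```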
|
| D | debug.c |
    1058: struct page *page, size_t offset)    in check_for_stack() argument
    1065: if (PageHighMem(page))    in check_for_stack()
    1067: addr = page_address(page) + offset;    in check_for_stack()
    1075: if (page != stack_vm_area->pages[i])    in check_for_stack()
    1210: void debug_dma_map_page(struct device *dev, struct page *page, size_t offset,    in debug_dma_map_page() argument
    1228: entry->paddr = page_to_phys(page) + offset;    in debug_dma_map_page()
    1234: check_for_stack(dev, page, offset);    in debug_dma_map_page()
    1236: if (!PageHighMem(page)) {    in debug_dma_map_page()
    1237: void *addr = page_address(page) + offset;    in debug_dma_map_page()
    1388: struct page *page;    in virt_to_paddr() local
     [all …]
|
| D | swiotlb.c |
     574: static struct page *alloc_dma_pages(gfp_t gfp, size_t bytes, u64 phys_limit)    in alloc_dma_pages()
     577: struct page *page;    in alloc_dma_pages() local
     581: page = alloc_pages(gfp, order);    in alloc_dma_pages()
     582: if (!page)    in alloc_dma_pages()
     585: paddr = page_to_phys(page);    in alloc_dma_pages()
     587: __free_pages(page, order);    in alloc_dma_pages()
     594: return page;    in alloc_dma_pages()
     599: __free_pages(page, order);    in alloc_dma_pages()
     612: static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,    in swiotlb_alloc_tlb()
     615: struct page *page;    in swiotlb_alloc_tlb() local
     [all …]
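
alloc_dma_pages() above allocates pages and throws them back if they land above the caller's physical address limit, which is the core of how the bounce-buffer pool stays DMA-addressable. A minimal, schematic sketch of that check (assumed helper name, includes kept schematic):

```c
#include <linux/gfp.h>
#include <linux/mm.h>

/* Sketch: allocate 2^order pages and reject the block if it exceeds phys_limit. */
static struct page *sketch_alloc_below_limit(gfp_t gfp, unsigned int order,
					     u64 phys_limit)
{
	struct page *page = alloc_pages(gfp, order);
	phys_addr_t paddr;

	if (!page)
		return NULL;

	paddr = page_to_phys(page);
	if (paddr + (PAGE_SIZE << order) - 1 > phys_limit) {
		__free_pages(page, order);
		return NULL;	/* caller may retry with a more restrictive GFP zone */
	}
	return page;
}
```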
|
| D | direct.h |
      84: struct page *page, unsigned long offset, size_t size,    in dma_direct_map_page() argument
      87: phys_addr_t phys = page_to_phys(page) + offset;    in dma_direct_map_page()
      91: if (is_pci_p2pdma_page(page))    in dma_direct_map_page()
      98: if (is_pci_p2pdma_page(page))    in dma_direct_map_page()
|
| D | dummy.c |
      14: static dma_addr_t dma_dummy_map_page(struct device *dev, struct page *page,    in dma_dummy_map_page() argument
|
| /kernel/ |
| D | kexec_core.c |
     104: static struct page *kimage_alloc_page(struct kimage *image,
     267: static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)    in kimage_alloc_pages()
     269: struct page *pages;    in kimage_alloc_pages()
     294: static void kimage_free_pages(struct page *page)    in kimage_free_pages() argument
     298: order = page_private(page);    in kimage_free_pages()
     301: arch_kexec_pre_free_pages(page_address(page), count);    in kimage_free_pages()
     304: ClearPageReserved(page + i);    in kimage_free_pages()
     305: __free_pages(page, order);    in kimage_free_pages()
     310: struct page *page, *next;    in kimage_free_page_list() local
     312: list_for_each_entry_safe(page, next, list, lru) {    in kimage_free_page_list()
     [all …]
|
| D | vmcore_info.c |
     166: VMCOREINFO_STRUCT_SIZE(page);    in crash_save_vmcoreinfo_init()
     172: VMCOREINFO_OFFSET(page, flags);    in crash_save_vmcoreinfo_init()
     173: VMCOREINFO_OFFSET(page, _refcount);    in crash_save_vmcoreinfo_init()
     174: VMCOREINFO_OFFSET(page, mapping);    in crash_save_vmcoreinfo_init()
     175: VMCOREINFO_OFFSET(page, lru);    in crash_save_vmcoreinfo_init()
     176: VMCOREINFO_OFFSET(page, _mapcount);    in crash_save_vmcoreinfo_init()
     177: VMCOREINFO_OFFSET(page, private);    in crash_save_vmcoreinfo_init()
     178: VMCOREINFO_OFFSET(page, compound_head);    in crash_save_vmcoreinfo_init()
|
| D | watch_queue.c |
      61: struct page *page;    in watch_queue_pipe_buf_release() local
      73: page = buf->page;    in watch_queue_pipe_buf_release()
      74: bit += page->index;    in watch_queue_pipe_buf_release()
     103: struct page *page;    in post_one_notification() local
     119: page = wqueue->notes[note / WATCH_QUEUE_NOTES_PER_PAGE];    in post_one_notification()
     121: get_page(page);    in post_one_notification()
     123: p = kmap_atomic(page);    in post_one_notification()
     128: buf->page = page;    in post_one_notification()
     241: struct page **pages;    in watch_queue_set_size()
     282: pages = kcalloc(nr_pages, sizeof(struct page *), GFP_KERNEL);    in watch_queue_set_size()
|
| D | relay.c |
      35: struct page *page;    in relay_buf_fault() local
      42: page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT));    in relay_buf_fault()
      43: if (!page)    in relay_buf_fault()
      45: get_page(page);    in relay_buf_fault()
      46: vmf->page = page;    in relay_buf_fault()
      61: static struct page **relay_alloc_page_array(unsigned int n_pages)    in relay_alloc_page_array()
      63: return kvcalloc(n_pages, sizeof(struct page *), GFP_KERNEL);    in relay_alloc_page_array()
      69: static void relay_free_page_array(struct page **array)    in relay_free_page_array()
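
relay_buf_fault() shows the usual pattern for faulting in pages of a vmalloc()-backed buffer that has been mmap()ed to user space: translate the fault offset to a page with vmalloc_to_page(), take a reference, and return it via vmf->page. A trimmed sketch of that handler shape (names and the bounds parameter are illustrative):

```c
#include <linux/mm.h>
#include <linux/vmalloc.h>

/* Sketch: resolve a fault on an mmap()ed, vmalloc()-backed buffer. */
static vm_fault_t sketch_vmalloc_buf_fault(struct vm_fault *vmf, void *buf_start,
					   unsigned long buf_pages)
{
	pgoff_t pgoff = vmf->pgoff;
	struct page *page;

	if (pgoff >= buf_pages)
		return VM_FAULT_SIGBUS;

	page = vmalloc_to_page(buf_start + (pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;

	get_page(page);		/* take a reference for the new mapping */
	vmf->page = page;
	return 0;
}
```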
|
| /kernel/power/ |
| D | snapshot.c |
      88: static inline void hibernate_map_page(struct page *page)    in hibernate_map_page() argument
      91: int ret = set_direct_map_default_noflush(page);    in hibernate_map_page()
      96: debug_pagealloc_map_pages(page, 1);    in hibernate_map_page()
     100: static inline void hibernate_unmap_page(struct page *page)    in hibernate_unmap_page() argument
     103: unsigned long addr = (unsigned long)page_address(page);    in hibernate_unmap_page()
     104: int ret = set_direct_map_invalid_noflush(page);    in hibernate_unmap_page()
     111: debug_pagealloc_unmap_pages(page, 1);    in hibernate_unmap_page()
     115: static int swsusp_page_is_free(struct page *);
     116: static void swsusp_set_page_forbidden(struct page *);
     117: static void swsusp_unset_page_forbidden(struct page *);
     [all …]
|
| D | swap.c |
     250: struct page *page = bio_first_page_all(bio);    in hib_end_io() local
     259: put_page(page);    in hib_end_io()
     261: flush_icache_range((unsigned long)page_address(page),    in hib_end_io()
     262: (unsigned long)page_address(page) + PAGE_SIZE);    in hib_end_io()
     275: struct page *page = virt_to_page(addr);    in hib_submit_io() local
     283: if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {    in hib_submit_io()
     712: unsigned char *page = NULL;    in save_compressed_image() local
     727: page = (void *)__get_free_page(GFP_NOIO | __GFP_HIGH);    in save_compressed_image()
     728: if (!page) {    in save_compressed_image()
     875: memcpy(page, data[thr].cmp + off, PAGE_SIZE);    in save_compressed_image()
     [all …]
|
| /kernel/events/ |
| D | ring_buffer.c |
     242: handle->page = (offset >> page_shift) & (rb->nr_pages - 1);    in __perf_output_begin()
     244: handle->addr = rb->data_pages[handle->page] + offset;    in __perf_output_begin()
     615: static struct page *rb_alloc_aux_page(int node, int order)    in rb_alloc_aux_page()
     617: struct page *page;    in rb_alloc_aux_page() local
     623: page = alloc_pages_node(node, PERF_AUX_GFP, order);    in rb_alloc_aux_page()
     624: } while (!page && order--);    in rb_alloc_aux_page()
     626: if (page && order) {    in rb_alloc_aux_page()
     633: split_page(page, order);    in rb_alloc_aux_page()
     634: SetPagePrivate(page);    in rb_alloc_aux_page()
     635: set_page_private(page, order);    in rb_alloc_aux_page()
     [all …]
|
| D | uprobes.c |
     109: struct page *page;    member
     165: struct page *old_page, struct page *new_page)    in __replace_page()
     262: static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)    in copy_from_page() argument
     264: void *kaddr = kmap_atomic(page);    in copy_from_page()
     269: static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)    in copy_to_page() argument
     271: void *kaddr = kmap_atomic(page);    in copy_to_page()
     276: static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)    in verify_opcode() argument
     290: copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);    in verify_opcode()
     390: struct page *page;    in __update_ref_ctr() local
     398: FOLL_WRITE, &page, NULL);    in __update_ref_ctr()
     [all …]
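
copy_from_page() and copy_to_page() above wrap the classic kmap_atomic() pattern for touching page contents that may live in highmem. A minimal sketch of the same pattern (assumed helper name; newer code would typically use kmap_local_page() instead):

```c
#include <linux/highmem.h>
#include <linux/string.h>

/* Sketch: copy len bytes out of a (possibly highmem) page at a byte offset. */
static void sketch_copy_from_page(struct page *page, unsigned long offset,
				  void *dst, int len)
{
	void *kaddr = kmap_atomic(page);	/* temporary kernel mapping */

	memcpy(dst, kaddr + offset, len);
	kunmap_atomic(kaddr);
}
```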
|
| D | internal.h |
      98: extern struct page *
     153: handle->page++; \
     154: handle->page &= rb->nr_pages - 1; \
     155: handle->addr = rb->data_pages[handle->page]; \
|
| /kernel/module/ |
| D | decompress.c |
      19: struct page **new_pages;    in module_extend_max_pages()
      34: static struct page *module_get_next_page(struct load_info *info)    in module_get_next_page()
      36: struct page *page;    in module_get_next_page() local
      45: page = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);    in module_get_next_page()
      46: if (!page)    in module_get_next_page()
      49: info->pages[info->used_pages++] = page;    in module_get_next_page()
      50: return page;    in module_get_next_page()
     115: struct page *page = module_get_next_page(info);    in module_gzip_decompress() local
     117: if (IS_ERR(page)) {    in module_gzip_decompress()
     118: retval = PTR_ERR(page);    in module_gzip_decompress()
     [all …]
|
| /kernel/bpf/ |
| D | arena.c |
     146: struct page *page;    in existing_page_cb() local
     152: page = pte_page(pte);    in existing_page_cb()
     160: __free_page(page);    in existing_page_cb()
     260: struct page *page;    in arena_vm_fault() local
     268: page = vmalloc_to_page((void *)kaddr);    in arena_vm_fault()
     269: if (page)    in arena_vm_fault()
     282: ret = bpf_map_alloc_pages(map, GFP_KERNEL | __GFP_ZERO, NUMA_NO_NODE, 1, &page);    in arena_vm_fault()
     288: ret = vm_area_map_pages(arena->kern_vm, kaddr, kaddr + PAGE_SIZE, &page);    in arena_vm_fault()
     291: __free_page(page);    in arena_vm_fault()
     295: page_ref_add(page, 1);    in arena_vm_fault()
     [all …]
|
| D | ringbuf.c |
      30: struct page **pages;
      96: struct page **pages, *page;    in bpf_ringbuf_area_alloc() local
     124: page = alloc_pages_node(numa_node, flags, 0);    in bpf_ringbuf_area_alloc()
     125: if (!page) {    in bpf_ringbuf_area_alloc()
     129: pages[i] = page;    in bpf_ringbuf_area_alloc()
     131: pages[nr_data_pages + i] = page;    in bpf_ringbuf_area_alloc()
     221: struct page **pages = rb->pages;    in bpf_ringbuf_free()
     346: usage += (nr_meta_pages + 2 * nr_data_pages) * sizeof(struct page *);    in ringbuf_map_mem_usage()
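
In bpf_ringbuf_area_alloc() each data page pointer is stored twice in the pages array (lines 129 and 131 above) so that the subsequent virtual mapping places the data region back to back; a record that wraps past the end of the ring can then be read or written without being split. The following is a sketch of just that mirroring step, under assumed names, not the file's actual code:

```c
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/* Sketch: map nr data pages twice in a row so the buffer "wraps" virtually. */
static void *sketch_mirror_map(struct page **data_pages, int nr)
{
	struct page **pages;
	void *addr;
	int i;

	pages = kcalloc(2 * nr, sizeof(*pages), GFP_KERNEL);
	if (!pages)
		return NULL;

	for (i = 0; i < nr; i++) {
		pages[i] = data_pages[i];
		pages[nr + i] = data_pages[i];	/* second, mirrored mapping */
	}

	addr = vmap(pages, 2 * nr, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	return addr;
}
```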
|
| /kernel/trace/ |
| D | ring_buffer.c |
     337: struct buffer_data_page *page; /* Actual data page */    member
     362: return local_read(&bpage->page->commit);    in rb_page_commit()
     369: free_pages((unsigned long)bpage->page, bpage->order);    in free_buffer_page()
     595: struct buffer_page *page = cpu_buffer->commit_page;    in verify_event() local
     605: if (page == tail_page || WARN_ON_ONCE(stop++ > 100))    in verify_event()
     607: commit = local_read(&page->page->commit);    in verify_event()
     608: write = local_read(&page->write);    in verify_event()
     609: if (addr >= (unsigned long)&page->page->data[commit] &&    in verify_event()
     610: addr < (unsigned long)&page->page->data[write])    in verify_event()
     613: next = rb_list_head(page->list.next);    in verify_event()
     [all …]
|
| D | trace_sched_switch.c |
     204: struct page *page;    in allocate_cmdlines_buffer() local
     212: page = alloc_pages(GFP_KERNEL, order);    in allocate_cmdlines_buffer()
     213: if (!page)    in allocate_cmdlines_buffer()
     216: s = page_address(page);    in allocate_cmdlines_buffer()
|