Searched refs:page_address (Results 1 – 15 of 15) sorted by relevance
/kernel/power/
snapshot.c
      61  static inline void hibernate_restore_protect_page(void *page_address)      in hibernate_restore_protect_page() argument
      64  set_memory_ro((unsigned long)page_address, 1);                              in hibernate_restore_protect_page()
      67  static inline void hibernate_restore_unprotect_page(void *page_address)    in hibernate_restore_unprotect_page() argument
      70  set_memory_rw((unsigned long)page_address, 1);                              in hibernate_restore_unprotect_page()
      75  static inline void hibernate_restore_protect_page(void *page_address) {}   in hibernate_restore_protect_page() argument
      76  static inline void hibernate_restore_unprotect_page(void *page_address) {} in hibernate_restore_unprotect_page() argument
     101  unsigned long addr = (unsigned long)page_address(page);                     in hibernate_unmap_page()
     237  static void recycle_safe_page(void *page_address)                           in recycle_safe_page() argument
     239  struct linked_page *lp = page_address;                                      in recycle_safe_page()
    1413  zeros_only = do_copy_page(dst, page_address(s_page));                       in safe_copy_page()
    [all …]
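The snapshot.c hits show the conversion hibernation restore relies on: a lowmem page is turned into its linear-map address so the mapping can be made read-only while the saved image is copied in, then writable again. A minimal sketch of that pattern (the helper names below are illustrative, not the kernel's):

    #include <linux/mm.h>           /* page_address() */
    #include <linux/set_memory.h>   /* set_memory_ro(), set_memory_rw() */

    /* Sketch of the pattern above; not the kernel's actual implementation. */
    static void protect_restore_page(struct page *page)
    {
            unsigned long addr = (unsigned long)page_address(page);

            set_memory_ro(addr, 1);         /* protect exactly one page */
    }

    static void unprotect_restore_page(struct page *page)
    {
            unsigned long addr = (unsigned long)page_address(page);

            set_memory_rw(addr, 1);
    }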
swap.c
     263  flush_icache_range((unsigned long)page_address(page),        in hib_end_io()
     264          (unsigned long)page_address(page) + PAGE_SIZE);      in hib_end_io()
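The swap.c hit is the same conversion used for cache maintenance: once a page has been read back from the swap image, the instruction cache is flushed over that page's linear mapping. A minimal sketch (the helper name is illustrative):

    #include <linux/mm.h>
    #include <asm/cacheflush.h>     /* flush_icache_range() */

    /* Sketch only: drop stale icache lines covering a page just read from the image. */
    static void flush_restored_page(struct page *page)
    {
            unsigned long start = (unsigned long)page_address(page);

            flush_icache_range(start, start + PAGE_SIZE);
    }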
/kernel/
kexec_core.c
     316  arch_kexec_post_alloc_pages(page_address(pages), count,           in kimage_alloc_pages()
     334  arch_kexec_pre_free_pages(page_address(page), count);             in kimage_free_pages()
     478  arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);  in kimage_alloc_crash_control_pages()
     548  ind_page = page_address(page);                                    in kimage_add_entry()
     875  arch_kexec_post_alloc_pages(page_address(page), 1, 0);            in kimage_load_crash_segment()
     893  arch_kexec_pre_free_pages(page_address(page), 1);                 in kimage_load_crash_segment()
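kexec_core.c converts freshly allocated kimage pages to their kernel virtual address before handing them to the arch hooks (used, for example, to adjust memory encryption attributes). A reduced sketch of the allocate-then-notify pattern, assuming the arch_kexec_post_alloc_pages()/arch_kexec_pre_free_pages() prototypes from <linux/kexec.h>; the *_sketch names are illustrative:

    #include <linux/gfp.h>
    #include <linux/kexec.h>
    #include <linux/mm.h>

    /* Sketch only; the real kimage allocator does more bookkeeping. */
    static struct page *kimage_alloc_pages_sketch(gfp_t gfp_mask, unsigned int order)
    {
            struct page *pages = alloc_pages(gfp_mask, order);

            if (!pages)
                    return NULL;

            /* The arch hook operates on the linear address, not on struct page. */
            if (arch_kexec_post_alloc_pages(page_address(pages), 1 << order, gfp_mask)) {
                    __free_pages(pages, order);
                    return NULL;
            }
            return pages;
    }

    static void kimage_free_pages_sketch(struct page *page, unsigned int order)
    {
            arch_kexec_pre_free_pages(page_address(page), 1 << order);
            __free_pages(page, order);
    }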
profile.c
     352  per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);  in profile_prepare_cpu()
fork.c
     371  tsk->stack = kasan_reset_tag(page_address(page));  in alloc_thread_stack_node()
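The fork.c hit is the non-vmalloc thread-stack path: the stack is simply the linear address of a compound page allocation, with any KASAN memory tag stripped so stack accesses are not tag-checked. Sketch, assuming THREAD_SIZE_ORDER and THREADINFO_GFP as defined by the architecture; the function name is illustrative:

    #include <linux/gfp.h>
    #include <linux/kasan.h>
    #include <linux/mm.h>
    #include <linux/thread_info.h>

    /* Sketch of the page-based stack allocation, not the kernel's full version. */
    static void *alloc_thread_stack_sketch(int node)
    {
            struct page *page = alloc_pages_node(node, THREADINFO_GFP,
                                                 THREAD_SIZE_ORDER);

            if (!page)
                    return NULL;

            /* Strip the KASAN tag: the stack is addressed through an untagged SP. */
            return kasan_reset_tag(page_address(page));
    }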
/kernel/dma/
direct.c
     300  ret = page_address(page);                               in dma_direct_alloc()
     318  if (dma_set_encrypted(dev, page_address(page), size))   in dma_direct_alloc()
     385  ret = page_address(page);                               in dma_direct_alloc_pages()
     400  void *vaddr = page_address(page);                       in dma_direct_free_pages()
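In dma/direct.c the CPU address handed back to the driver is just page_address() of the allocated pages, which only works because the allocation is steered to memory covered by the kernel's linear map. A simplified sketch of that path (the remapping, encryption, and error paths of the real dma_direct_alloc() are omitted, and the function name is illustrative):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/string.h>

    /* Sketch only: allocate lowmem pages and return their linear address. */
    static void *direct_alloc_sketch(size_t size, struct page **out_page, gfp_t gfp)
    {
            unsigned int order = get_order(size);
            struct page *page = alloc_pages(gfp, order);
            void *ret;

            if (!page)
                    return NULL;

            ret = page_address(page);       /* lowmem, so always mapped */
            memset(ret, 0, size);           /* same zeroing as dma_common_alloc_pages() */

            *out_page = page;
            return ret;
    }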
ops_helpers.c
      82  memset(page_address(page), 0, size);  in dma_common_alloc_pages()
debug.c
    1059  addr = page_address(page) + offset;        in check_for_stack()
    1230  void *addr = page_address(page) + offset;  in debug_dma_map_page()
mapping.c
     687  return page_address(sg_page(sgt->sgl));  in dma_vmap_noncontiguous()
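The mapping.c hit is the shortcut taken by dma_vmap_noncontiguous() when the allocation happens to be physically contiguous: instead of building a new vmap, the linear address of the first (and only) scatterlist segment is reused. Sketch with an illustrative helper name:

    #include <linux/mm.h>
    #include <linux/scatterlist.h>

    /* Sketch: for a single-segment sg_table the existing linear mapping suffices. */
    static void *contiguous_sgt_vaddr(struct sg_table *sgt)
    {
            return page_address(sg_page(sgt->sgl));
    }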
/kernel/events/
ring_buffer.c
     725  rb->aux_pages[rb->aux_nr_pages] = page_address(page++);  in rb_alloc_aux()
     803  return page_address(page);                               in perf_mmap_alloc_page()
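events/ring_buffer.c keeps plain arrays of per-page virtual addresses for the AUX and mmap buffers; each slot is filled from page_address() right after the page is allocated on the buffer's preferred node. A sketch of filling such an array (names are illustrative, and cleanup of already-allocated pages on failure is left out):

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    /* Sketch only: allocate nr_pages order-0 pages and record their vaddrs. */
    static int fill_page_vaddrs(void **slots, int nr_pages, int node)
    {
            int i;

            for (i = 0; i < nr_pages; i++) {
                    struct page *page = alloc_pages_node(node,
                                                         GFP_KERNEL | __GFP_ZERO, 0);

                    if (!page)
                            return -ENOMEM;

                    slots[i] = page_address(page);  /* store the vaddr, not the page */
            }
            return 0;
    }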
/kernel/time/
namespace.c
     220  vdata = arch_get_vdso_data(page_address(ns->vvar_page));  in timens_set_vvar_page()
/kernel/trace/
ring_buffer.c
    1608  bpage->page = page_address(page);  in __rb_allocate_pages()
    1690  bpage->page = page_address(page);  in rb_allocate_cpu_buffer()
    5765  bpage = page_address(page);        in ring_buffer_alloc_read_page()
trace_events_user.c
     255  group->register_page_data = page_address(group->pages);  in user_event_group_create()
trace_uprobe.c
     880  per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);  in uprobe_buffer_init()
trace.c
    2292  s = page_address(page);              in allocate_cmdlines_buffer()
    2703  event = page_address(page);          in trace_buffered_event_enable()
    7007  page_address(spd.pages[i]),          in tracing_splice_read_pipe()