Searched refs:page_address (Results 1 – 15 of 15) sorted by relevance
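All of these hits resolve a struct page into a CPU-usable pointer: page_address() returns the kernel virtual address of the page's linear ("lowmem") mapping, and returns NULL for a highmem page that is not currently kmapped, which is why the callers below all operate on directly mapped allocations.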
/kernel/power/
D | snapshot.c
      61  static inline void hibernate_restore_protect_page(void *page_address)      in hibernate_restore_protect_page() argument
      64          set_memory_ro((unsigned long)page_address, 1);                     in hibernate_restore_protect_page()
      67  static inline void hibernate_restore_unprotect_page(void *page_address)    in hibernate_restore_unprotect_page() argument
      70          set_memory_rw((unsigned long)page_address, 1);                     in hibernate_restore_unprotect_page()
      75  static inline void hibernate_restore_protect_page(void *page_address) {}   in hibernate_restore_protect_page() argument
      76  static inline void hibernate_restore_unprotect_page(void *page_address) {} in hibernate_restore_unprotect_page() argument
     203  static void recycle_safe_page(void *page_address)                          in recycle_safe_page() argument
     205          struct linked_page *lp = page_address;                             in recycle_safe_page()
    1357          do_copy_page(dst, page_address(s_page));                           in safe_copy_page()
    1360          do_copy_page(dst, page_address(s_page));                           in safe_copy_page()
    [all …]
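Note that in the first eight hits page_address is a plain void * parameter (hence the "argument" tags), not the page_address() macro; only the safe_copy_page() hits call the macro. The protect/unprotect pair flips one page of the kernel's direct map between read-only and read-write. A minimal sketch of that pattern, assuming an architecture where set_memory_ro()/set_memory_rw() operate on the linear mapping (helper names are hypothetical):

    #include <linux/set_memory.h>

    /* Write-protect one directly mapped page; the second argument of
     * set_memory_ro()/set_memory_rw() is a page count, not bytes. */
    static void protect_one_page(void *vaddr)
    {
            set_memory_ro((unsigned long)vaddr, 1);
    }

    static void unprotect_one_page(void *vaddr)
    {
            set_memory_rw((unsigned long)vaddr, 1);
    }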
D | swap.c
     259          flush_icache_range((unsigned long)page_address(page),              in hib_end_io()
     260                             (unsigned long)page_address(page) + PAGE_SIZE);  in hib_end_io()
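hib_end_io() flushes the instruction cache over a page just read from the swap image, presumably because a hibernation image contains kernel text. A sketch of the idiom for a directly mapped page (hypothetical helper name; flush_icache_range() is a no-op on architectures with coherent instruction caches):

    #include <linux/mm.h>
    #include <asm/cacheflush.h>

    /* Make a possibly executable page coherent with the instruction cache. */
    static void sync_icache_page(struct page *page)
    {
            unsigned long start = (unsigned long)page_address(page);

            flush_icache_range(start, start + PAGE_SIZE);
    }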
/kernel/ |
D | kexec_core.c
     315          arch_kexec_post_alloc_pages(page_address(pages), count,            in kimage_alloc_pages()
     333          arch_kexec_pre_free_pages(page_address(page), count);              in kimage_free_pages()
     477          arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0);   in kimage_alloc_crash_control_pages()
     547          ind_page = page_address(page);                                     in kimage_add_entry()
     881          arch_kexec_post_alloc_pages(page_address(page), 1, 0);             in kimage_load_crash_segment()
     899          arch_kexec_pre_free_pages(page_address(page), 1);                  in kimage_load_crash_segment()
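The kexec hits all bracket a page's lifetime with arch hooks that take the linear address: arch_kexec_post_alloc_pages() right after allocation and arch_kexec_pre_free_pages() right before freeing (the generic versions are no-ops). A hedged sketch of that pairing, with hypothetical wrapper names:

    #include <linux/gfp.h>
    #include <linux/kexec.h>
    #include <linux/mm.h>

    /* Give the arch a look at freshly allocated pages via their linear
     * address, and again just before they go back to the allocator. */
    static struct page *kexec_style_alloc(gfp_t gfp_mask, unsigned int order)
    {
            struct page *pages = alloc_pages(gfp_mask, order);

            if (!pages)
                    return NULL;
            if (arch_kexec_post_alloc_pages(page_address(pages), 1 << order,
                                            gfp_mask)) {
                    __free_pages(pages, order);
                    return NULL;
            }
            return pages;
    }

    static void kexec_style_free(struct page *pages, unsigned int order)
    {
            arch_kexec_pre_free_pages(page_address(pages), 1 << order);
            __free_pages(pages, order);
    }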
D | profile.c
     376          per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);            in profile_prepare_cpu()
D | fork.c
     271          tsk->stack = kasan_reset_tag(page_address(page));                  in alloc_thread_stack_node()
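When thread stacks come straight from the page allocator (no CONFIG_VMAP_STACK), fork stores the stack's linear address with the KASAN tag bits cleared so later pointer comparisons are not confused by memory tags. A sketch under those assumptions; THREAD_SIZE_ORDER and THREADINFO_GFP are the kernel's own stack-allocation parameters, the function name is hypothetical:

    #include <linux/gfp.h>
    #include <linux/kasan.h>
    #include <linux/mm.h>
    #include <linux/thread_info.h>

    static void *alloc_stack_sketch(int node)
    {
            struct page *page = alloc_pages_node(node, THREADINFO_GFP,
                                                 THREAD_SIZE_ORDER);

            /* kasan_reset_tag() returns the pointer with tag bits cleared. */
            return page ? kasan_reset_tag(page_address(page)) : NULL;
    }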
/kernel/dma/ |
D | direct.c
     212          ret = page_address(page);                                          in dma_direct_alloc()
     235          err = set_memory_encrypted((unsigned long)page_address(page),      in dma_direct_alloc()
     303          ret = page_address(page);                                          in dma_direct_alloc_pages()
     320          void *vaddr = page_address(page);                                  in dma_direct_free_pages()
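dma-direct returns page_address(page) as the CPU side of the buffer, and the same linear address is what has its encryption attribute toggled for device access. A heavily reduced sketch of that pairing (the real dma_direct_alloc() also handles remapping, addressing limits, and a more careful error path; names here are hypothetical):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/set_memory.h>

    static void *direct_alloc_sketch(size_t size, gfp_t gfp)
    {
            int order = get_order(size);
            struct page *page = alloc_pages(gfp | __GFP_ZERO, order);
            void *ret;

            if (!page)
                    return NULL;
            ret = page_address(page);
            /* Expose the buffer to the device on encrypted-memory systems. */
            if (set_memory_decrypted((unsigned long)ret, 1 << order)) {
                    __free_pages(page, order);   /* real code is more careful */
                    return NULL;
            }
            return ret;
    }

    static void direct_free_sketch(void *vaddr, size_t size)
    {
            int order = get_order(size);

            set_memory_encrypted((unsigned long)vaddr, 1 << order);
            free_pages((unsigned long)vaddr, order);
    }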
D | virt.c
      34          return (uintptr_t)(page_address(page) + offset);                   in dma_virt_map_page()
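dma/virt.c is the simplest hit in the set: for software "devices" there is no bus address space, so the returned DMA handle is literally the page's kernel virtual address plus the offset into the page. Reconstructed as a standalone sketch (hypothetical name):

    #include <linux/mm.h>
    #include <linux/types.h>

    /* For a virtual device the "DMA address" is just vaddr + offset. */
    static dma_addr_t virt_map_sketch(struct page *page, unsigned long offset)
    {
            return (uintptr_t)(page_address(page) + offset);
    }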
D | ops_helpers.c
      80          memset(page_address(page), 0, size);                               in dma_common_alloc_pages()
D | debug.c
    1062          addr = page_address(page) + offset;                                in check_for_stack()
    1242          void *addr = page_address(page) + offset;                          in debug_dma_map_page()
D | mapping.c
     529          return page_address(page);                                         in dma_alloc_noncoherent()
/kernel/events/ |
D | ring_buffer.c
     717          rb->aux_pages[rb->aux_nr_pages] = page_address(page++);            in rb_alloc_aux()
     798          return page_address(page);                                         in perf_mmap_alloc_page()
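perf_mmap_alloc_page() is the canonical "one zeroed, node-local page, return its linear address" helper, and rb_alloc_aux() collects such addresses into the AUX page array. A close sketch of the allocation side (function name hypothetical):

    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/numa.h>
    #include <linux/topology.h>

    static void *mmap_page_sketch(int cpu)
    {
            int node = (cpu == -1) ? NUMA_NO_NODE : cpu_to_node(cpu);
            struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);

            return page ? page_address(page) : NULL;
    }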
/kernel/time/ |
D | namespace.c
     220          vdata = arch_get_vdso_data(page_address(ns->vvar_page));           in timens_set_vvar_page()
/kernel/trace/ |
D | ring_buffer.c
    1550          bpage->page = page_address(page);                                  in __rb_allocate_pages()
    1632          bpage->page = page_address(page);                                  in rb_allocate_cpu_buffer()
    5358          bpage = page_address(page);                                        in ring_buffer_alloc_read_page()
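The ftrace ring buffer keeps a small per-page descriptor whose ->page member points at the data page's linear mapping; the struct page itself is not stored. A sketch of that fill step, with a deliberately simplified, hypothetical descriptor type:

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>

    struct bpage_sketch {
            void *page;     /* linear address of the backing data page */
    };

    static int attach_data_page(struct bpage_sketch *bpage, int node)
    {
            struct page *page = alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);

            if (!page)
                    return -ENOMEM;
            bpage->page = page_address(page);
            return 0;
    }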
D | trace_uprobe.c
     876          per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p);        in uprobe_buffer_init()
D | trace.c
    2282          s = page_address(page);                                            in allocate_cmdlines_buffer()
    2683          event = page_address(page);                                        in trace_buffered_event_enable()
    6571                             page_address(spd.pages[i]),                     in tracing_splice_read_pipe()
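allocate_cmdlines_buffer() shows the multi-page form: allocate an order-N block and use page_address() of the head page as one physically contiguous buffer. A sketch, assuming the buffer is later released with free_pages() at the same order (name hypothetical):

    #include <linux/gfp.h>
    #include <linux/mm.h>

    static char *alloc_contig_buffer(size_t size)
    {
            struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO,
                                            get_order(size));

            /* One linear address covers the whole order-N allocation. */
            return page ? page_address(page) : NULL;
    }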