
Searched refs: page_address (Results 1 – 15 of 15) sorted by relevance
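For orientation: page_address() (declared in include/linux/mm.h) maps a struct page * to the page's kernel virtual address. Lowmem pages get their linear-map address; a highmem page that is not currently kmapped yields NULL, which is why the callers below work with lowmem allocations. A minimal sketch of the conversion, with a hypothetical function name:

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static void page_address_demo(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	void *va;

	if (!page)
		return;

	va = page_address(page);	/* linear-map kernel virtual address */
	memset(va, 0, PAGE_SIZE);	/* directly usable, no kmap() needed */
	__free_page(page);
}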

/kernel/power/
snapshot.c
61 static inline void hibernate_restore_protect_page(void *page_address) in hibernate_restore_protect_page() argument
64 set_memory_ro((unsigned long)page_address, 1); in hibernate_restore_protect_page()
67 static inline void hibernate_restore_unprotect_page(void *page_address) in hibernate_restore_unprotect_page() argument
70 set_memory_rw((unsigned long)page_address, 1); in hibernate_restore_unprotect_page()
75 static inline void hibernate_restore_protect_page(void *page_address) {} in hibernate_restore_protect_page() argument
76 static inline void hibernate_restore_unprotect_page(void *page_address) {} in hibernate_restore_unprotect_page() argument
203 static void recycle_safe_page(void *page_address) in recycle_safe_page() argument
205 struct linked_page *lp = page_address; in recycle_safe_page()
1357 do_copy_page(dst, page_address(s_page)); in safe_copy_page()
1360 do_copy_page(dst, page_address(s_page)); in safe_copy_page()
[all …]
swap.c
259 flush_icache_range((unsigned long)page_address(page), in hib_end_io()
260 (unsigned long)page_address(page) + PAGE_SIZE); in hib_end_io()
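The snapshot.c hits show two faces of one idiom: page_address() (or a plain void * parameter named page_address in the protect/unprotect helpers) supplies the linear address that set_memory_ro()/set_memory_rw() and do_copy_page() act on, while swap.c flushes the icache over the same [page_address(page), page_address(page) + PAGE_SIZE) range after I/O. A minimal sketch of the protect side, assuming a lowmem GFP_KERNEL page (the helper name is hypothetical):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/set_memory.h>

static void *alloc_protected_restore_page(void)
{
	struct page *page = alloc_page(GFP_KERNEL);
	void *addr;

	if (!page)
		return NULL;

	addr = page_address(page);
	/* One page becomes read-only so stray writes fault until the
	 * restore path calls set_memory_rw() on the same address. */
	set_memory_ro((unsigned long)addr, 1);
	return addr;
}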
/kernel/
kexec_core.c
315 arch_kexec_post_alloc_pages(page_address(pages), count, in kimage_alloc_pages()
333 arch_kexec_pre_free_pages(page_address(page), count); in kimage_free_pages()
477 arch_kexec_post_alloc_pages(page_address(pages), 1 << order, 0); in kimage_alloc_crash_control_pages()
547 ind_page = page_address(page); in kimage_add_entry()
881 arch_kexec_post_alloc_pages(page_address(page), 1, 0); in kimage_load_crash_segment()
899 arch_kexec_pre_free_pages(page_address(page), 1); in kimage_load_crash_segment()
profile.c
376 per_cpu(cpu_profile_hits, cpu)[i] = page_address(page); in profile_prepare_cpu()
fork.c
271 tsk->stack = kasan_reset_tag(page_address(page)); in alloc_thread_stack_node()
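The fork.c hit is the most self-contained of this group: the thread stack is an order-THREAD_SIZE_ORDER block whose linear address, with KASAN tag bits stripped, becomes tsk->stack. A hedged sketch of that shape (hypothetical helper; the real code uses fork.c's own GFP flags):

#include <linux/gfp.h>
#include <linux/kasan.h>
#include <linux/mm.h>
#include <linux/thread_info.h>

static void *alloc_stack_like(int node)
{
	struct page *page = alloc_pages_node(node, GFP_KERNEL,
					     THREAD_SIZE_ORDER);

	if (!page)
		return NULL;

	/* page_address() gives the base of the 2^order contiguous run;
	 * kasan_reset_tag() drops the tag bits before the pointer is
	 * stored, as in the alloc_thread_stack_node() hit above. */
	return kasan_reset_tag(page_address(page));
}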
/kernel/dma/
direct.c
212 ret = page_address(page); in dma_direct_alloc()
235 err = set_memory_encrypted((unsigned long)page_address(page), in dma_direct_alloc()
303 ret = page_address(page); in dma_direct_alloc_pages()
320 void *vaddr = page_address(page); in dma_direct_free_pages()
virt.c
34 return (uintptr_t)(page_address(page) + offset); in dma_virt_map_page()
ops_helpers.c
80 memset(page_address(page), 0, size); in dma_common_alloc_pages()
debug.c
1062 addr = page_address(page) + offset; in check_for_stack()
1242 void *addr = page_address(page) + offset; in debug_dma_map_page()
mapping.c
529 return page_address(page); in dma_alloc_noncoherent()
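Across the DMA hits the role is uniform: dma_direct_alloc(), dma_common_alloc_pages(), and dma_alloc_noncoherent() return the kernel virtual address of freshly allocated pages, zeroing (or re-encrypting) through that mapping first. A rough condensation of the shared shape (hypothetical helper, error handling trimmed):

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/string.h>

static void *dma_pages_vaddr(size_t size, gfp_t gfp)
{
	struct page *page = alloc_pages(gfp, get_order(size));

	if (!page)
		return NULL;

	/* Zero through the linear map, as dma_common_alloc_pages() does,
	 * then hand the caller that same address. */
	memset(page_address(page), 0, size);
	return page_address(page);
}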
/kernel/events/
ring_buffer.c
717 rb->aux_pages[rb->aux_nr_pages] = page_address(page++); in rb_alloc_aux()
798 return page_address(page); in perf_mmap_alloc_page()
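Both events/ring_buffer.c hits convert freshly allocated pages into CPU-addressable slots; rb_alloc_aux() walks a higher-order allocation one struct page at a time (the page++), which works because contiguous struct page entries of a lowmem run map to contiguous virtual addresses. A sketch under that assumption (the kernel also split_page()s the block; names here are illustrative):

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>

static int record_aux_pages(void **slots, unsigned int order)
{
	struct page *page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	unsigned int i;

	if (!page)
		return -ENOMEM;

	/* slots[i] advances by PAGE_SIZE as page + i advances by one. */
	for (i = 0; i < (1U << order); i++)
		slots[i] = page_address(page + i);
	return 0;
}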
/kernel/time/
namespace.c
220 vdata = arch_get_vdso_data(page_address(ns->vvar_page)); in timens_set_vvar_page()
/kernel/trace/
ring_buffer.c
1550 bpage->page = page_address(page); in __rb_allocate_pages()
1632 bpage->page = page_address(page); in rb_allocate_cpu_buffer()
5358 bpage = page_address(page); in ring_buffer_alloc_read_page()
trace_uprobe.c
876 per_cpu_ptr(uprobe_cpu_buffer, cpu)->buf = page_address(p); in uprobe_buffer_init()
trace.c
2282 s = page_address(page); in allocate_cmdlines_buffer()
2683 event = page_address(page); in trace_buffered_event_enable()
6571 page_address(spd.pages[i]), in tracing_splice_read_pipe()
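The trace hits all follow one bookkeeping pattern: a buffer page is allocated (NUMA-locally in __rb_allocate_pages() and rb_allocate_cpu_buffer()) and its page_address() is stashed in a descriptor for later reads and splices. A sketch with an illustrative descriptor type:

#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/topology.h>

struct demo_buffer_page {
	void *page;	/* linear address of the backing page */
};

static int fill_buffer_page(struct demo_buffer_page *bpage, int cpu)
{
	struct page *page = alloc_pages_node(cpu_to_node(cpu),
					     GFP_KERNEL | __GFP_ZERO, 0);

	if (!page)
		return -ENOMEM;

	bpage->page = page_address(page);
	return 0;
}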