/kernel/power/

snapshot.c
    78: static int swsusp_page_is_free(struct page *);
    79: static void swsusp_set_page_forbidden(struct page *);
    80: static void swsusp_unset_page_forbidden(struct page *);
  in alloc_image_page():
    190: static struct page *alloc_image_page(gfp_t gfp_mask)
    192: struct page *page;  (local)
    194: page = alloc_page(gfp_mask);
    195: if (page) {
    196: swsusp_set_page_forbidden(page);
    197: swsusp_set_page_free(page);
    199: return page;
  [all …]

swap.c
  in hib_end_io():
    241: struct page *page = bio->bi_io_vec[0].bv_page;  (local)
    251: put_page(page);
    253: flush_icache_range((unsigned long)page_address(page),
    254: (unsigned long)page_address(page) + PAGE_SIZE);
  in hib_submit_io():
    267: struct page *page = virt_to_page(addr);  (local)
    276: if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) {
  in save_image_lzo():
    681: unsigned char *page = NULL;  (local)
    694: page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH);
    695: if (!page) {
    843: memcpy(page, data[thr].cmp + off, PAGE_SIZE);
  [all …]

/kernel/

kexec_core.c
    151: static struct page *kimage_alloc_page(struct kimage *image,
  in kimage_alloc_pages():
    305: static struct page *kimage_alloc_pages(gfp_t gfp_mask, unsigned int order)
    307: struct page *pages;  (local)
  in kimage_free_pages():
    323: static void kimage_free_pages(struct page *page)  (argument)
    327: order = page_private(page);
    330: ClearPageReserved(page + i);
    331: __free_pages(page, order);
  in kimage_free_page_list():
    336: struct page *page, *next;  (local)
    338: list_for_each_entry_safe(page, next, list, lru) {
    339: list_del(&page->lru);
  [all …]

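The kimage_free_pages() hits above show the allocation order being read back from page_private(page), where the allocation path stashed it, so the free path needs no size from the caller. A hypothetical userspace analogue of that bookkeeping, with an invented header struct standing in for page_private (a sketch, not kernel code):

```c
#include <stdio.h>
#include <stdlib.h>

struct blk_hdr { size_t order; };  /* plays the role of page_private */

static void *order_alloc(size_t order)
{
	size_t size = (size_t)4096 << order;  /* 2^order "pages" */
	struct blk_hdr *h = malloc(sizeof(*h) + size);
	if (!h)
		return NULL;
	h->order = order;                     /* like set_page_private(page, order) */
	return h + 1;
}

static void order_free(void *p)
{
	struct blk_hdr *h = (struct blk_hdr *)p - 1;
	/* like order = page_private(page); __free_pages(page, order); */
	printf("freeing order-%zu block (%zu bytes)\n",
	       h->order, (size_t)4096 << h->order);
	free(h);
}

int main(void)
{
	void *p = order_alloc(3);  /* 8 "pages" */
	if (p)
		order_free(p);
	return 0;
}
```
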
memremap.c
  in get_zone_device_page():
    185: void get_zone_device_page(struct page *page)  (argument)
    187: percpu_ref_get(page->pgmap->ref);
  in put_zone_device_page():
    191: void put_zone_device_page(struct page *page)  (argument)
    193: put_dev_pagemap(page->pgmap);
  in devm_memremap_pages():
    380: struct page *page = pfn_to_page(pfn);  (local)
    388: list_del(&page->lru);
    389: page->pgmap = pgmap;
  in to_vmem_altmap():
    425: struct page *page = (struct page *) memmap_start;  (local)
    435: pgmap = find_dev_pagemap(__pfn_to_phys(page_to_pfn(page)));

pid.c
    62: find_next_zero_bit((map)->page, BITS_PER_PAGE, off)
  in free_pidmap():
    111: clear_bit(offset, map->page);
  in alloc_pidmap():
    171: if (unlikely(!map->page)) {
    172: void *page = kzalloc(PAGE_SIZE, GFP_KERNEL);  (local)
    178: if (!map->page) {
    179: map->page = page;
    180: page = NULL;
    183: kfree(page);
    184: if (unlikely(!map->page))
    189: if (!test_and_set_bit(offset, map->page)) {
  [all …]

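alloc_pidmap() above treats each pidmap page as a bitmap: a PID is claimed by atomically setting its bit (test_and_set_bit()) and released by clearing it. A minimal runnable sketch of the same idea; the names idmap_alloc()/idmap_free() and the map size are invented for the demo:

```c
#include <stdio.h>
#include <stdatomic.h>

#define NBITS 256
#define WORD_BITS (8 * (int)sizeof(unsigned long))

static _Atomic unsigned long map[NBITS / WORD_BITS];

static int idmap_alloc(void)
{
	for (int bit = 0; bit < NBITS; bit++) {
		unsigned long mask = 1UL << (bit % WORD_BITS);
		/* test_and_set_bit(): claim the bit only if it was clear */
		unsigned long old = atomic_fetch_or(&map[bit / WORD_BITS], mask);
		if (!(old & mask))
			return bit;        /* we won the race for this ID */
	}
	return -1;                         /* map exhausted */
}

static void idmap_free(int bit)
{
	atomic_fetch_and(&map[bit / WORD_BITS], ~(1UL << (bit % WORD_BITS)));
}

int main(void)
{
	int a = idmap_alloc(), b = idmap_alloc();
	printf("got %d and %d\n", a, b);            /* 0 and 1 */
	idmap_free(a);
	printf("after free: %d\n", idmap_alloc());  /* 0 again */
	return 0;
}
```
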
profile.c
  in profile_dead_cpu():
    333: struct page *page;  (local)
    341: page = virt_to_page(per_cpu(cpu_profile_hits, cpu)[i]);
    343: __free_page(page);
  in profile_prepare_cpu():
    352: struct page *page;  (local)
    360: page = __alloc_pages_node(node, GFP_KERNEL | __GFP_ZERO, 0);
    361: if (!page) {
    365: per_cpu(cpu_profile_hits, cpu)[i] = page_address(page);

relay.c
  in relay_buf_fault():
    44: struct page *page;  (local)
    51: page = vmalloc_to_page(buf->start + (pgoff << PAGE_SHIFT));
    52: if (!page)
    54: get_page(page);
    55: vmf->page = page;
  in relay_alloc_page_array():
    71: static struct page **relay_alloc_page_array(unsigned int n_pages)
    73: const size_t pa_size = n_pages * sizeof(struct page *);
    82: static void relay_free_page_array(struct page **array)
  in relay_pipe_buf_release():
    1172: rbuf = (struct rchan_buf *)page_private(buf->page);
  in subbuf_splice_actor():
    1207: struct page *pages[PIPE_DEF_BUFFERS];

futex.c
  in get_futex_key():
    502: struct page *page, *tail;  (local)
    539: err = get_user_pages_fast(address, 1, 1, &page);
    545: err = get_user_pages_fast(address, 1, 0, &page);
    571: tail = page;
    572: page = compound_head(page);
    573: mapping = READ_ONCE(page->mapping);
    598: lock_page(page);
    599: shmem_swizzled = PageSwapCache(page) || page->mapping;
    600: unlock_page(page);
    601: put_page(page);
  [all …]

pid_namespace.c
  in create_pid_namespace():
    113: ns->pidmap[0].page = kzalloc(PAGE_SIZE, GFP_KERNEL);
    114: if (!ns->pidmap[0].page)
    134: set_bit(0, ns->pidmap[0].page);
    143: kfree(ns->pidmap[0].page);
  in destroy_pid_namespace():
    168: kfree(ns->pidmap[i].page);

kcov.c
  in kcov_mmap():
    275: struct page *page;  (local)
    293: page = vmalloc_to_page(kcov->area + off);
    294: if (vm_insert_page(vma, vma->vm_start + off, page))

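kcov_mmap() above maps a vmalloc() buffer into userspace one page at a time, because vmalloc memory is not physically contiguous. A module-context sketch of that loop, assuming roughly the v4.x APIs shown in these hits (example_mmap() and the private_data layout are invented; this is not buildable standalone):

```c
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

static int example_mmap(struct file *filp, struct vm_area_struct *vma)
{
	void *area = filp->private_data;  /* assumed: a page-aligned vmalloc() buffer */
	unsigned long size = vma->vm_end - vma->vm_start;
	unsigned long off;

	for (off = 0; off < size; off += PAGE_SIZE) {
		/* Resolve each virtual page to its struct page and
		 * insert it into the userspace mapping individually. */
		struct page *page = vmalloc_to_page(area + off);
		int res = vm_insert_page(vma, vma->vm_start + off, page);
		if (res)
			return res;  /* the caller tears the vma down on error */
	}
	return 0;
}
```

A real handler, like kcov's, would also validate the requested size and offset against the buffer before inserting pages.
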
cfi.c
  in ptr_to_shadow():
    66: unsigned long page = ptr >> PAGE_SHIFT;  (local)
    68: if (unlikely(page < s->r.min_page))
    71: index = page - s->r.min_page;

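ptr_to_shadow() above reduces an address to a page number and then to an index relative to the first page the shadow region covers. A runnable userspace illustration of the arithmetic; the range values and the upper-bound check are made up for the demo:

```c
#include <stdio.h>

#define PAGE_SHIFT 12

struct shadow_range { unsigned long min_page, max_page; };

static long ptr_to_index(const struct shadow_range *r, unsigned long ptr)
{
	unsigned long page = ptr >> PAGE_SHIFT;  /* address -> page number */

	if (page < r->min_page || page > r->max_page)
		return -1;                       /* not covered by the shadow */
	return (long)(page - r->min_page);       /* index into the shadow */
}

int main(void)
{
	struct shadow_range r = { .min_page = 0x100, .max_page = 0x1ff };

	printf("%ld\n", ptr_to_index(&r, 0x100000));  /* page 0x100 -> index 0 */
	printf("%ld\n", ptr_to_index(&r, 0x123456));  /* page 0x123 -> index 35 */
	printf("%ld\n", ptr_to_index(&r, 0x5000));    /* below range -> -1 */
	return 0;
}
```
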
fork.c
  in alloc_thread_stack_node():
    208: struct page *page = alloc_pages_node(node, THREADINFO_GFP,
    211: return page ? page_address(page) : NULL;
  in account_kernel_stack():
    308: struct page *first_page = virt_to_page(stack);
  in dup_task_struct():
    542: tsk->task_frag.page = NULL;

exit.c
  in do_exit():
    883: if (tsk->task_frag.page)
    884: put_page(tsk->task_frag.page);

kprobes.c
  in free_insn_page():
    128: void __weak free_insn_page(void *page)  (argument)
    130: module_memfree(page);

/kernel/events/

ring_buffer.c
  in __perf_output_begin():
    202: handle->page = (offset >> page_shift) & (rb->nr_pages - 1);
    204: handle->addr = rb->data_pages[handle->page] + offset;
  in rb_alloc_aux_page():
    501: static struct page *rb_alloc_aux_page(int node, int order)
    503: struct page *page;  (local)
    509: page = alloc_pages_node(node, PERF_AUX_GFP, order);
    510: } while (!page && order--);
    512: if (page && order) {
    519: split_page(page, order);
    520: SetPagePrivate(page);
    521: set_page_private(page, order);
  [all …]

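rb_alloc_aux_page() above asks for a high-order (multi-page) allocation and halves the order on each failure; on success with order > 0 it split_page()s the block and records the order in page_private. A userspace analogue of just the backoff loop, with an artificial allocator that refuses large orders so the fallback path actually runs:

```c
#include <stdio.h>
#include <stdlib.h>

#define MAX_ORDER_OK 2  /* pretend only order-2 blocks are still available */

static void *try_alloc(int order)
{
	if (order > MAX_ORDER_OK)
		return NULL;                 /* simulated allocation failure */
	return malloc((size_t)4096 << order);
}

static void *alloc_backoff(int order, int *got_order)
{
	void *p;

	do {
		p = try_alloc(order);
	} while (!p && order--);             /* same shape as the kernel loop */

	*got_order = order;
	return p;
}

int main(void)
{
	int got;
	void *p = alloc_backoff(5, &got);    /* ask for 32 pages */

	if (p) {
		printf("wanted order 5, got order %d\n", got);  /* prints 2 */
		free(p);
	}
	return 0;
}
```
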
uprobes.c
    104: struct page *pages[2];  (member)
  in __replace_page():
    153: struct page *old_page, struct page *new_page)
  in copy_from_page():
    235: static void copy_from_page(struct page *page, unsigned long vaddr, void *dst, int len)  (argument)
    237: void *kaddr = kmap_atomic(page);
  in copy_to_page():
    242: static void copy_to_page(struct page *page, unsigned long vaddr, const void *src, int len)  (argument)
    244: void *kaddr = kmap_atomic(page);
  in verify_opcode():
    249: static int verify_opcode(struct page *page, unsigned long vaddr, uprobe_opcode_t *new_opcode)  (argument)
    263: copy_from_page(page, vaddr, &old_opcode, UPROBE_SWBP_INSN_SIZE);
  in uprobe_write_opcode():
    297: struct page *old_page, *new_page;
  in __copy_insn():
    537: struct page *page;  (local)
  [all …]

internal.h
    93: extern struct page *
    143: handle->page++; \
    144: handle->page &= rb->nr_pages - 1; \
    145: handle->addr = rb->data_pages[handle->page]; \

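The macro body above advances the ring-buffer page index and wraps it with a mask, which only works because rb->nr_pages is a power of two, making "& (nr_pages - 1)" a cheap modulo. A tiny runnable demonstration with made-up numbers:

```c
#include <stdio.h>

int main(void)
{
	unsigned int nr_pages = 8;            /* must be a power of two */
	unsigned int page = 6;

	for (int i = 0; i < 4; i++) {
		page++;
		page &= nr_pages - 1;         /* wraps 8 -> 0 */
		printf("page = %u\n", page);  /* 7, 0, 1, 2 */
	}
	return 0;
}
```
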
core.c
  in perf_mmap_fault():
    4905: vmf->page = perf_mmap_to_page(rb, vmf->pgoff);
    4906: if (!vmf->page)
    4909: get_page(vmf->page);
    4910: vmf->page->mapping = vma->vm_file->f_mapping;
    4911: vmf->page->index = vmf->pgoff;
  in nr_addr_filters_show():
    8707: char *page)  (argument)
    8711: return snprintf(page, PAGE_SIZE - 1, "%d\n", pmu->nr_addr_filters);
  in type_show():
    8718: type_show(struct device *dev, struct device_attribute *attr, char *page)  (argument)
    8722: return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->type);
  in perf_event_mux_interval_ms_show():
    8729: char *page)  (argument)
  [all …]

/kernel/trace/

ring_buffer.c
    305: struct buffer_data_page *page; /* Actual data page */  (member)
  in ring_buffer_page_len():
    334: size_t ring_buffer_page_len(void *page)  (argument)
    336: struct buffer_data_page *bpage = page;
  in free_buffer_page():
    348: free_page((unsigned long)bpage->page);
  in rb_is_head_page():
    817: struct buffer_page *page, struct list_head *list)  (argument)
    823: if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list)
  in rb_is_reader_page():
    836: static bool rb_is_reader_page(struct buffer_page *page)  (argument)
    838: struct list_head *list = page->list.prev;
    840: return rb_list_head(list->next) != &page->list;
  in rb_set_head_page():
    957: struct buffer_page *page;  (local)
  [all …]

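rb_is_head_page() above can mask the pointer with ~RB_FLAG_MASK because list pointers are at least 4-byte aligned, which leaves the low two bits free for flags. A userspace sketch of the same pointer-tagging trick; the flag values are illustrative, not the ring buffer's:

```c
#include <stdio.h>
#include <stdint.h>

#define FLAG_MASK 3UL  /* low two bits of an aligned pointer */
#define FLAG_HEAD 1UL

struct node { struct node *next; };

int main(void)
{
	static struct node a, b;  /* static storage: suitably aligned */

	/* Tag the link from a to b as the "head" link. */
	a.next = (struct node *)((uintptr_t)&b | FLAG_HEAD);

	uintptr_t val = (uintptr_t)a.next;
	struct node *real = (struct node *)(val & ~FLAG_MASK);  /* strip flags */

	printf("flags=%lu, points to b: %s\n",
	       (unsigned long)(val & FLAG_MASK),
	       real == &b ? "yes" : "no");
	return 0;
}
```
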
trace.c
  in trace_buffered_event_enable():
    1982: struct page *page;  (local)
    1991: page = alloc_pages_node(cpu_to_node(cpu),
    1993: if (!page)
    1996: event = page_address(page);
  in tracing_splice_read_pipe():
    5397: struct page *pages_def[PIPE_DEF_BUFFERS];
  in tracing_mark_write():
    5618: struct page *pages[2];
    6205: void *page;  (member)
  in buffer_pipe_buf_release():
    6217: ring_buffer_free_read_page(ref->buffer, ref->page);
  in buffer_spd_release():
    6251: ring_buffer_free_read_page(ref->buffer, ref->page);
  in tracing_buffers_splice_read():
    6264: struct page *pages_def[PIPE_DEF_BUFFERS];
  [all …]

trace_uprobe.c
  in uprobe_buffer_init():
    709: struct page *p = alloc_pages_node(cpu_to_node(cpu),

Kconfig
    569: implementation and works via page faults. Tracing is disabled by

/kernel/debug/kdb/

kdb_support.c
  in kdb_getphys():
    381: struct page *page;  (local)
    386: page = pfn_to_page(pfn);
    387: vaddr = kmap_atomic(page);

/kernel/locking/

locktorture.c
  in __torture_print_stats():
    639: static void __torture_print_stats(char *page,  (argument)
    657: page += sprintf(page,

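__torture_print_stats() above builds its report with the common cursor idiom: sprintf() returns the number of characters written, so "page += sprintf(page, ...)" appends each piece after the last. A self-contained demo (buffer size is arbitrary):

```c
#include <stdio.h>

int main(void)
{
	char buf[128];
	char *page = buf;

	page += sprintf(page, "Writes:");
	page += sprintf(page, " Total: %lld", 42LL);
	page += sprintf(page, " Max/Min: %lld/%lld\n", 7LL, 1LL);

	fputs(buf, stdout);  /* Writes: Total: 42 Max/Min: 7/1 */
	return 0;
}
```
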
/kernel/sched/

fair.c
  in should_numa_migrate_memory():
    1330: bool should_numa_migrate_memory(struct task_struct *p, struct page * page,  (argument)
    1356: last_cpupid = page_cpupid_xchg_last(page, this_cpupid);