/kernel/events/

D | ring_buffer.c
     174  if (rb->nr_pages)  in __perf_output_begin()
     234  handle->page = (offset >> page_shift) & (rb->nr_pages - 1);  in __perf_output_begin()
     328  if (!rb->nr_pages)  in ring_buffer_init()
     630  pgoff_t pgoff, int nr_pages, long watermark, int flags)  in rb_alloc_aux() argument
     643  max_order = ilog2(nr_pages);  in rb_alloc_aux()
     656  rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL,  in rb_alloc_aux()
     662  for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) {  in rb_alloc_aux()
     666  order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages));  in rb_alloc_aux()
     690  rb->aux_priv = event->pmu->setup_aux(event, rb->aux_pages, nr_pages,  in rb_alloc_aux()
     709  rb->aux_watermark = nr_pages << (PAGE_SHIFT - 1);  in rb_alloc_aux()
     [all …]

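The mask at line 234 only works because perf sizes the ring to a power-of-two number of pages, so & (rb->nr_pages - 1) replaces a modulo. A minimal userspace sketch of that indexing (PAGE_SHIFT and NR_PAGES are illustrative constants, not perf's):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12  /* illustrative; per-arch in the kernel */
    #define NR_PAGES   8   /* must be a power of two for the mask to work */

    /* Map an ever-growing write offset to a page slot the way
     * __perf_output_begin() does: shift off the in-page bits, then
     * mask with nr_pages - 1 instead of taking a modulo. */
    static unsigned int offset_to_page(uint64_t offset)
    {
        return (unsigned int)((offset >> PAGE_SHIFT) & (NR_PAGES - 1));
    }

    int main(void)
    {
        for (uint64_t off = 0; off < (10ULL << PAGE_SHIFT); off += 1ULL << PAGE_SHIFT)
            printf("offset %#llx -> page %u\n",
                   (unsigned long long)off, offset_to_page(off));
        return 0;
    }
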
D | internal.h
      20  int nr_pages; /* nr of data pages */  member
      72  if (!pause && rb->nr_pages)  in rb_toggle_paused()
      79  rb_alloc(int nr_pages, long watermark, int cpu, int flags);
      82  pgoff_t pgoff, int nr_pages, long watermark, int flags);
     120  return rb->nr_pages << (PAGE_SHIFT + page_order(rb));  in perf_data_size()
     146  handle->page &= rb->nr_pages - 1; \

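perf_data_size() at line 120 accounts for higher-order backing pages: each slot covers 2^order base pages, so the total is nr_pages shifted by PAGE_SHIFT plus the order. A one-function sketch (constants assumed, not taken from the header):

    #include <stddef.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12  /* illustrative */

    /* With order-N pages, the data area spans
     * nr_pages << (PAGE_SHIFT + order) bytes. */
    static size_t data_size(unsigned int nr_pages, unsigned int order)
    {
        return (size_t)nr_pages << (PAGE_SHIFT + order);
    }

    int main(void)
    {
        printf("%zu\n", data_size(8, 0));  /* 8 base pages: 32768    */
        printf("%zu\n", data_size(8, 1));  /* 8 order-1 pages: 65536 */
        return 0;
    }
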
D | core.c
    5226  if (!rb || !rb->nr_pages) {  in _perf_ioctl()
    5719  unsigned long nr_pages;  in perf_mmap() local
    5741  nr_pages = (vma_size / PAGE_SIZE) - 1;  in perf_mmap()
    5753  nr_pages = vma_size / PAGE_SIZE;  in perf_mmap()
    5775  if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE)  in perf_mmap()
    5779  if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages)  in perf_mmap()
    5782  if (!is_power_of_2(nr_pages))  in perf_mmap()
    5795  user_extra = nr_pages;  in perf_mmap()
    5804  if (nr_pages != 0 && !is_power_of_2(nr_pages))  in perf_mmap()
    5807  if (vma_size != PAGE_SIZE * (1 + nr_pages))  in perf_mmap()
    [all …]

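perf_mmap() derives nr_pages from the VMA and insists the data area be empty or a power of two, with one extra page up front for the control structure. A simplified sketch of those size checks (PAGE_SIZE fixed at 4096 here; the real function checks much more):

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096UL  /* illustrative; per-arch in the kernel */

    static bool is_power_of_2(unsigned long n)
    {
        return n != 0 && (n & (n - 1)) == 0;
    }

    /* The shape of perf_mmap()'s size checks: one control page up
     * front, then zero or a power-of-two count of data pages. */
    static bool mmap_size_ok(unsigned long vma_size)
    {
        unsigned long nr_pages;

        if (vma_size < PAGE_SIZE)
            return false;
        nr_pages = vma_size / PAGE_SIZE - 1;  /* first page is the control page */
        if (nr_pages != 0 && !is_power_of_2(nr_pages))
            return false;
        /* rejects sizes that are not a whole number of pages */
        return vma_size == PAGE_SIZE * (1 + nr_pages);
    }

    int main(void)
    {
        unsigned long sizes[] = { 4096, 8192, 4096 * 9, 4096 * 10, 5000 };

        for (int i = 0; i < 5; i++)
            printf("%lu bytes: %s\n", sizes[i],
                   mmap_size_ok(sizes[i]) ? "ok" : "rejected");
        return 0;
    }
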
/kernel/power/

D | snapshot.c
    1521  static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask)  in preallocate_image_pages() argument
    1525  while (nr_pages > 0) {  in preallocate_image_pages()
    1536  nr_pages--;  in preallocate_image_pages()
    1543  static unsigned long preallocate_image_memory(unsigned long nr_pages,  in preallocate_image_memory() argument
    1552  if (nr_pages < alloc)  in preallocate_image_memory()
    1553  alloc = nr_pages;  in preallocate_image_memory()
    1559  static unsigned long preallocate_image_highmem(unsigned long nr_pages)  in preallocate_image_highmem() argument
    1561  return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM);  in preallocate_image_highmem()
    1574  static unsigned long preallocate_highmem_fraction(unsigned long nr_pages,  in preallocate_highmem_fraction() argument
    1578  unsigned long alloc = __fraction(nr_pages, highmem, total);  in preallocate_highmem_fraction()
    [all …]

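__fraction() at line 1578 splits the preallocation proportionally: highmem's share of nr_pages is nr_pages * highmem / total. A sketch of that scaling, assuming the helper is a straight multiply-then-divide with a 64-bit intermediate (the values below are made up):

    #include <stdint.h>
    #include <stdio.h>

    /* Scale x by mult/base without overflowing the multiply,
     * the shape of snapshot.c's __fraction() helper. */
    static unsigned long fraction(unsigned long x, unsigned long mult,
                                  unsigned long base)
    {
        return (unsigned long)(((uint64_t)x * mult) / base);
    }

    int main(void)
    {
        unsigned long nr_pages = 300000;  /* pages still to preallocate */
        unsigned long highmem = 200000;   /* highmem pages in the system */
        unsigned long total = 1000000;    /* total pages */

        /* highmem gets its proportional share of the image */
        printf("highmem share: %lu pages\n", fraction(nr_pages, highmem, total));
        return 0;
    }
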
D | swap.c
     536  int nr_pages;  in save_image() local
     549  nr_pages = 0;  in save_image()
     558  if (!(nr_pages % m))  in save_image()
     560  nr_pages / m * 10);  in save_image()
     561  nr_pages++;  in save_image()
     672  int nr_pages;  in save_image_lzo() local
     767  nr_pages = 0;  in save_image_lzo()
     782  if (!(nr_pages % m))  in save_image_lzo()
     784  nr_pages / m * 10);  in save_image_lzo()
     785  nr_pages++;  in save_image_lzo()
     [all …]

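Both save_image() and save_image_lzo() report progress in 10% steps: with m set to a tenth of the pages to write, nr_pages % m hits zero at each step boundary and nr_pages / m * 10 is the percentage. A standalone sketch of that arithmetic (the loop and the total are invented for the demo):

    #include <stdio.h>

    int main(void)
    {
        unsigned long nr_to_write = 1000;    /* pages in the image */
        unsigned long m = nr_to_write / 10;  /* one tenth = one progress step */

        if (!m)
            m = 1;  /* avoid a zero divisor for tiny images */

        for (unsigned long nr_pages = 0; nr_pages < nr_to_write; nr_pages++) {
            if (!(nr_pages % m))
                printf("Image saving progress: %3lu%%\n", nr_pages / m * 10);
        }
        return 0;
    }
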
D | hibernate.c
     240  unsigned nr_pages, char *msg)  in swsusp_show_speed() argument
     253  k = nr_pages * (PAGE_SIZE / 1024);  in swsusp_show_speed()

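Line 253 converts pages to kilobytes as nr_pages * (PAGE_SIZE / 1024), dividing PAGE_SIZE down before the multiply so the product stays small. A sketch of the throughput calculation in the style of swsusp_show_speed(), with the elapsed time supplied directly rather than via the kernel's ktime plumbing:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL  /* illustrative */

    int main(void)
    {
        unsigned long nr_pages = 250000;  /* ~1 GB of 4 KiB pages */
        unsigned long centisecs = 730;    /* elapsed time in 10 ms units */
        unsigned long k, kps;

        k = nr_pages * (PAGE_SIZE / 1024);  /* kilobytes written */
        kps = k * 100 / centisecs;          /* KB/s, since time is in 1/100 s */

        printf("Wrote %lu kbytes in %lu.%02lu seconds (%lu.%02lu MB/s)\n",
               k, centisecs / 100, centisecs % 100,
               kps / 1000, (kps % 1000) / 10);
        return 0;
    }
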
/kernel/trace/

D | ring_buffer.c
     451  unsigned long nr_pages;  member
     523  return buffer->buffers[cpu]->nr_pages;  in ring_buffer_nr_pages()
     643  size_t nr_pages;  in ring_buffer_wait() local
     651  nr_pages = cpu_buffer->nr_pages;  in ring_buffer_wait()
     658  (!nr_pages || (dirty * 100) > full * nr_pages))  in ring_buffer_wait()
    1189  static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu)  in __rb_allocate_pages() argument
    1204  if (i < nr_pages)  in __rb_allocate_pages()
    1225  for (i = 0; i < nr_pages; i++) {  in __rb_allocate_pages()
    1261  unsigned long nr_pages)  in rb_allocate_pages() argument
    1265  WARN_ON(!nr_pages);  in rb_allocate_pages()
    [all …]

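The condition at line 658 asks whether more than "full" percent of the pages are dirty without dividing: cross-multiply instead. A sketch:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    /* "More than `full` percent of nr_pages dirty?" with no division:
     * compare dirty * 100 against full * nr_pages instead. */
    static bool past_watermark(size_t dirty, size_t nr_pages, int full)
    {
        return !nr_pages || dirty * 100 > (size_t)full * nr_pages;
    }

    int main(void)
    {
        printf("%d\n", past_watermark(26, 100, 25));  /* just past 25%: 1 */
        printf("%d\n", past_watermark(25, 100, 25));  /* exactly at it:  0 */
        return 0;
    }
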
D | trace.c
    6173  .nr_pages = 0, /* This gets updated below. */  in tracing_splice_read_pipe()
    6232  spd.nr_pages = i;  in tracing_splice_read_pipe()
    7480  spd.nr_pages++;  in tracing_buffers_splice_read()
    7487  spd.nr_pages = i;  in tracing_buffers_splice_read()
    7490  if (!spd.nr_pages) {  in tracing_buffers_splice_read()

/kernel/

D | relay.c
    1200  unsigned int pidx, poff, total_len, subbuf_pages, nr_pages;  in subbuf_splice_actor() local
    1213  .nr_pages = 0,  in subbuf_splice_actor()
    1235  nr_pages = min_t(unsigned int, subbuf_pages, spd.nr_pages_max);  in subbuf_splice_actor()
    1237  for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) {  in subbuf_splice_actor()
    1247  spd.pages[spd.nr_pages] = rbuf->page_array[pidx];  in subbuf_splice_actor()
    1248  spd.partial[spd.nr_pages].offset = poff;  in subbuf_splice_actor()
    1255  spd.partial[spd.nr_pages].len = this_len;  in subbuf_splice_actor()
    1256  spd.partial[spd.nr_pages].private = private;  in subbuf_splice_actor()
    1264  spd.nr_pages++;  in subbuf_splice_actor()
    1270  if (!spd.nr_pages)  in subbuf_splice_actor()

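The loop at lines 1237-1264 is the standard splice bookkeeping: one pages[] entry per page, with the in-page offset and length in the matching partial[] slot, bumping spd.nr_pages as entries fill. A userspace sketch of the same pattern over a flat buffer (the struct names are invented stand-ins for the kernel's splice_pipe_desc):

    #include <stdio.h>

    #define PAGE_SIZE 4096u
    #define MAX_PAGES 16

    struct partial_page { unsigned int offset, len; };

    struct pipe_desc {
        const void *pages[MAX_PAGES];        /* one entry per page */
        struct partial_page partial[MAX_PAGES];
        unsigned int nr_pages;
    };

    /* Walk a byte range page by page, recording each page pointer
     * plus its in-page offset and length, like subbuf_splice_actor(). */
    static void fill_desc(struct pipe_desc *spd, const char *buf,
                          unsigned int start, unsigned int bytes)
    {
        unsigned int pidx = start / PAGE_SIZE;
        unsigned int poff = start % PAGE_SIZE;

        for (spd->nr_pages = 0; bytes && spd->nr_pages < MAX_PAGES;
             spd->nr_pages++) {
            unsigned int this_len = PAGE_SIZE - poff;

            if (this_len > bytes)
                this_len = bytes;
            spd->pages[spd->nr_pages] = buf + pidx * PAGE_SIZE;
            spd->partial[spd->nr_pages].offset = poff;
            spd->partial[spd->nr_pages].len = this_len;
            bytes -= this_len;
            poff = 0;  /* later pages start at their beginning */
            pidx++;
        }
    }

    int main(void)
    {
        static char buf[8 * PAGE_SIZE];
        struct pipe_desc spd = { 0 };

        fill_desc(&spd, buf, 1000, 10000);
        for (unsigned int i = 0; i < spd.nr_pages; i++)
            printf("entry %u: offset %u len %u\n",
                   i, spd.partial[i].offset, spd.partial[i].len);
        return 0;
    }
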
D | kexec_core.c
     153  unsigned long nr_pages = totalram_pages();  in sanity_check_segment_list() local
     219  if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2)  in sanity_check_segment_list()
     225  if (total_pages > nr_pages / 2)  in sanity_check_segment_list()

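Lines 219 and 225 enforce the same budget twice: no single kexec segment, and no sum of segments, may claim more than half of totalram_pages(). A sketch of that check:

    #include <stdbool.h>
    #include <stdio.h>

    /* nr_pages plays totalram_pages()'s role: no segment, and no
     * sum of segments, may exceed half of RAM. */
    static bool segments_fit(const unsigned long *seg_pages, int nr_segments,
                             unsigned long nr_pages)
    {
        unsigned long total_pages = 0;

        for (int i = 0; i < nr_segments; i++) {
            if (seg_pages[i] > nr_pages / 2)
                return false;  /* one segment too large */
            total_pages += seg_pages[i];
        }
        return total_pages <= nr_pages / 2;  /* sum must also fit */
    }

    int main(void)
    {
        unsigned long segs[] = { 1000, 2000, 500 };

        printf("%s\n", segments_fit(segs, 3, 10000) ? "ok" : "rejected");
        printf("%s\n", segments_fit(segs, 3, 6000) ? "ok" : "rejected");
        return 0;
    }
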
D | resource.c
     475  int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages,  in walk_system_ram_range() argument
     485  end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;  in walk_system_ram_range()
     501  static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg)  in __is_ram() argument

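Line 485 turns a (start_pfn, nr_pages) pair into an inclusive physical byte range, casting to u64 before the shift so large PFNs survive on 32-bit builds. A sketch of the conversion:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12  /* illustrative */

    /* PFN range -> inclusive physical byte range, widened to 64 bits
     * before shifting, as walk_system_ram_range() does. */
    static void pfn_range_to_phys(unsigned long start_pfn, unsigned long nr_pages,
                                  uint64_t *start, uint64_t *end)
    {
        *start = (uint64_t)start_pfn << PAGE_SHIFT;
        *end = ((uint64_t)(start_pfn + nr_pages) << PAGE_SHIFT) - 1;
    }

    int main(void)
    {
        uint64_t start, end;

        pfn_range_to_phys(0x100, 16, &start, &end);
        printf("phys %#" PRIx64 " - %#" PRIx64 "\n", start, end);
        return 0;
    }
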
D | fork.c
     382  BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE);  in account_kernel_stack()
     765  unsigned long nr_pages = totalram_pages();  in set_max_threads() local
     771  if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64)  in set_max_threads()
     774  threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE,  in set_max_threads()

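The guard at line 771 avoids a u64 overflow: if the significant bits of nr_pages and PAGE_SIZE sum past 64, their product cannot be trusted, so set_max_threads() falls back to a fixed maximum instead. A sketch using a portable fls64 stand-in (a GCC/Clang builtin is assumed here):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the kernel's fls64(): index of the highest set
     * bit, or 0 for zero. */
    static int fls64_bits(uint64_t x)
    {
        return x ? 64 - __builtin_clzll(x) : 0;
    }

    /* If the operands' bit widths sum past 64, a * b may overflow. */
    static bool product_may_overflow(uint64_t a, uint64_t b)
    {
        return fls64_bits(a) + fls64_bits(b) > 64;
    }

    int main(void)
    {
        printf("%d\n", product_may_overflow(1ULL << 40, 4096));  /* 41 + 13 bits -> 0 */
        printf("%d\n", product_may_overflow(1ULL << 60, 4096));  /* 61 + 13 bits -> 1 */
        return 0;
    }
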
/kernel/dma/

D | remap.c
     126  unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;  in dma_atomic_pool_init() local
     132  page = dma_alloc_from_contiguous(NULL, nr_pages,  in dma_atomic_pool_init()
     167  if (!dma_release_from_contiguous(NULL, page, nr_pages))  in dma_atomic_pool_init()

D | debug.c
     931  int i, nr_pages;  in dma_debug_init() local
     946  nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES);  in dma_debug_init()
     947  for (i = 0; i < nr_pages; ++i)  in dma_debug_init()
    1107  for (i = 0; i < stack_vm_area->nr_pages; i++) {  in check_for_stack()

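Line 946 is the classic integer ceiling: DIV_ROUND_UP(n, d) rounds the preallocation up to whole chunks so nr_prealloc_entries entries always fit. A sketch (the chunk size below is made up; DMA_DEBUG_DYNAMIC_ENTRIES is defined elsewhere in debug.c):

    #include <stdio.h>

    /* Integer ceiling: how many whole chunks cover n entries. */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int nr_prealloc_entries = 65536;
        unsigned int entries_per_chunk = 768;  /* made-up stand-in for
                                                  DMA_DEBUG_DYNAMIC_ENTRIES */

        printf("%u chunks\n",
               DIV_ROUND_UP(nr_prealloc_entries, entries_per_chunk));
        return 0;
    }
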