Searched refs:nr_pages (Results 1 – 16 of 16) sorted by relevance

/kernel/events/
ring_buffer.c
175 if (rb->nr_pages) { in __perf_output_begin()
237 handle->page = (offset >> page_shift) & (rb->nr_pages - 1); in __perf_output_begin()
333 if (!rb->nr_pages) in ring_buffer_init()
671 pgoff_t pgoff, int nr_pages, long watermark, int flags) in rb_alloc_aux() argument
686 watermark = nr_pages << (PAGE_SHIFT - 1); in rb_alloc_aux()
698 max_order = ilog2(nr_pages); in rb_alloc_aux()
706 if (get_order((unsigned long)nr_pages * sizeof(void *)) > MAX_ORDER) in rb_alloc_aux()
708 rb->aux_pages = kcalloc_node(nr_pages, sizeof(void *), GFP_KERNEL, in rb_alloc_aux()
714 for (rb->aux_nr_pages = 0; rb->aux_nr_pages < nr_pages;) { in rb_alloc_aux()
718 order = min(max_order, ilog2(nr_pages - rb->aux_nr_pages)); in rb_alloc_aux()
[all …]
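
The indexing at ring_buffer.c:237 relies on the buffer holding a power-of-two
number of pages, so a byte offset can be mapped to a page slot with a mask
instead of a modulo. A minimal userspace sketch, not the kernel code; the
helper name is hypothetical:

#include <assert.h>
#include <stddef.h>

/* Map a byte offset into a ring of nr_pages pages. nr_pages must be
 * a power of two, which perf_mmap() enforces via is_power_of_2(). */
static size_t ring_page(size_t offset, unsigned int page_shift,
                        size_t nr_pages)
{
        assert(nr_pages && (nr_pages & (nr_pages - 1)) == 0);
        return (offset >> page_shift) & (nr_pages - 1);
}
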
internal.h
20 int nr_pages; /* nr of data pages */ member
73 if (!pause && rb->nr_pages) in rb_toggle_paused()
80 rb_alloc(int nr_pages, long watermark, int cpu, int flags);
83 pgoff_t pgoff, int nr_pages, long watermark, int flags);
121 return rb->nr_pages << page_order(rb); in data_page_nr()
126 return rb->nr_pages << (PAGE_SHIFT + page_order(rb)); in perf_data_size()
152 handle->page &= rb->nr_pages - 1; \
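
A small stand-in for the size math at internal.h:126, assuming 4 KiB base
pages: nr_pages buffer pages, each made of 2^order base pages, give
nr_pages << (PAGE_SHIFT + order) bytes of data area. Names here are
illustrative, not the kernel's:

#define MY_PAGE_SHIFT 12 /* assumption: 4 KiB base pages */

static unsigned long buf_data_size(unsigned long nr_pages,
                                   unsigned int order)
{
        return nr_pages << (MY_PAGE_SHIFT + order);
}
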
core.c
5773 if (!rb || !rb->nr_pages) { in _perf_ioctl()
6263 unsigned long nr_pages; in perf_mmap() local
6285 nr_pages = (vma_size / PAGE_SIZE) - 1; in perf_mmap()
6297 nr_pages = vma_size / PAGE_SIZE; in perf_mmap()
6319 if (aux_size != vma_size || aux_size != nr_pages * PAGE_SIZE) in perf_mmap()
6323 if (rb_has_aux(rb) && rb->aux_nr_pages != nr_pages) in perf_mmap()
6326 if (!is_power_of_2(nr_pages)) in perf_mmap()
6339 user_extra = nr_pages; in perf_mmap()
6348 if (nr_pages != 0 && !is_power_of_2(nr_pages)) in perf_mmap()
6351 if (vma_size != PAGE_SIZE * (1 + nr_pages)) in perf_mmap()
[all …]
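
The checks at core.c:6285 and core.c:6351 describe the perf mmap layout: one
metadata page followed by a power-of-two number of data pages. A hedged
userspace sketch of mapping such a buffer; fd is assumed to come from
perf_event_open() (not shown), and the helper name is hypothetical:

#include <sys/mman.h>
#include <unistd.h>

/* Map 1 metadata page + 2^order data pages, matching the
 * vma_size == PAGE_SIZE * (1 + nr_pages) check in perf_mmap(). */
static void *map_perf_buffer(int fd, unsigned int order)
{
        size_t page = (size_t)sysconf(_SC_PAGESIZE);
        size_t len = page + (page << order);

        return mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
                    fd, 0);
}
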
/kernel/power/
snapshot.c
1603 static unsigned long preallocate_image_pages(unsigned long nr_pages, gfp_t mask) in preallocate_image_pages() argument
1607 while (nr_pages > 0) { in preallocate_image_pages()
1618 nr_pages--; in preallocate_image_pages()
1625 static unsigned long preallocate_image_memory(unsigned long nr_pages, in preallocate_image_memory() argument
1634 if (nr_pages < alloc) in preallocate_image_memory()
1635 alloc = nr_pages; in preallocate_image_memory()
1641 static unsigned long preallocate_image_highmem(unsigned long nr_pages) in preallocate_image_highmem() argument
1643 return preallocate_image_pages(nr_pages, GFP_IMAGE | __GFP_HIGHMEM); in preallocate_image_highmem()
1654 static unsigned long preallocate_highmem_fraction(unsigned long nr_pages, in preallocate_highmem_fraction() argument
1658 unsigned long alloc = __fraction(nr_pages, highmem, total); in preallocate_highmem_fraction()
[all …]
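
preallocate_highmem_fraction() (snapshot.c:1658) scales a request by the
highmem/total ratio via __fraction(). A minimal stand-in, widened to 64 bits
so the product cannot overflow; the name is illustrative:

#include <stdint.h>

/* alloc = nr_pages * highmem / total, i.e. x scaled by mul/div. */
static unsigned long fraction(unsigned long x, unsigned long mul,
                              unsigned long div)
{
        return (unsigned long)(((uint64_t)x * mul) / div);
}
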
swap.c
552 int nr_pages; in save_image() local
565 nr_pages = 0; in save_image()
575 if (!(nr_pages % m)) in save_image()
577 nr_pages / m * 10); in save_image()
578 nr_pages++; in save_image()
691 int nr_pages; in save_image_lzo() local
783 nr_pages = 0; in save_image_lzo()
798 if (!(nr_pages % m)) in save_image_lzo()
800 nr_pages / m * 10); in save_image_lzo()
801 nr_pages++; in save_image_lzo()
[all …]
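
Both save_image() and save_image_lzo() report progress with the same idiom:
print every m pages, with m assumed to be one tenth of the total so each
message advances by 10%. A sketch under that assumption (the kernel's exact
message text is not shown in the excerpt):

#include <stdio.h>

static void report_progress(unsigned long nr_pages, unsigned long m)
{
        if (m && !(nr_pages % m))
                printf("Image saving progress: %3lu%%\n",
                       nr_pages / m * 10);
}
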
hibernate.c
259 unsigned nr_pages, char *msg) in swsusp_show_speed() argument
272 k = nr_pages * (PAGE_SIZE / 1024); in swsusp_show_speed()
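
The conversion at hibernate.c:272 turns a page count into KiB by multiplying
by PAGE_SIZE / 1024, which is exact because PAGE_SIZE is always a multiple of
1024. A sketch assuming 4 KiB pages:

#define MY_PAGE_SIZE 4096UL /* assumption: 4 KiB pages */

/* Pages -> KiB; dividing the page size first avoids overflow. */
static unsigned long pages_to_kib(unsigned long nr_pages)
{
        return nr_pages * (MY_PAGE_SIZE / 1024);
}
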
/kernel/
watch_queue.c
245 int ret, i, nr_pages; in watch_queue_set_size() local
256 nr_pages = (nr_notes + WATCH_QUEUE_NOTES_PER_PAGE - 1); in watch_queue_set_size()
257 nr_pages /= WATCH_QUEUE_NOTES_PER_PAGE; in watch_queue_set_size()
258 user_bufs = account_pipe_buffers(pipe->user, pipe->nr_accounted, nr_pages); in watch_queue_set_size()
260 if (nr_pages > pipe->max_usage && in watch_queue_set_size()
268 nr_notes = nr_pages * WATCH_QUEUE_NOTES_PER_PAGE; in watch_queue_set_size()
274 pages = kcalloc(sizeof(struct page *), nr_pages, GFP_KERNEL); in watch_queue_set_size()
278 for (i = 0; i < nr_pages; i++) { in watch_queue_set_size()
292 wqueue->nr_pages = nr_pages; in watch_queue_set_size()
301 (void) account_pipe_buffers(pipe->user, nr_pages, pipe->nr_accounted); in watch_queue_set_size()
[all …]
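
Lines watch_queue.c:256-257 round the note count up to whole pages with the
classic (a + b - 1) / b idiom, i.e. an open-coded DIV_ROUND_UP(). As a
standalone sketch:

static unsigned long notes_to_pages(unsigned long nr_notes,
                                    unsigned long notes_per_page)
{
        return (nr_notes + notes_per_page - 1) / notes_per_page;
}
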
relay.c
1115 unsigned int pidx, poff, total_len, subbuf_pages, nr_pages; in subbuf_splice_actor() local
1128 .nr_pages = 0, in subbuf_splice_actor()
1150 nr_pages = min_t(unsigned int, subbuf_pages, spd.nr_pages_max); in subbuf_splice_actor()
1152 for (total_len = 0; spd.nr_pages < nr_pages; spd.nr_pages++) { in subbuf_splice_actor()
1162 spd.pages[spd.nr_pages] = rbuf->page_array[pidx]; in subbuf_splice_actor()
1163 spd.partial[spd.nr_pages].offset = poff; in subbuf_splice_actor()
1170 spd.partial[spd.nr_pages].len = this_len; in subbuf_splice_actor()
1171 spd.partial[spd.nr_pages].private = private; in subbuf_splice_actor()
1179 spd.nr_pages++; in subbuf_splice_actor()
1185 if (!spd.nr_pages) in subbuf_splice_actor()
kexec_core.c
154 unsigned long nr_pages = totalram_pages(); in sanity_check_segment_list() local
220 if (PAGE_COUNT(image->segment[i].memsz) > nr_pages / 2) in sanity_check_segment_list()
226 if (total_pages > nr_pages / 2) in sanity_check_segment_list()
resource.c
452 int walk_system_ram_range(unsigned long start_pfn, unsigned long nr_pages, in walk_system_ram_range() argument
462 end = ((u64)(start_pfn + nr_pages) << PAGE_SHIFT) - 1; in walk_system_ram_range()
477 static int __is_ram(unsigned long pfn, unsigned long nr_pages, void *arg) in __is_ram() argument
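
walk_system_ram_range() (resource.c:462) converts a PFN range into an
inclusive physical byte range. A sketch assuming a 12-bit page shift; the
helper name is hypothetical:

#include <stdint.h>

#define MY_PAGE_SHIFT 12 /* assumption: 4 KiB pages */

/* Inclusive end address of [start_pfn, start_pfn + nr_pages). */
static uint64_t range_end(unsigned long start_pfn,
                          unsigned long nr_pages)
{
        return (((uint64_t)start_pfn + nr_pages) << MY_PAGE_SHIFT) - 1;
}
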
fork.c
262 BUG_ON(vm->nr_pages != THREAD_SIZE / PAGE_SIZE); in memcg_charge_kernel_stack()
963 unsigned long nr_pages = totalram_pages(); in set_max_threads() local
969 if (fls64(nr_pages) + fls64(PAGE_SIZE) > 64) in set_max_threads()
972 threads = div64_u64((u64) nr_pages * (u64) PAGE_SIZE, in set_max_threads()
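
The guard at fork.c:969 rejects configurations where nr_pages * PAGE_SIZE
would overflow a u64: if the operands' bit widths sum past 64, the product
cannot fit. A sketch with a portable stand-in for the kernel's fls64()
(which returns the 1-based index of the most significant set bit):

#include <stdint.h>

static int fls64_ish(uint64_t x) /* stand-in for fls64() */
{
        int n = 0;

        while (x) {
                n++;
                x >>= 1;
        }
        return n;
}

/* a < 2^p and b < 2^q with p + q <= 64 imply a * b < 2^64. */
static int mul_fits_u64(uint64_t a, uint64_t b)
{
        return fls64_ish(a) + fls64_ish(b) <= 64;
}
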
/kernel/trace/
ring_buffer.c
467 unsigned long nr_pages; member
816 return buffer->buffers[cpu]->nr_pages; in ring_buffer_nr_pages()
853 size_t nr_pages; in full_hit() local
856 nr_pages = cpu_buffer->nr_pages; in full_hit()
857 if (!nr_pages || !full) in full_hit()
867 return (dirty * 100) >= (full * nr_pages); in full_hit()
1557 long nr_pages, struct list_head *pages) in __rb_allocate_pages() argument
1572 if (i < nr_pages) in __rb_allocate_pages()
1593 for (i = 0; i < nr_pages; i++) { in __rb_allocate_pages()
1631 unsigned long nr_pages) in rb_allocate_pages() argument
[all …]
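
full_hit() (trace/ring_buffer.c:867) asks "have dirty pages reached full
percent of nr_pages?" as a multiply-only comparison, dirty * 100 >=
full * nr_pages, avoiding integer division. A sketch that mirrors the
early-outs shown above:

static int buffer_full_hit(unsigned long dirty, unsigned long nr_pages,
                           int full)
{
        if (!nr_pages || !full)
                return 1;
        return dirty * 100 >= (unsigned long)full * nr_pages;
}
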
trace.c
6964 .nr_pages = 0, /* This gets updated below. */ in tracing_splice_read_pipe()
7023 spd.nr_pages = i; in tracing_splice_read_pipe()
8394 spd.nr_pages++; in tracing_buffers_splice_read()
8401 spd.nr_pages = i; in tracing_buffers_splice_read()
8404 if (!spd.nr_pages) { in tracing_buffers_splice_read()
/kernel/bpf/
ringbuf.c
39 int nr_pages; member
101 int nr_pages = nr_meta_pages + nr_data_pages; in bpf_ringbuf_area_alloc() local
129 for (i = 0; i < nr_pages; i++) { in bpf_ringbuf_area_alloc()
132 nr_pages = i; in bpf_ringbuf_area_alloc()
145 rb->nr_pages = nr_pages; in bpf_ringbuf_area_alloc()
150 for (i = 0; i < nr_pages; i++) in bpf_ringbuf_area_alloc()
222 int i, nr_pages = rb->nr_pages; in bpf_ringbuf_free() local
225 for (i = 0; i < nr_pages; i++) in bpf_ringbuf_free()
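
bpf_ringbuf_area_alloc() (ringbuf.c:129-134) shows a clamp-and-unwind error
path: when page i fails to allocate, nr_pages is set to i so cleanup frees
exactly the pages already obtained. A userspace stand-in using malloc/free;
names are illustrative:

#include <stdlib.h>

static void **alloc_page_array(int nr_pages)
{
        void **pages = calloc((size_t)nr_pages, sizeof(*pages));
        int i;

        if (!pages)
                return NULL;
        for (i = 0; i < nr_pages; i++) {
                pages[i] = malloc(4096);
                if (!pages[i]) {
                        nr_pages = i; /* free only what we got */
                        goto err;
                }
        }
        return pages;
err:
        for (i = 0; i < nr_pages; i++)
                free(pages[i]);
        free(pages);
        return NULL;
}
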
/kernel/dma/
debug.c
892 int i, nr_pages; in dma_debug_init() local
905 nr_pages = DIV_ROUND_UP(nr_prealloc_entries, DMA_DEBUG_DYNAMIC_ENTRIES); in dma_debug_init()
906 for (i = 0; i < nr_pages; ++i) in dma_debug_init()
1066 for (i = 0; i < stack_vm_area->nr_pages; i++) { in check_for_stack()
/kernel/rcu/
tree.c
3296 int nr_pages; in fill_page_cache_func() local
3300 nr_pages = atomic_read(&krcp->backoff_page_cache_fill) ? in fill_page_cache_func()
3303 for (i = 0; i < nr_pages; i++) { in fill_page_cache_func()