Lines matching refs: nr_pages

Each entry gives the source line number, the matching code, the enclosing function where there is one, and, for declarations, whether nr_pages is a structure member, a local variable, or a function argument.
451 unsigned long nr_pages; member
523 return buffer->buffers[cpu]->nr_pages; in ring_buffer_nr_pages()
643 size_t nr_pages; in ring_buffer_wait() local
651 nr_pages = cpu_buffer->nr_pages; in ring_buffer_wait()
658 (!nr_pages || (dirty * 100) > full * nr_pages)) in ring_buffer_wait()
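The three ring_buffer_wait() lines above implement the "wake me when the buffer is at least `full` percent dirty" test with integer math only: instead of computing a percentage, the two sides are cross-multiplied, and a zero-page buffer is treated as always full so a waiter is never blocked forever. The same comparison reappears, inverted, on the producer side in rb_wakeups() further down (line 2644). A minimal standalone sketch follows; the function and variable names are illustrative, not the kernel's.

#include <stdbool.h>
#include <stdio.h>

/* Cross-multiplied "at least `full` percent dirty" test, as at line 658.
 * Avoids floating point and division entirely. */
static bool buffer_full_enough(size_t dirty, size_t nr_pages, size_t full)
{
	return !nr_pages || (dirty * 100) > full * nr_pages;
}

int main(void)
{
	printf("%d\n", buffer_full_enough(3, 8, 50));	/* 0: 3/8 = 37.5% < 50% */
	printf("%d\n", buffer_full_enough(5, 8, 50));	/* 1: 5/8 = 62.5% > 50% */
	return 0;
}
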
1189 static int __rb_allocate_pages(long nr_pages, struct list_head *pages, int cpu) in __rb_allocate_pages() argument
1204 if (i < nr_pages) in __rb_allocate_pages()
1225 for (i = 0; i < nr_pages; i++) { in __rb_allocate_pages()
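The __rb_allocate_pages() hits show the usual all-or-nothing pattern: allocate nr_pages buffer pages in a loop (line 1225) and, if any allocation fails, free everything allocated so far and return -ENOMEM; the early `i < nr_pages` check at line 1204, before the loop, is most likely an up-front available-memory test. The kernel chains the pages on a list_head; the sketch below uses a plain pointer array to stay self-contained, so names and types are illustrative only.

#include <stdlib.h>
#include <errno.h>

/* Allocate nr_pages pages or none at all: on failure, roll back every
 * page allocated so far so the caller sees no partial state. */
static int allocate_pages(void **pages, long nr_pages, size_t page_size)
{
	long i;

	for (i = 0; i < nr_pages; i++) {
		pages[i] = malloc(page_size);
		if (!pages[i])
			goto free_pages;
	}
	return 0;

free_pages:
	while (--i >= 0)
		free(pages[i]);
	return -ENOMEM;
}

A caller would pass an array of nr_pages pointers and treat a non-zero return as meaning nothing was allocated.
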
1261 unsigned long nr_pages) in rb_allocate_pages() argument
1265 WARN_ON(!nr_pages); in rb_allocate_pages()
1267 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu)) in rb_allocate_pages()
1278 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
1286 rb_allocate_cpu_buffer(struct ring_buffer *buffer, long nr_pages, int cpu) in rb_allocate_cpu_buffer() argument
1326 ret = rb_allocate_pages(cpu_buffer, nr_pages); in rb_allocate_cpu_buffer()
1381 long nr_pages; in __ring_buffer_alloc() local
1395 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); in __ring_buffer_alloc()
1404 if (nr_pages < 2) in __ring_buffer_alloc()
1405 nr_pages = 2; in __ring_buffer_alloc()
1417 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in __ring_buffer_alloc()
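__ring_buffer_alloc() converts the requested byte size into a page count by rounding up to whole buffer pages and clamping to a minimum of two, since the ring buffer always needs a reader page plus at least one page to write into. A standalone sketch of that computation follows; the BUF_PAGE_SIZE value here is only a stand-in (in the kernel it is the page size minus the buffer-page header), and the function name is illustrative.

#include <stdio.h>

#define BUF_PAGE_SIZE	4080UL			/* illustrative stand-in value */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned long size_to_nr_pages(unsigned long size)
{
	unsigned long nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	if (nr_pages < 2)			/* never fewer than two pages */
		nr_pages = 2;
	return nr_pages;
}

int main(void)
{
	printf("%lu\n", size_to_nr_pages(1));		/* 2 (clamped to the minimum) */
	printf("%lu\n", size_to_nr_pages(65536));	/* 17 with a 4080-byte page */
	return 0;
}
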
1495 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages() argument
1531 for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) { in rb_remove_pages()
1694 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
1719 unsigned long nr_pages; in ring_buffer_resize() local
1733 nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE); in ring_buffer_resize()
1736 if (nr_pages < 2) in ring_buffer_resize()
1737 nr_pages = 2; in ring_buffer_resize()
1739 size = nr_pages * BUF_PAGE_SIZE; in ring_buffer_resize()
1757 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
1758 cpu_buffer->nr_pages; in ring_buffer_resize()
1817 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
1820 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
1821 cpu_buffer->nr_pages; in ring_buffer_resize()
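ring_buffer_resize() reuses the same rounding and two-page minimum as the allocation path (lines 1733-1737) and then, per CPU buffer, records only the difference between the requested and the current page count in nr_pages_to_update; rb_update_pages() later folds that delta back into nr_pages (line 1694). A cut-down sketch of that bookkeeping, with an illustrative struct that is not the kernel definition:

#include <stdio.h>

struct cpu_buffer_sketch {
	long nr_pages;			/* pages currently in this CPU's buffer */
	long nr_pages_to_update;	/* >0: pages to add, <0: pages to remove */
};

static void plan_resize(struct cpu_buffer_sketch *cb, long new_nr_pages)
{
	cb->nr_pages_to_update = new_nr_pages - cb->nr_pages;
}

static void apply_resize(struct cpu_buffer_sketch *cb)
{
	/* mirrors line 1694: nr_pages += nr_pages_to_update */
	cb->nr_pages += cb->nr_pages_to_update;
	cb->nr_pages_to_update = 0;
}

int main(void)
{
	struct cpu_buffer_sketch cb = { .nr_pages = 8 };

	plan_resize(&cb, 12);
	printf("%ld\n", cb.nr_pages_to_update);	/* 4: grow by four pages */
	apply_resize(&cb);
	printf("%ld\n", cb.nr_pages);		/* 12 */
	return 0;
}
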
2474 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
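The single rb_set_commit_to_write() hit uses nr_pages as a safety bound rather than a size: the loop that moves the commit page forward is capped at roughly 100 passes per page so a bug cannot turn it into an infinite loop (the kernel warns and bails out when the cap runs out). A toy model of such a bounded catch-up loop, where the "pages behind" counter stands in for the real commit/write page comparison:

/* Bounded catch-up loop in the spirit of line 2474. */
static int advance_commit(unsigned long nr_pages, unsigned long pages_behind)
{
	unsigned long max_count = nr_pages * 100;	/* sanity cap */

	while (pages_behind) {
		if (!max_count--)
			return -1;		/* should never happen */
		pages_behind--;			/* stands in for advancing the commit page */
	}
	return 0;
}
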
2614 size_t nr_pages; in rb_wakeups() local
2642 nr_pages = cpu_buffer->nr_pages; in rb_wakeups()
2644 if (full && nr_pages && (dirty * 100) <= full * nr_pages) in rb_wakeups()
4346 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
4528 if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) in ring_buffer_swap_cpu()
4874 unsigned long nr_pages; in trace_rb_cpu_prepare() local
4880 nr_pages = 0; in trace_rb_cpu_prepare()
4885 if (nr_pages == 0) in trace_rb_cpu_prepare()
4886 nr_pages = buffer->buffers[cpu_i]->nr_pages; in trace_rb_cpu_prepare()
4887 if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { in trace_rb_cpu_prepare()
4894 nr_pages = 2; in trace_rb_cpu_prepare()
4896 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in trace_rb_cpu_prepare()
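
Finally, trace_rb_cpu_prepare() sizes the buffer for a newly onlined CPU: it scans the existing per-CPU buffers, reuses their page count if they all agree, and otherwise falls back to the two-page minimum and leaves resizing to the user. A standalone sketch of that selection, with an array standing in for the per-CPU buffer list:

#include <stdio.h>
#include <stddef.h>

static unsigned long pick_nr_pages(const unsigned long *existing, size_t n)
{
	unsigned long nr_pages = 0;
	size_t i;

	for (i = 0; i < n; i++) {
		if (nr_pages == 0)
			nr_pages = existing[i];	/* take the size of the first buffer */
		if (nr_pages != existing[i])
			return 2;		/* sizes differ: fall back to the minimum */
	}
	return nr_pages ? nr_pages : 2;		/* empty list: minimum, sketch-only guard */
}

int main(void)
{
	unsigned long same[]  = { 8, 8, 8 };
	unsigned long mixed[] = { 8, 16, 8 };

	printf("%lu\n", pick_nr_pages(same, 3));	/* 8 */
	printf("%lu\n", pick_nr_pages(mixed, 3));	/* 2 */
	return 0;
}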