Lines matching refs: cpu_buffer (identifier cross-reference over kernel/trace/ring_buffer.c)
527 struct ring_buffer_per_cpu *cpu_buffer; member
689 static inline bool rb_has_ext_writer(struct ring_buffer_per_cpu *cpu_buffer) in rb_has_ext_writer() argument
691 return has_ext_writer(cpu_buffer->buffer); in rb_has_ext_writer()
702 static void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event() argument
705 struct buffer_page *page = cpu_buffer->commit_page; in verify_event()
706 struct buffer_page *tail_page = READ_ONCE(cpu_buffer->tail_page); in verify_event()
729 static inline void verify_event(struct ring_buffer_per_cpu *cpu_buffer, in verify_event() argument
776 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[smp_processor_id()]; in ring_buffer_event_time_stamp() local
783 return rb_fix_abs_ts(ts, cpu_buffer->tail_page->page->time_stamp); in ring_buffer_event_time_stamp()
786 nest = local_read(&cpu_buffer->committing); in ring_buffer_event_time_stamp()
787 verify_event(cpu_buffer, event); in ring_buffer_event_time_stamp()
793 return cpu_buffer->event_stamp[nest]; in ring_buffer_event_time_stamp()
800 if (!rb_time_read(&cpu_buffer->write_stamp, &ts)) in ring_buffer_event_time_stamp()
802 ts = rb_time_stamp(cpu_buffer->buffer); in ring_buffer_event_time_stamp()
852 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit() local
856 nr_pages = cpu_buffer->nr_pages; in full_hit()
897 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wake_waiters() local
916 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
918 if (!cpu_buffer) in ring_buffer_wake_waiters()
920 rbwork = &cpu_buffer->irq_work; in ring_buffer_wake_waiters()
943 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wait() local
961 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
962 work = &cpu_buffer->irq_work; in ring_buffer_wait()
1015 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
1016 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in ring_buffer_wait()
1019 if (!cpu_buffer->shortest_full || in ring_buffer_wait()
1020 cpu_buffer->shortest_full > full) in ring_buffer_wait()
1021 cpu_buffer->shortest_full = full; in ring_buffer_wait()
1022 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
1061 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait() local
1071 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1072 work = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
1078 if (!cpu_buffer->shortest_full || in ring_buffer_poll_wait()
1079 cpu_buffer->shortest_full > full) in ring_buffer_poll_wait()
1080 cpu_buffer->shortest_full = full; in ring_buffer_poll_wait()
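The ring_buffer_wait() and ring_buffer_poll_wait() hits above both lower cpu_buffer->shortest_full to the smallest fill percentage any waiter has asked for, so the write side only has to test one threshold to know whether anyone needs waking. A minimal user-space model of that watermark update follows; the names and the plain mutex standing in for reader_lock are assumptions, not the kernel's code.

    #include <pthread.h>
    #include <stdbool.h>

    struct waiter_state {
        pthread_mutex_t lock;      /* stands in for reader_lock */
        int shortest_full;         /* smallest %-full any waiter needs; 0 = unset */
    };

    /* Record that a waiter wants to be woken once the buffer is `full` percent full. */
    static void note_waiter_threshold(struct waiter_state *ws, int full)
    {
        pthread_mutex_lock(&ws->lock);
        if (!ws->shortest_full || ws->shortest_full > full)
            ws->shortest_full = full;   /* keep only the lowest requested threshold */
        pthread_mutex_unlock(&ws->lock);
    }

    /* Writer side: one comparison against the lowest threshold covers all waiters. */
    static bool should_wake(const struct waiter_state *ws, int percent_full_now)
    {
        return ws->shortest_full && percent_full_now >= ws->shortest_full;
    }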
1302 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate() argument
1306 head = cpu_buffer->head_page; in rb_head_page_activate()
1327 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate() argument
1332 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1334 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1338 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set() argument
1361 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update() argument
1366 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_update()
1370 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head() argument
1375 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_head()
1379 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal() argument
1384 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_normal()
1396 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page() argument
1403 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
1407 list = cpu_buffer->pages; in rb_set_head_page()
1408 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
1411 page = head = cpu_buffer->head_page; in rb_set_head_page()
1421 cpu_buffer->head_page = page; in rb_set_head_page()
1428 RB_WARN_ON(cpu_buffer, 1); in rb_set_head_page()
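The rb_head_page_set*()/rb_set_head_page() hits above rely on the ring buffer's trick of storing the HEAD/UPDATE/NORMAL state of a page in otherwise-unused low bits of the list pointer that points at it (hence the constant masking with rb_list_head() elsewhere). A small stand-alone sketch of that low-bit pointer tagging, assuming aligned nodes; all names here are hypothetical.

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_FLAG_MASK   0x3UL   /* low two bits are free on aligned pointers */
    #define PAGE_FLAG_HEAD   0x1UL
    #define PAGE_FLAG_UPDATE 0x2UL

    struct node { struct node *next; int payload; };

    static struct node *ptr_clear(struct node *p)
    {
        return (struct node *)((uintptr_t)p & ~PAGE_FLAG_MASK);
    }

    static struct node *ptr_tag(struct node *p, unsigned long flag)
    {
        return (struct node *)((uintptr_t)ptr_clear(p) | flag);
    }

    static unsigned long ptr_flags(struct node *p)
    {
        return (uintptr_t)p & PAGE_FLAG_MASK;
    }

    int main(void)
    {
        static struct node a, b;           /* statics are suitably aligned */

        a.next = ptr_tag(&b, PAGE_FLAG_HEAD);
        assert(ptr_flags(a.next) == PAGE_FLAG_HEAD);
        assert(ptr_clear(a.next) == &b);   /* mask before dereferencing */
        printf("head flag carried in the low bits of %p\n", (void *)a.next);
        return 0;
    }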
1451 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update() argument
1470 local_inc(&cpu_buffer->pages_touched); in rb_tail_page_update()
1482 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1508 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page); in rb_tail_page_update()
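rb_tail_page_update() publishes the new tail page with a cmpxchg, so a writer whose view of tail_page is stale simply loses the race and leaves the newer value in place. A user-space sketch of that advance-only-if-unchanged step with C11 atomics; the types are illustrative, not the kernel's.

    #include <stdatomic.h>
    #include <stdbool.h>

    struct page_stub { struct page_stub *next; };

    /* Move tail from `seen` to `seen->next`, but only if nobody advanced it first. */
    static bool advance_tail(_Atomic(struct page_stub *) *tail, struct page_stub *seen)
    {
        struct page_stub *expected = seen;

        /* On failure another writer already moved tail; dropping the attempt is fine. */
        return atomic_compare_exchange_strong(tail, &expected, seen->next);
    }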
1512 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage() argument
1517 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK)) in rb_check_bpage()
1530 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages() argument
1532 struct list_head *head = rb_list_head(cpu_buffer->pages); in rb_check_pages()
1535 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1539 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1544 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1548 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
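rb_check_pages() walks the circular page list and insists that every next->prev and prev->next link points back where it came from, catching corruption before the buffer is used. A compact checker for the same invariant over a hypothetical doubly linked list type:

    #include <stdbool.h>

    struct dlist { struct dlist *next, *prev; };

    /* Return true if the circular doubly linked list rooted at `head` is consistent. */
    static bool list_is_consistent(const struct dlist *head)
    {
        const struct dlist *p = head;

        do {
            if (p->next->prev != p || p->prev->next != p)
                return false;       /* a neighbour does not point back at us */
            p = p->next;
        } while (p != head);

        return true;
    }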
1556 static int __rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in __rb_allocate_pages() argument
1597 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
1601 rb_check_bpage(cpu_buffer, bpage); in __rb_allocate_pages()
1605 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0); in __rb_allocate_pages()
1630 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages() argument
1637 if (__rb_allocate_pages(cpu_buffer, nr_pages, &pages)) in rb_allocate_pages()
1645 cpu_buffer->pages = pages.next; in rb_allocate_pages()
1648 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
1650 rb_check_pages(cpu_buffer); in rb_allocate_pages()
1658 struct ring_buffer_per_cpu *cpu_buffer; in rb_allocate_cpu_buffer() local
1663 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), in rb_allocate_cpu_buffer()
1665 if (!cpu_buffer) in rb_allocate_cpu_buffer()
1668 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
1669 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1670 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
1671 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1672 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
1673 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
1674 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
1675 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
1676 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
1677 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
1684 rb_check_bpage(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
1686 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
1693 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
1694 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
1696 ret = rb_allocate_pages(cpu_buffer, nr_pages); in rb_allocate_cpu_buffer()
1700 cpu_buffer->head_page in rb_allocate_cpu_buffer()
1701 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
1702 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
1704 rb_head_page_activate(cpu_buffer); in rb_allocate_cpu_buffer()
1706 return cpu_buffer; in rb_allocate_cpu_buffer()
1709 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
1712 kfree(cpu_buffer); in rb_allocate_cpu_buffer()
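__rb_allocate_pages() builds the buffer pages on a local list, and rb_allocate_pages()/rb_allocate_cpu_buffer() only attach that list to cpu_buffer once every allocation has succeeded, so a mid-way failure never leaves a half-built ring visible. A simplified model of that allocate-then-splice pattern; the list layout and names are assumptions for illustration.

    #include <stdlib.h>

    struct bpage { struct bpage *next; void *data; };

    /* Allocate `n` pages onto a private list; free everything on any failure. */
    static struct bpage *alloc_page_list(size_t n)
    {
        struct bpage *head = NULL;

        while (n--) {
            struct bpage *bp = malloc(sizeof(*bp));

            if (!bp || !(bp->data = malloc(4096))) {
                free(bp);
                while (head) {              /* unwind: nothing was published yet */
                    struct bpage *tmp = head;
                    head = head->next;
                    free(tmp->data);
                    free(tmp);
                }
                return NULL;
            }
            bp->next = head;
            head = bp;
        }
        return head;                         /* caller splices this in as one step */
    }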
1716 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer() argument
1718 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
1721 irq_work_sync(&cpu_buffer->irq_work.work); in rb_free_cpu_buffer()
1723 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
1726 rb_head_page_deactivate(cpu_buffer); in rb_free_cpu_buffer()
1736 free_page((unsigned long)cpu_buffer->free_page); in rb_free_cpu_buffer()
1738 kfree(cpu_buffer); in rb_free_cpu_buffer()
1881 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1894 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages() argument
1905 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1906 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
1916 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
1922 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
1948 cpu_buffer->pages = next_page; in rb_remove_pages()
1952 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
1959 cpu_buffer->read = 0; in rb_remove_pages()
1962 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
1963 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1965 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
1987 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
1988 local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes); in rb_remove_pages()
1989 local_inc(&cpu_buffer->pages_lost); in rb_remove_pages()
2001 RB_WARN_ON(cpu_buffer, nr_removed); in rb_remove_pages()
2007 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages() argument
2009 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
2012 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
2034 head_page = &rb_set_head_page(cpu_buffer)->list; in rb_insert_pages()
2068 RB_WARN_ON(cpu_buffer, !success); in rb_insert_pages()
2069 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
2074 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
2083 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages() argument
2087 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
2088 success = rb_insert_pages(cpu_buffer); in rb_update_pages()
2090 success = rb_remove_pages(cpu_buffer, in rb_update_pages()
2091 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
2094 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
2099 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler() local
2101 rb_update_pages(cpu_buffer); in update_pages_handler()
2102 complete(&cpu_buffer->update_done); in update_pages_handler()
2118 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize() local
2152 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2153 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2161 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2163 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2164 cpu_buffer->nr_pages; in ring_buffer_resize()
2168 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
2174 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2175 if (__rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2176 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2192 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2193 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2198 rb_update_pages(cpu_buffer); in ring_buffer_resize()
2199 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2202 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2208 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2209 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2213 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2214 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2219 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2221 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
2229 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2234 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2235 cpu_buffer->nr_pages; in ring_buffer_resize()
2237 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2238 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
2239 __rb_allocate_pages(cpu_buffer, cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2240 &cpu_buffer->new_pages)) { in ring_buffer_resize()
2249 rb_update_pages(cpu_buffer); in ring_buffer_resize()
2252 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2253 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2256 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2278 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2279 rb_check_pages(cpu_buffer); in ring_buffer_resize()
2292 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2293 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2295 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
2298 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
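ring_buffer_resize() records a signed nr_pages_to_update per CPU, stages any growth on cpu_buffer->new_pages before touching the live ring, and on failure frees only what was staged. The sketch below models that bookkeeping for a single buffer under the same idea (stage first, apply later, roll back cleanly); it is not the kernel's data layout.

    #include <stdlib.h>

    struct resize_plan {
        long nr_pages;            /* current size in pages */
        long nr_pages_to_update;  /* signed delta requested by the resize */
        void **staged;            /* pre-allocated pages for growth, else NULL */
    };

    /* Returns 0 on success; on failure nothing about the live buffer has changed. */
    static int plan_resize(struct resize_plan *p, long new_nr_pages)
    {
        p->nr_pages_to_update = new_nr_pages - p->nr_pages;
        p->staged = NULL;

        if (p->nr_pages_to_update > 0) {
            long i;

            p->staged = calloc((size_t)p->nr_pages_to_update, sizeof(void *));
            if (!p->staged)
                return -1;
            for (i = 0; i < p->nr_pages_to_update; i++) {
                p->staged[i] = malloc(4096);
                if (!p->staged[i]) {         /* roll back the staging area only */
                    while (i--)
                        free(p->staged[i]);
                    free(p->staged);
                    p->staged = NULL;
                    p->nr_pages_to_update = 0;
                    return -1;
                }
            }
        }
        /* Shrinking needs no allocation; pages are unlinked when the plan is applied. */
        return 0;
    }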
2328 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event() argument
2330 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
2331 cpu_buffer->reader_page->read); in rb_reader_event()
2400 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index() argument
2402 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
2415 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter() local
2423 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
2424 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
2441 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page() argument
2457 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, in rb_handle_head_page()
2478 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
2479 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes); in rb_handle_head_page()
2480 local_inc(&cpu_buffer->pages_lost); in rb_handle_head_page()
2511 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ in rb_handle_head_page()
2532 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, in rb_handle_head_page()
2549 RB_WARN_ON(cpu_buffer, 1); in rb_handle_head_page()
2566 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
2573 rb_head_page_set_normal(cpu_buffer, new_head, in rb_handle_head_page()
2584 ret = rb_head_page_set_normal(cpu_buffer, next_page, in rb_handle_head_page()
2587 if (RB_WARN_ON(cpu_buffer, in rb_handle_head_page()
2596 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail() argument
2662 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
2672 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2678 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail() argument
2682 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
2683 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
2697 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2721 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
2727 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
2731 ret = rb_handle_head_page(cpu_buffer, in rb_move_tail()
2749 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
2750 cpu_buffer->tail_page) && in rb_move_tail()
2751 (cpu_buffer->commit_page == in rb_move_tail()
2752 cpu_buffer->reader_page))) { in rb_move_tail()
2753 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2759 rb_tail_page_update(cpu_buffer, tail_page, next_page); in rb_move_tail()
2763 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
2766 rb_end_commit(cpu_buffer); in rb_move_tail()
2768 local_inc(&cpu_buffer->committing); in rb_move_tail()
2775 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
2810 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_timestamp() argument
2820 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), in rb_check_timestamp()
2828 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_timestamp() argument
2860 rb_check_timestamp(cpu_buffer, info); in rb_add_timestamp()
2881 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event() argument
2887 unsigned int nest = local_read(&cpu_buffer->committing) - 1; in rb_update_event()
2890 cpu_buffer->event_stamp[nest] = info->ts; in rb_update_event()
2897 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); in rb_update_event()
2941 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard() argument
2954 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
2976 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_try_to_discard()
2997 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
3006 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit() argument
3008 local_inc(&cpu_buffer->committing); in rb_start_commit()
3009 local_inc(&cpu_buffer->commits); in rb_start_commit()
3013 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write() argument
3026 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
3028 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
3029 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
3031 if (RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
3032 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3038 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3039 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3040 rb_inc_page(&cpu_buffer->commit_page); in rb_set_commit_to_write()
3044 while (rb_commit_index(cpu_buffer) != in rb_set_commit_to_write()
3045 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
3049 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3050 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3051 RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
3052 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
3065 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3069 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit() argument
3073 if (RB_WARN_ON(cpu_buffer, in rb_end_commit()
3074 !local_read(&cpu_buffer->committing))) in rb_end_commit()
3078 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
3081 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
3082 rb_set_commit_to_write(cpu_buffer); in rb_end_commit()
3084 local_dec(&cpu_buffer->committing); in rb_end_commit()
3094 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
3095 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
3096 local_inc(&cpu_buffer->committing); in rb_end_commit()
3114 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, in rb_commit() argument
3117 local_inc(&cpu_buffer->entries); in rb_commit()
3118 rb_end_commit(cpu_buffer); in rb_commit()
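rb_start_commit() raises cpu_buffer->committing and rb_end_commit() only moves the commit page forward when that count is back down to one, so an interrupt that nests inside a write leaves the final publish to the outermost writer (the kernel additionally re-checks the commits counter for races, which this sketch omits). A user-space model of the outermost-finisher rule with a C11 atomic counter and a placeholder finalize step:

    #include <stdatomic.h>

    static _Atomic int committing;

    static void start_commit(void)
    {
        atomic_fetch_add(&committing, 1);
    }

    static void finalize_commit(void)
    {
        /* placeholder for moving the commit pointer up to the last completed write */
    }

    static void end_commit(void)
    {
        /* Only the outermost writer (count currently 1) publishes the data. */
        if (atomic_load(&committing) == 1)
            finalize_commit();
        atomic_fetch_sub(&committing, 1);
    }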
3122 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
3130 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
3131 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
3133 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3136 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) in rb_wakeups()
3139 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in rb_wakeups()
3142 if (!cpu_buffer->irq_work.full_waiters_pending) in rb_wakeups()
3145 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); in rb_wakeups()
3147 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3150 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
3151 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
3153 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
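rb_wakeups() never wakes readers directly from the write path: it clears the pending flag and queues an irq_work so the actual wake-up runs in a safe context, and it only bothers with full_waiters once full_hit() says the watermark was reached. A minimal model of the test-and-clear-then-defer step; the deferred-work call is a stand-in, and the atomic exchange is a simplification of the kernel's flag handling.

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_bool waiters_pending;

    static void queue_deferred_wakeup(void)
    {
        /* stand-in for irq_work_queue(): run the real wake-up later, outside this path */
    }

    static void maybe_wake_waiters(void)
    {
        /* Clear the flag before queueing so a waiter arriving now re-arms it. */
        if (atomic_exchange(&waiters_pending, false))
            queue_deferred_wakeup();
    }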
3226 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock() argument
3228 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
3233 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { in trace_recursive_lock()
3240 if (val & (1 << (bit + cpu_buffer->nest))) { in trace_recursive_lock()
3246 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
3247 cpu_buffer->current_context = val; in trace_recursive_lock()
3253 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock() argument
3255 cpu_buffer->current_context &= in trace_recursive_unlock()
3256 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
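trace_recursive_lock() keeps one bit per trace context in cpu_buffer->current_context (shifted by cpu_buffer->nest, see ring_buffer_nest_start() below); re-entry from the same context is refused, and trace_recursive_unlock() drops the innermost bit with a subtraction trick rather than remembering which bit was set. A thread-local sketch of the same guard; context detection is faked with an explicit argument.

    #include <stdbool.h>

    static _Thread_local unsigned int current_context;  /* one bit per active context */
    static _Thread_local unsigned int nest;             /* bumped by nest_start()/nest_end() */

    static bool recursive_lock(unsigned int context_bit)
    {
        unsigned int bit = 1u << (context_bit + nest);

        if (current_context & bit)
            return false;               /* same context re-entered: refuse the write */
        current_context |= bit;
        return true;
    }

    static void recursive_unlock(void)
    {
        /* current_context &= current_context - (1 << nest) clears the lowest set bit
           at or above `nest`; with the kernel's context ordering that is the innermost
           context, i.e. the bit the matching recursive_lock() call set. */
        current_context &= current_context - (1u << nest);
    }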
3277 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_start() local
3283 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3285 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
3297 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_end() local
3302 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3304 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
3320 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit() local
3323 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
3325 rb_commit(cpu_buffer, event); in ring_buffer_unlock_commit()
3327 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
3329 trace_recursive_unlock(cpu_buffer); in ring_buffer_unlock_commit()
3393 static void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer() argument
3454 RB_WARN_ON(cpu_buffer, 1); in check_buffer()
3464 atomic_inc(&cpu_buffer->record_disabled); in check_buffer()
3468 cpu_buffer->cpu, in check_buffer()
3481 static inline void check_buffer(struct ring_buffer_per_cpu *cpu_buffer, in check_buffer() argument
3489 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next() argument
3499 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
3503 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3504 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3506 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3531 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); in __rb_reserve_next()
3542 check_buffer(cpu_buffer, info, CHECK_FULL_PAGE); in __rb_reserve_next()
3543 return rb_move_tail(cpu_buffer, tail, info); in __rb_reserve_next()
3548 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); in __rb_reserve_next()
3562 check_buffer(cpu_buffer, info, tail); in __rb_reserve_next()
3568 a_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3569 RB_WARN_ON(cpu_buffer, !a_ok); in __rb_reserve_next()
3577 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3578 rb_time_set(&cpu_buffer->before_stamp, ts); in __rb_reserve_next()
3581 /*E*/ a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3583 RB_WARN_ON(cpu_buffer, !a_ok); in __rb_reserve_next()
3619 rb_update_event(cpu_buffer, event, info); in __rb_reserve_next()
3631 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
3638 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event() argument
3652 rb_start_commit(cpu_buffer); in rb_reserve_next_event()
3663 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
3664 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
3665 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
3672 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
3694 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) in rb_reserve_next_event()
3697 event = __rb_reserve_next(cpu_buffer, &info); in rb_reserve_next_event()
3708 rb_end_commit(cpu_buffer); in rb_reserve_next_event()
3730 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve() local
3745 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
3747 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
3753 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_lock_reserve()
3756 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
3763 trace_recursive_unlock(cpu_buffer); in ring_buffer_lock_reserve()
3777 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry() argument
3781 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
3807 RB_WARN_ON(cpu_buffer, 1); in rb_decrement_entry()
3832 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit() local
3842 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3849 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
3851 rb_decrement_entry(cpu_buffer, event); in ring_buffer_discard_commit()
3852 if (rb_try_to_discard(cpu_buffer, event)) in ring_buffer_discard_commit()
3856 rb_end_commit(cpu_buffer); in ring_buffer_discard_commit()
3858 trace_recursive_unlock(cpu_buffer); in ring_buffer_discard_commit()
3882 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write() local
3898 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3900 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
3906 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_write()
3909 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
3917 rb_commit(cpu_buffer, event); in ring_buffer_write()
3919 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
3924 trace_recursive_unlock(cpu_buffer); in ring_buffer_write()
3933 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty() argument
3935 struct buffer_page *reader = cpu_buffer->reader_page; in rb_per_cpu_empty()
3936 struct buffer_page *head = rb_set_head_page(cpu_buffer); in rb_per_cpu_empty()
3937 struct buffer_page *commit = cpu_buffer->commit_page; in rb_per_cpu_empty()
4088 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu() local
4093 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4094 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
4108 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu() local
4113 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4114 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
4125 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries() argument
4127 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
4128 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
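rb_num_of_entries() derives the number of unread events from three counters: everything written (entries), minus events lost to overwrite (overrun), minus events already consumed (read); for example 1000 written, 200 overwritten and 700 read leaves 100. The same arithmetic as a plain helper:

    /* Events still waiting in the buffer = written - (overwritten + consumed). */
    static unsigned long num_unread(unsigned long entries, unsigned long overrun,
                                    unsigned long read)
    {
        return entries - (overrun + read);
    }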
4139 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts() local
4146 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4147 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4152 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
4153 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
4155 bpage = rb_set_head_page(cpu_buffer); in ring_buffer_oldest_event_ts()
4158 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
4171 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu() local
4177 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4178 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
4191 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu() local
4196 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4198 return rb_num_of_entries(cpu_buffer); in ring_buffer_entries_cpu()
4210 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu() local
4216 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4217 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
4233 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu() local
4239 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4240 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
4255 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu() local
4261 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4262 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
4276 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu() local
4281 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4282 return cpu_buffer->read; in ring_buffer_read_events_cpu()
4295 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries() local
4301 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
4302 entries += rb_num_of_entries(cpu_buffer); in ring_buffer_entries()
4318 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns() local
4324 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
4325 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
4334 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset() local
4337 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
4338 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
4342 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
4345 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
4346 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; in rb_iter_reset()
4362 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset() local
4368 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
4370 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4372 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4382 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty() local
4391 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
4392 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
4393 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
4394 commit_page = cpu_buffer->commit_page; in ring_buffer_iter_empty()
4408 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
4420 iter->head == rb_page_commit(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
4425 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp() argument
4436 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
4441 delta = rb_fix_abs_ts(delta, cpu_buffer->read_stamp); in rb_update_read_stamp()
4442 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
4446 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
4450 RB_WARN_ON(cpu_buffer, 1); in rb_update_read_stamp()
4481 RB_WARN_ON(iter->cpu_buffer, 1); in rb_update_iter_read_stamp()
4535 noinline rb_swap_reader_page_ext(struct ring_buffer_per_cpu *cpu_buffer) in rb_swap_reader_page_ext() argument
4541 if (cpu_buffer->buffer->ext_cb->swap_reader(cpu_buffer->cpu)) { in rb_swap_reader_page_ext()
4546 new_rb_page = cpu_buffer->reader_page; in rb_swap_reader_page_ext()
4552 new_reader = ring_buffer_search_footer(cpu_buffer->head_page, in rb_swap_reader_page_ext()
4566 cpu_buffer->reader_page = new_reader; in rb_swap_reader_page_ext()
4567 cpu_buffer->reader_page->read = 0; in rb_swap_reader_page_ext()
4572 cpu_buffer->head_page = new_head; in rb_swap_reader_page_ext()
4579 cpu_buffer->pages = &new_head->list; in rb_swap_reader_page_ext()
4585 if (overrun != cpu_buffer->last_overrun) { in rb_swap_reader_page_ext()
4586 cpu_buffer->lost_events = overrun - cpu_buffer->last_overrun; in rb_swap_reader_page_ext()
4587 cpu_buffer->last_overrun = overrun; in rb_swap_reader_page_ext()
4594 rb_swap_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_swap_reader_page() argument
4603 local_set(&cpu_buffer->reader_page->write, 0); in rb_swap_reader_page()
4604 local_set(&cpu_buffer->reader_page->entries, 0); in rb_swap_reader_page()
4605 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_swap_reader_page()
4606 cpu_buffer->reader_page->real_end = 0; in rb_swap_reader_page()
4612 reader = rb_set_head_page(cpu_buffer); in rb_swap_reader_page()
4616 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_swap_reader_page()
4617 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_swap_reader_page()
4624 cpu_buffer->pages = reader->list.prev; in rb_swap_reader_page()
4627 rb_set_list_to_head(&cpu_buffer->reader_page->list); in rb_swap_reader_page()
4639 overwrite = local_read(&(cpu_buffer->overrun)); in rb_swap_reader_page()
4652 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_swap_reader_page()
4665 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_swap_reader_page()
4666 rb_inc_page(&cpu_buffer->head_page); in rb_swap_reader_page()
4668 local_inc(&cpu_buffer->pages_read); in rb_swap_reader_page()
4671 cpu_buffer->reader_page = reader; in rb_swap_reader_page()
4672 cpu_buffer->reader_page->read = 0; in rb_swap_reader_page()
4674 if (overwrite != cpu_buffer->last_overrun) { in rb_swap_reader_page()
4675 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_swap_reader_page()
4676 cpu_buffer->last_overrun = overwrite; in rb_swap_reader_page()
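rb_swap_reader_page() gives the consumer a private spare page: the empty reader page is linked in where the head page was, and the old head becomes the new reader page, which writers wrapping around will then skip. A deliberately simplified model of that exchange using a mutex, whereas the kernel performs it locklessly with cmpxchg on flagged list pointers; all names here are made up.

    #include <pthread.h>

    struct page_slot { char data[4096]; };

    struct swap_ring {
        pthread_mutex_t lock;
        struct page_slot *oldest;   /* stands in for the head page */
        struct page_slot *reader;   /* consumer-owned spare page */
    };

    /* Detach the oldest page for exclusive reading, donating the spare in its place. */
    static struct page_slot *take_oldest_page(struct swap_ring *r)
    {
        struct page_slot *got;

        pthread_mutex_lock(&r->lock);
        got = r->oldest;
        r->oldest = r->reader;      /* producers will now fill the donated page */
        r->reader = got;            /* consumer scans `got` without racing producers */
        pthread_mutex_unlock(&r->lock);

        return got;
    }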
4683 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page() argument
4691 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
4700 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { in rb_get_reader_page()
4705 reader = cpu_buffer->reader_page; in rb_get_reader_page()
4708 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
4713 if (RB_WARN_ON(cpu_buffer, in rb_get_reader_page()
4714 cpu_buffer->reader_page->read > page_size)) in rb_get_reader_page()
4719 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
4723 if (rb_num_of_entries(cpu_buffer) == 0) in rb_get_reader_page()
4726 if (rb_has_ext_writer(cpu_buffer)) in rb_get_reader_page()
4727 reader = rb_swap_reader_page_ext(cpu_buffer); in rb_get_reader_page()
4729 reader = rb_swap_reader_page(cpu_buffer); in rb_get_reader_page()
4737 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
4739 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
4759 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT)) in rb_get_reader_page()
4777 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader() argument
4783 reader = rb_get_reader_page(cpu_buffer); in rb_advance_reader()
4786 if (RB_WARN_ON(cpu_buffer, !reader)) in rb_advance_reader()
4789 event = rb_reader_event(cpu_buffer); in rb_advance_reader()
4792 cpu_buffer->read++; in rb_advance_reader()
4794 rb_update_read_stamp(cpu_buffer, event); in rb_advance_reader()
4797 cpu_buffer->reader_page->read += length; in rb_advance_reader()
4798 cpu_buffer->read_bytes += length; in rb_advance_reader()
4803 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter() local
4805 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
4821 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
4830 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events() argument
4832 return cpu_buffer->lost_events; in rb_lost_events()
4836 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek() argument
4852 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) in rb_buffer_peek()
4855 reader = rb_get_reader_page(cpu_buffer); in rb_buffer_peek()
4859 event = rb_reader_event(cpu_buffer); in rb_buffer_peek()
4864 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
4877 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
4884 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4885 cpu_buffer->cpu, ts); in rb_buffer_peek()
4888 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
4893 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
4894 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4895 cpu_buffer->cpu, ts); in rb_buffer_peek()
4898 *lost_events = rb_lost_events(cpu_buffer); in rb_buffer_peek()
4902 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
4913 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek() local
4920 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
4921 buffer = cpu_buffer->buffer; in rb_iter_peek()
4928 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
4929 iter->cache_reader_page != cpu_buffer->reader_page)) in rb_iter_peek()
4946 if (rb_per_cpu_empty(cpu_buffer)) in rb_iter_peek()
4976 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
4977 cpu_buffer->cpu, ts); in rb_iter_peek()
4987 cpu_buffer->cpu, ts); in rb_iter_peek()
4992 RB_WARN_ON(cpu_buffer, 1); in rb_iter_peek()
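rb_buffer_peek() and rb_iter_peek() loop over the current page and dispatch on the event type: padding is skipped (or ends the page), time-extend and time-stamp events only adjust the running timestamp, and only data events are handed to the caller. A compact sketch of that dispatch over a hypothetical, much simpler event encoding:

    #include <stddef.h>
    #include <stdint.h>

    enum evt_type { EVT_PADDING, EVT_TIME_EXTEND, EVT_TIME_STAMP, EVT_DATA };

    struct evt { enum evt_type type; uint64_t delta; size_t len; };

    /* Return the next data event in evts[], keeping *ts up to date; NULL if none. */
    static const struct evt *peek_data(const struct evt *evts, size_t n, uint64_t *ts)
    {
        for (size_t i = 0; i < n; i++) {
            switch (evts[i].type) {
            case EVT_PADDING:
                continue;                   /* discarded space, nothing to deliver */
            case EVT_TIME_EXTEND:
                *ts += evts[i].delta;       /* widen the delta, keep scanning */
                continue;
            case EVT_TIME_STAMP:
                *ts = evts[i].delta;        /* absolute timestamp replaces *ts */
                continue;
            case EVT_DATA:
                *ts += evts[i].delta;
                return &evts[i];
            }
        }
        return NULL;
    }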
4999 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_lock() argument
5002 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
5015 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
5019 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
5024 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) in rb_reader_unlock() argument
5027 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
5045 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek() local
5055 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_peek()
5056 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_peek()
5058 rb_advance_reader(cpu_buffer); in ring_buffer_peek()
5059 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_peek()
5093 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek() local
5098 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5100 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
5123 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume() local
5135 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
5137 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_consume()
5139 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_consume()
5141 cpu_buffer->lost_events = 0; in ring_buffer_consume()
5142 rb_advance_reader(cpu_buffer); in ring_buffer_consume()
5145 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_consume()
5158 static void ring_buffer_update_view(struct ring_buffer_per_cpu *cpu_buffer) in ring_buffer_update_view() argument
5163 if (!rb_has_ext_writer(cpu_buffer)) in ring_buffer_update_view()
5166 raw_spin_lock_irq(&cpu_buffer->reader_lock); in ring_buffer_update_view()
5167 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_update_view()
5169 cpu_buffer->buffer->ext_cb->update_footers(cpu_buffer->cpu); in ring_buffer_update_view()
5171 bpage = cpu_buffer->reader_page; in ring_buffer_update_view()
5174 local_set(&cpu_buffer->entries, footer->stats.entries); in ring_buffer_update_view()
5175 local_set(&cpu_buffer->pages_touched, footer->stats.pages_touched); in ring_buffer_update_view()
5176 local_set(&cpu_buffer->overrun, footer->stats.overrun); in ring_buffer_update_view()
5179 bpage = ring_buffer_search_footer(cpu_buffer->commit_page, in ring_buffer_update_view()
5185 cpu_buffer->commit_page = bpage; in ring_buffer_update_view()
5188 bpage = ring_buffer_search_footer(cpu_buffer->head_page, in ring_buffer_update_view()
5196 __set_head_page_flag(cpu_buffer->head_page, RB_PAGE_NORMAL); in ring_buffer_update_view()
5201 cpu_buffer->reader_page->list.next = &cpu_buffer->head_page->list; in ring_buffer_update_view()
5203 cpu_buffer->head_page = bpage; in ring_buffer_update_view()
5206 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_update_view()
5207 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in ring_buffer_update_view()
5212 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poke() local
5217 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poke()
5219 ring_buffer_update_view(cpu_buffer); in ring_buffer_poke()
5220 rb_wakeups(buffer, cpu_buffer); in ring_buffer_poke()
5249 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_prepare() local
5266 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
5268 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
5270 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_read_prepare()
5272 ring_buffer_update_view(cpu_buffer); in ring_buffer_read_prepare()
5306 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start() local
5312 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
5314 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5315 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
5317 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
5318 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
5332 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish() local
5341 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5342 rb_check_pages(cpu_buffer); in ring_buffer_read_finish()
5343 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
5345 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_read_finish()
5360 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance() local
5363 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
5367 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
5400 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu() argument
5404 rb_head_page_deactivate(cpu_buffer); in rb_reset_cpu()
5406 cpu_buffer->head_page in rb_reset_cpu()
5407 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
5408 rb_clear_buffer_page(cpu_buffer->head_page); in rb_reset_cpu()
5409 list_for_each_entry(page, cpu_buffer->pages, list) { in rb_reset_cpu()
5413 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
5414 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
5416 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
5417 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
5418 rb_clear_buffer_page(cpu_buffer->reader_page); in rb_reset_cpu()
5420 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
5421 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
5422 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
5423 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
5424 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
5425 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
5426 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
5427 local_set(&cpu_buffer->pages_touched, 0); in rb_reset_cpu()
5428 local_set(&cpu_buffer->pages_lost, 0); in rb_reset_cpu()
5429 local_set(&cpu_buffer->pages_read, 0); in rb_reset_cpu()
5430 cpu_buffer->last_pages_touch = 0; in rb_reset_cpu()
5431 cpu_buffer->shortest_full = 0; in rb_reset_cpu()
5432 cpu_buffer->read = 0; in rb_reset_cpu()
5433 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
5435 rb_time_set(&cpu_buffer->write_stamp, 0); in rb_reset_cpu()
5436 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_reset_cpu()
5438 memset(cpu_buffer->event_stamp, 0, sizeof(cpu_buffer->event_stamp)); in rb_reset_cpu()
5440 cpu_buffer->lost_events = 0; in rb_reset_cpu()
5441 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
5443 rb_head_page_activate(cpu_buffer); in rb_reset_cpu()
5447 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in reset_disabled_cpu_buffer() argument
5451 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5453 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in reset_disabled_cpu_buffer()
5456 arch_spin_lock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5458 rb_reset_cpu(cpu_buffer); in reset_disabled_cpu_buffer()
5460 arch_spin_unlock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5463 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5473 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu() local
5481 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5482 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5487 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset_cpu()
5489 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5490 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5506 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset_online_cpus() local
5513 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5515 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5516 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5523 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5529 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) in ring_buffer_reset_online_cpus()
5532 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset_online_cpus()
5534 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5535 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5547 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset() local
5554 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5556 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5557 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset()
5564 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5566 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset()
5568 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset()
5569 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5582 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty() local
5590 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
5592 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty()
5593 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty()
5594 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty()
5612 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu() local
5620 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
5622 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty_cpu()
5623 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty_cpu()
5624 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty_cpu()
5737 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_alloc_read_page() local
5745 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
5747 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5749 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
5750 bpage = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
5751 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
5754 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5784 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_free_read_page() local
5792 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
5799 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5801 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
5802 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
5806 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5850 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page() local
5880 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
5882 reader = rb_get_reader_page(cpu_buffer); in ring_buffer_read_page()
5886 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
5892 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
5902 cpu_buffer->reader_page == cpu_buffer->commit_page || in ring_buffer_read_page()
5904 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
5917 cpu_buffer->reader_page == cpu_buffer->commit_page)) in ring_buffer_read_page()
5930 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
5945 rb_advance_reader(cpu_buffer); in ring_buffer_read_page()
5952 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
5965 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
5966 cpu_buffer->read_bytes += rb_page_commit(reader); in ring_buffer_read_page()
5987 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
6013 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()