Lines matching refs: cpu_buffer
562 struct ring_buffer_per_cpu *cpu_buffer; member
805 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit() local
809 nr_pages = cpu_buffer->nr_pages; in full_hit()
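
The full_hit() references above read cpu_buffer->nr_pages to decide whether the per-cpu buffer has filled past a caller-supplied percentage. A minimal userspace sketch of that kind of percentage check (a simplified stand-in struct, not the kernel code; dirty_pages is an assumed counter standing in for the kernel's dirty-page accounting):

#include <stdbool.h>
#include <stdio.h>

/* Stand-in for ring_buffer_per_cpu with only the fields the check needs. */
struct pcpu_buf {
	unsigned long nr_pages;		/* total data pages in this per-cpu buffer */
	unsigned long dirty_pages;	/* assumed counter: pages written, not yet read */
};

/* True once at least 'full' percent of the pages carry unread data. */
static bool full_hit(const struct pcpu_buf *b, int full)
{
	if (!b->nr_pages || full <= 0)
		return true;
	/* Compare dirty/nr_pages >= full/100 without floating point. */
	return b->dirty_pages * 100 >= (unsigned long)full * b->nr_pages;
}

int main(void)
{
	struct pcpu_buf b = { .nr_pages = 8, .dirty_pages = 3 };

	printf("50%% full? %s\n", full_hit(&b, 50) ? "yes" : "no");	/* 3/8 -> no  */
	b.dirty_pages = 4;
	printf("50%% full? %s\n", full_hit(&b, 50) ? "yes" : "no");	/* 4/8 -> yes */
	return 0;
}
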
850 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wake_waiters() local
861 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
862 rbwork = &cpu_buffer->irq_work; in ring_buffer_wake_waiters()
885 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_wait() local
903 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
904 work = &cpu_buffer->irq_work; in ring_buffer_wait()
957 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
958 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page; in ring_buffer_wait()
961 if (!cpu_buffer->shortest_full || in ring_buffer_wait()
962 cpu_buffer->shortest_full > full) in ring_buffer_wait()
963 cpu_buffer->shortest_full = full; in ring_buffer_wait()
964 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_wait()
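
The ring_buffer_wait() references show a waiter taking reader_lock and recording the smallest requested fill percentage in shortest_full, so a writer only has to wake waiters once the easiest-to-satisfy threshold is crossed. A small sketch of that bookkeeping, with a pthread mutex standing in for the raw spinlock:

#include <pthread.h>
#include <stdio.h>

/* Each waiter asks to be woken at some fill percentage; the buffer
 * remembers only the smallest (easiest to satisfy) request. */
struct pcpu_buf {
	pthread_mutex_t reader_lock;
	int shortest_full;	/* 0 means no full-waiter registered yet */
};

static void register_full_waiter(struct pcpu_buf *b, int full)
{
	pthread_mutex_lock(&b->reader_lock);
	if (!b->shortest_full || b->shortest_full > full)
		b->shortest_full = full;
	pthread_mutex_unlock(&b->reader_lock);
}

int main(void)
{
	struct pcpu_buf b = { PTHREAD_MUTEX_INITIALIZER, 0 };

	register_full_waiter(&b, 75);
	register_full_waiter(&b, 25);	/* lowers the threshold */
	register_full_waiter(&b, 90);	/* ignored: 25 is already lower */
	printf("shortest_full = %d%%\n", b.shortest_full);	/* prints 25 */
	return 0;
}
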
1003 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_poll_wait() local
1013 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1014 work = &cpu_buffer->irq_work; in ring_buffer_poll_wait()
1020 if (!cpu_buffer->shortest_full || in ring_buffer_poll_wait()
1021 cpu_buffer->shortest_full > full) in ring_buffer_poll_wait()
1022 cpu_buffer->shortest_full = full; in ring_buffer_poll_wait()
1203 rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_is_head_page() argument
1233 static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_set_list_to_head() argument
1246 static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_activate() argument
1250 head = cpu_buffer->head_page; in rb_head_page_activate()
1257 rb_set_list_to_head(cpu_buffer, head->list.prev); in rb_head_page_activate()
1271 rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) in rb_head_page_deactivate() argument
1276 rb_list_head_clear(cpu_buffer->pages); in rb_head_page_deactivate()
1278 list_for_each(hd, cpu_buffer->pages) in rb_head_page_deactivate()
1282 static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set() argument
1305 static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_update() argument
1310 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_update()
1314 static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_head() argument
1319 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_head()
1323 static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, in rb_head_page_set_normal() argument
1328 return rb_head_page_set(cpu_buffer, head, prev, in rb_head_page_set_normal()
1332 static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_inc_page() argument
1341 rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_head_page() argument
1348 if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) in rb_set_head_page()
1352 list = cpu_buffer->pages; in rb_set_head_page()
1353 if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) in rb_set_head_page()
1356 page = head = cpu_buffer->head_page; in rb_set_head_page()
1365 if (rb_is_head_page(cpu_buffer, page, page->list.prev)) { in rb_set_head_page()
1366 cpu_buffer->head_page = page; in rb_set_head_page()
1369 rb_inc_page(cpu_buffer, &page); in rb_set_head_page()
1373 RB_WARN_ON(cpu_buffer, 1); in rb_set_head_page()
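
rb_is_head_page()/rb_set_head_page() and the rb_list_head() calls above rely on the head page being marked by flag bits stored in the low bits of a list pointer rather than in a separate field, which is what lets the head move with a single cmpxchg. A hedged sketch of that pointer-tagging technique (a simplified two-page ring, malloc alignment assumed to leave the low two bits free; the flag values mirror RB_PAGE_HEAD/RB_FLAG_MASK but the structure is not the kernel's):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define RB_PAGE_HEAD	1UL	/* this page is the head (next to be read) */
#define RB_FLAG_MASK	3UL	/* low two bits of the pointer hold the flag */

struct page_node {
	struct page_node *next;	/* low bits may carry RB_PAGE_* flags */
	int id;
};

/* Strip the flag bits to recover the real pointer. */
static struct page_node *rb_list_head(struct page_node *p)
{
	return (struct page_node *)((uintptr_t)p & ~RB_FLAG_MASK);
}

static unsigned long rb_flags(struct page_node *p)
{
	return (uintptr_t)p & RB_FLAG_MASK;
}

static void set_flag(struct page_node **link, unsigned long flag)
{
	*link = (struct page_node *)((uintptr_t)rb_list_head(*link) | flag);
}

int main(void)
{
	/* malloc'd structs are suitably aligned, so the low bits are free. */
	struct page_node *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));

	a->id = 0; b->id = 1;
	a->next = b; b->next = a;		/* two-page ring */
	set_flag(&a->next, RB_PAGE_HEAD);	/* mark b as the head page */

	printf("a->next points to page %d, flags=%lu\n",
	       rb_list_head(a->next)->id, rb_flags(a->next));
	free(a); free(b);
	return 0;
}
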
1396 static void rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, in rb_tail_page_update() argument
1415 local_inc(&cpu_buffer->pages_touched); in rb_tail_page_update()
1427 if (tail_page == READ_ONCE(cpu_buffer->tail_page)) { in rb_tail_page_update()
1453 (void)cmpxchg(&cpu_buffer->tail_page, tail_page, next_page); in rb_tail_page_update()
1457 static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_bpage() argument
1462 if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK)) in rb_check_bpage()
1475 static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_check_pages() argument
1477 struct list_head *head = rb_list_head(cpu_buffer->pages); in rb_check_pages()
1480 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1484 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1489 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1493 if (RB_WARN_ON(cpu_buffer, in rb_check_pages()
1572 static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer, in rb_allocate_pages() argument
1579 if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu)) in rb_allocate_pages()
1587 cpu_buffer->pages = pages.next; in rb_allocate_pages()
1590 cpu_buffer->nr_pages = nr_pages; in rb_allocate_pages()
1592 rb_check_pages(cpu_buffer); in rb_allocate_pages()
1600 struct ring_buffer_per_cpu *cpu_buffer; in rb_allocate_cpu_buffer() local
1605 cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()), in rb_allocate_cpu_buffer()
1607 if (!cpu_buffer) in rb_allocate_cpu_buffer()
1610 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
1611 cpu_buffer->buffer = buffer; in rb_allocate_cpu_buffer()
1612 raw_spin_lock_init(&cpu_buffer->reader_lock); in rb_allocate_cpu_buffer()
1613 lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key); in rb_allocate_cpu_buffer()
1614 cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; in rb_allocate_cpu_buffer()
1615 INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler); in rb_allocate_cpu_buffer()
1616 init_completion(&cpu_buffer->update_done); in rb_allocate_cpu_buffer()
1617 init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters); in rb_allocate_cpu_buffer()
1618 init_waitqueue_head(&cpu_buffer->irq_work.waiters); in rb_allocate_cpu_buffer()
1619 init_waitqueue_head(&cpu_buffer->irq_work.full_waiters); in rb_allocate_cpu_buffer()
1626 rb_check_bpage(cpu_buffer, bpage); in rb_allocate_cpu_buffer()
1628 cpu_buffer->reader_page = bpage; in rb_allocate_cpu_buffer()
1635 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_allocate_cpu_buffer()
1636 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_allocate_cpu_buffer()
1638 ret = rb_allocate_pages(cpu_buffer, nr_pages); in rb_allocate_cpu_buffer()
1642 cpu_buffer->head_page in rb_allocate_cpu_buffer()
1643 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_allocate_cpu_buffer()
1644 cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page; in rb_allocate_cpu_buffer()
1646 rb_head_page_activate(cpu_buffer); in rb_allocate_cpu_buffer()
1648 return cpu_buffer; in rb_allocate_cpu_buffer()
1651 free_buffer_page(cpu_buffer->reader_page); in rb_allocate_cpu_buffer()
1654 kfree(cpu_buffer); in rb_allocate_cpu_buffer()
1658 static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in rb_free_cpu_buffer() argument
1660 struct list_head *head = cpu_buffer->pages; in rb_free_cpu_buffer()
1663 irq_work_sync(&cpu_buffer->irq_work.work); in rb_free_cpu_buffer()
1665 free_buffer_page(cpu_buffer->reader_page); in rb_free_cpu_buffer()
1668 rb_head_page_deactivate(cpu_buffer); in rb_free_cpu_buffer()
1678 free_page((unsigned long)cpu_buffer->free_page); in rb_free_cpu_buffer()
1680 kfree(cpu_buffer); in rb_free_cpu_buffer()
1801 static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
1814 rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned long nr_pages) in rb_remove_pages() argument
1825 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1826 atomic_inc(&cpu_buffer->record_disabled); in rb_remove_pages()
1836 tail_page = &cpu_buffer->tail_page->list; in rb_remove_pages()
1842 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in rb_remove_pages()
1855 cpu_buffer->pages_removed += nr_removed; in rb_remove_pages()
1870 cpu_buffer->pages = next_page; in rb_remove_pages()
1874 cpu_buffer->head_page = list_entry(next_page, in rb_remove_pages()
1878 atomic_dec(&cpu_buffer->record_disabled); in rb_remove_pages()
1879 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_remove_pages()
1881 RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)); in rb_remove_pages()
1892 rb_inc_page(cpu_buffer, &tmp_iter_page); in rb_remove_pages()
1903 local_add(page_entries, &cpu_buffer->overrun); in rb_remove_pages()
1904 local_sub(rb_page_commit(to_remove_page), &cpu_buffer->entries_bytes); in rb_remove_pages()
1905 local_inc(&cpu_buffer->pages_lost); in rb_remove_pages()
1917 RB_WARN_ON(cpu_buffer, nr_removed); in rb_remove_pages()
1923 rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_insert_pages() argument
1925 struct list_head *pages = &cpu_buffer->new_pages; in rb_insert_pages()
1928 raw_spin_lock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
1950 head_page = &rb_set_head_page(cpu_buffer)->list; in rb_insert_pages()
1984 RB_WARN_ON(cpu_buffer, !success); in rb_insert_pages()
1985 raw_spin_unlock_irq(&cpu_buffer->reader_lock); in rb_insert_pages()
1990 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in rb_insert_pages()
1999 static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer) in rb_update_pages() argument
2003 if (cpu_buffer->nr_pages_to_update > 0) in rb_update_pages()
2004 success = rb_insert_pages(cpu_buffer); in rb_update_pages()
2006 success = rb_remove_pages(cpu_buffer, in rb_update_pages()
2007 -cpu_buffer->nr_pages_to_update); in rb_update_pages()
2010 cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update; in rb_update_pages()
2015 struct ring_buffer_per_cpu *cpu_buffer = container_of(work, in update_pages_handler() local
2017 rb_update_pages(cpu_buffer); in update_pages_handler()
2018 complete(&cpu_buffer->update_done); in update_pages_handler()
2034 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_resize() local
2068 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2069 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2077 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2079 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2080 cpu_buffer->nr_pages; in ring_buffer_resize()
2084 if (cpu_buffer->nr_pages_to_update <= 0) in ring_buffer_resize()
2090 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2091 if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2092 &cpu_buffer->new_pages, cpu)) { in ring_buffer_resize()
2108 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2109 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2114 rb_update_pages(cpu_buffer); in ring_buffer_resize()
2115 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2118 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2124 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2125 if (!cpu_buffer->nr_pages_to_update) in ring_buffer_resize()
2129 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2130 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2139 cpu_buffer = buffer->buffers[cpu_id]; in ring_buffer_resize()
2141 if (nr_pages == cpu_buffer->nr_pages) in ring_buffer_resize()
2149 if (atomic_read(&cpu_buffer->resize_disabled)) { in ring_buffer_resize()
2154 cpu_buffer->nr_pages_to_update = nr_pages - in ring_buffer_resize()
2155 cpu_buffer->nr_pages; in ring_buffer_resize()
2157 INIT_LIST_HEAD(&cpu_buffer->new_pages); in ring_buffer_resize()
2158 if (cpu_buffer->nr_pages_to_update > 0 && in ring_buffer_resize()
2159 __rb_allocate_pages(cpu_buffer->nr_pages_to_update, in ring_buffer_resize()
2160 &cpu_buffer->new_pages, cpu_id)) { in ring_buffer_resize()
2169 rb_update_pages(cpu_buffer); in ring_buffer_resize()
2172 &cpu_buffer->update_pages_work); in ring_buffer_resize()
2173 wait_for_completion(&cpu_buffer->update_done); in ring_buffer_resize()
2176 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2198 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2199 rb_check_pages(cpu_buffer); in ring_buffer_resize()
2212 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2213 cpu_buffer->nr_pages_to_update = 0; in ring_buffer_resize()
2215 if (list_empty(&cpu_buffer->new_pages)) in ring_buffer_resize()
2218 list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages, in ring_buffer_resize()
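
The ring_buffer_resize() references follow an allocate-first, commit-second protocol: nr_pages_to_update is computed per CPU, any new pages are staged on new_pages so a failed allocation aborts before the live ring is touched, and only then is rb_update_pages() run (directly or via update_pages_work). A rough userspace sketch of that ordering, with calloc standing in for the buffer-page allocator and the splice step reduced to a counter update:

#include <stdio.h>
#include <stdlib.h>

/* Allocate first, commit second: any allocation failure is seen before
 * the live buffer is modified, so a failed resize leaves it untouched. */
struct pcpu_buf {
	long nr_pages;
	long nr_pages_to_update;	/* >0 grow, <0 shrink */
	void **new_pages;		/* staged pages for a grow */
};

static int prepare_resize(struct pcpu_buf *b, long nr_pages)
{
	b->nr_pages_to_update = nr_pages - b->nr_pages;
	if (b->nr_pages_to_update <= 0)
		return 0;		/* shrinking needs no allocation */

	b->new_pages = calloc(b->nr_pages_to_update, sizeof(void *));
	if (!b->new_pages)
		return -1;
	for (long i = 0; i < b->nr_pages_to_update; i++) {
		b->new_pages[i] = calloc(1, 4096);
		if (!b->new_pages[i])
			return -1;	/* caller unwinds; buffer untouched */
	}
	return 0;
}

static void commit_resize(struct pcpu_buf *b)
{
	/* The real code splices new_pages into the ring (or unlinks pages);
	 * here only the count is updated. */
	b->nr_pages += b->nr_pages_to_update;
	b->nr_pages_to_update = 0;
}

int main(void)
{
	struct pcpu_buf b = { .nr_pages = 4 };

	if (prepare_resize(&b, 8) == 0)
		commit_resize(&b);
	printf("nr_pages = %ld\n", b.nr_pages);	/* prints 8 */
	return 0;
}
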
2248 rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_event() argument
2250 return __rb_page_index(cpu_buffer->reader_page, in rb_reader_event()
2251 cpu_buffer->reader_page->read); in rb_reader_event()
2320 rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer) in rb_commit_index() argument
2322 return rb_page_commit(cpu_buffer->commit_page); in rb_commit_index()
2335 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_inc_iter() local
2343 if (iter->head_page == cpu_buffer->reader_page) in rb_inc_iter()
2344 iter->head_page = rb_set_head_page(cpu_buffer); in rb_inc_iter()
2346 rb_inc_page(cpu_buffer, &iter->head_page); in rb_inc_iter()
2361 rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer, in rb_handle_head_page() argument
2377 type = rb_head_page_set_update(cpu_buffer, next_page, tail_page, in rb_handle_head_page()
2398 local_add(entries, &cpu_buffer->overrun); in rb_handle_head_page()
2399 local_sub(rb_page_commit(next_page), &cpu_buffer->entries_bytes); in rb_handle_head_page()
2400 local_inc(&cpu_buffer->pages_lost); in rb_handle_head_page()
2431 RB_WARN_ON(cpu_buffer, 1); /* WTF??? */ in rb_handle_head_page()
2450 rb_inc_page(cpu_buffer, &new_head); in rb_handle_head_page()
2452 ret = rb_head_page_set_head(cpu_buffer, new_head, next_page, in rb_handle_head_page()
2469 RB_WARN_ON(cpu_buffer, 1); in rb_handle_head_page()
2486 buffer_tail_page = READ_ONCE(cpu_buffer->tail_page); in rb_handle_head_page()
2493 rb_head_page_set_normal(cpu_buffer, new_head, in rb_handle_head_page()
2504 ret = rb_head_page_set_normal(cpu_buffer, next_page, in rb_handle_head_page()
2507 if (RB_WARN_ON(cpu_buffer, in rb_handle_head_page()
2516 rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_reset_tail() argument
2582 local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes); in rb_reset_tail()
2592 static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer);
2598 rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer, in rb_move_tail() argument
2602 struct buffer_page *commit_page = cpu_buffer->commit_page; in rb_move_tail()
2603 struct trace_buffer *buffer = cpu_buffer->buffer; in rb_move_tail()
2609 rb_inc_page(cpu_buffer, &next_page); in rb_move_tail()
2617 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2635 if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) { in rb_move_tail()
2641 if (!rb_is_reader_page(cpu_buffer->commit_page)) { in rb_move_tail()
2647 local_inc(&cpu_buffer->dropped_events); in rb_move_tail()
2651 ret = rb_handle_head_page(cpu_buffer, in rb_move_tail()
2669 if (unlikely((cpu_buffer->commit_page != in rb_move_tail()
2670 cpu_buffer->tail_page) && in rb_move_tail()
2671 (cpu_buffer->commit_page == in rb_move_tail()
2672 cpu_buffer->reader_page))) { in rb_move_tail()
2673 local_inc(&cpu_buffer->commit_overrun); in rb_move_tail()
2679 rb_tail_page_update(cpu_buffer, tail_page, next_page); in rb_move_tail()
2683 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
2686 rb_end_commit(cpu_buffer); in rb_move_tail()
2688 local_inc(&cpu_buffer->committing); in rb_move_tail()
2695 rb_reset_tail(cpu_buffer, tail, info); in rb_move_tail()
2722 static inline bool rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
2733 rb_check_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_check_timestamp() argument
2743 (unsigned long long)(rb_time_read(&cpu_buffer->write_stamp, &write_stamp) ? write_stamp : 0), in rb_check_timestamp()
2751 static void rb_add_timestamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_add_timestamp() argument
2776 rb_check_timestamp(cpu_buffer, info); in rb_add_timestamp()
2797 rb_update_event(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_event() argument
2809 rb_add_timestamp(cpu_buffer, &event, info, &delta, &length); in rb_update_event()
2853 rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer, in rb_event_is_commit() argument
2862 return cpu_buffer->commit_page->page == (void *)addr && in rb_event_is_commit()
2863 rb_commit_index(cpu_buffer) == index; in rb_event_is_commit()
2886 rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer, in rb_try_to_discard() argument
2901 bpage = READ_ONCE(cpu_buffer->tail_page); in rb_try_to_discard()
2905 if (!rb_time_read(&cpu_buffer->write_stamp, &write_stamp)) in rb_try_to_discard()
2922 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_try_to_discard()
2925 if (!rb_time_cmpxchg(&cpu_buffer->write_stamp, in rb_try_to_discard()
2948 local_sub(event_length, &cpu_buffer->entries_bytes); in rb_try_to_discard()
2957 static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_start_commit() argument
2959 local_inc(&cpu_buffer->committing); in rb_start_commit()
2960 local_inc(&cpu_buffer->commits); in rb_start_commit()
2964 rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer) in rb_set_commit_to_write() argument
2977 max_count = cpu_buffer->nr_pages * 100; in rb_set_commit_to_write()
2979 while (cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page)) { in rb_set_commit_to_write()
2980 if (RB_WARN_ON(cpu_buffer, !(--max_count))) in rb_set_commit_to_write()
2982 if (RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
2983 rb_is_reader_page(cpu_buffer->tail_page))) in rb_set_commit_to_write()
2989 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
2990 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
2991 rb_inc_page(cpu_buffer, &cpu_buffer->commit_page); in rb_set_commit_to_write()
2995 while (rb_commit_index(cpu_buffer) != in rb_set_commit_to_write()
2996 rb_page_write(cpu_buffer->commit_page)) { in rb_set_commit_to_write()
3000 local_set(&cpu_buffer->commit_page->page->commit, in rb_set_commit_to_write()
3001 rb_page_write(cpu_buffer->commit_page)); in rb_set_commit_to_write()
3002 RB_WARN_ON(cpu_buffer, in rb_set_commit_to_write()
3003 local_read(&cpu_buffer->commit_page->page->commit) & in rb_set_commit_to_write()
3016 if (unlikely(cpu_buffer->commit_page != READ_ONCE(cpu_buffer->tail_page))) in rb_set_commit_to_write()
3020 static __always_inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer) in rb_end_commit() argument
3024 if (RB_WARN_ON(cpu_buffer, in rb_end_commit()
3025 !local_read(&cpu_buffer->committing))) in rb_end_commit()
3029 commits = local_read(&cpu_buffer->commits); in rb_end_commit()
3032 if (local_read(&cpu_buffer->committing) == 1) in rb_end_commit()
3033 rb_set_commit_to_write(cpu_buffer); in rb_end_commit()
3035 local_dec(&cpu_buffer->committing); in rb_end_commit()
3045 if (unlikely(local_read(&cpu_buffer->commits) != commits) && in rb_end_commit()
3046 !local_read(&cpu_buffer->committing)) { in rb_end_commit()
3047 local_inc(&cpu_buffer->committing); in rb_end_commit()
3065 static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer, in rb_commit() argument
3068 local_inc(&cpu_buffer->entries); in rb_commit()
3069 rb_end_commit(cpu_buffer); in rb_commit()
3073 rb_wakeups(struct trace_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer) in rb_wakeups() argument
3081 if (cpu_buffer->irq_work.waiters_pending) { in rb_wakeups()
3082 cpu_buffer->irq_work.waiters_pending = false; in rb_wakeups()
3084 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3087 if (cpu_buffer->last_pages_touch == local_read(&cpu_buffer->pages_touched)) in rb_wakeups()
3090 if (cpu_buffer->reader_page == cpu_buffer->commit_page) in rb_wakeups()
3093 if (!cpu_buffer->irq_work.full_waiters_pending) in rb_wakeups()
3096 cpu_buffer->last_pages_touch = local_read(&cpu_buffer->pages_touched); in rb_wakeups()
3098 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3101 cpu_buffer->irq_work.wakeup_full = true; in rb_wakeups()
3102 cpu_buffer->irq_work.full_waiters_pending = false; in rb_wakeups()
3104 irq_work_queue(&cpu_buffer->irq_work.work); in rb_wakeups()
3170 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_lock() argument
3172 unsigned int val = cpu_buffer->current_context; in trace_recursive_lock()
3182 if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) { in trace_recursive_lock()
3189 if (val & (1 << (bit + cpu_buffer->nest))) in trace_recursive_lock()
3193 val |= (1 << (bit + cpu_buffer->nest)); in trace_recursive_lock()
3194 cpu_buffer->current_context = val; in trace_recursive_lock()
3200 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer) in trace_recursive_unlock() argument
3202 cpu_buffer->current_context &= in trace_recursive_unlock()
3203 cpu_buffer->current_context - (1 << cpu_buffer->nest); in trace_recursive_unlock()
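
trace_recursive_lock()/trace_recursive_unlock() keep one bit per tracing context in current_context: seeing the bit already set means the tracer recursed within the same context, and unlock clears the lowest set bit, which is the most recently entered context. A compact sketch of that guard (it omits the kernel's transition bit and the nest offset added by ring_buffer_nest_start()):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* One bit per tracing context; a lower bit number means a more deeply
 * nested context, so the lowest set bit is the most recently entered. */
enum { CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL, CTX_MAX };

struct pcpu_buf {
	unsigned int current_context;	/* bitmask of contexts currently writing */
};

/* Refuse the write if this context is already writing: that can only
 * mean the tracer recursed into itself. */
static bool recursive_lock(struct pcpu_buf *b, int ctx)
{
	if (b->current_context & (1u << ctx))
		return false;			/* recursion detected */
	b->current_context |= 1u << ctx;
	return true;
}

/* x & (x - 1) clears the lowest set bit, i.e. the innermost context. */
static void recursive_unlock(struct pcpu_buf *b)
{
	b->current_context &= b->current_context - 1;
}

int main(void)
{
	struct pcpu_buf b = { 0 };

	assert(recursive_lock(&b, CTX_NORMAL));	/* normal write starts          */
	assert(recursive_lock(&b, CTX_IRQ));	/* interrupt nests: allowed     */
	assert(!recursive_lock(&b, CTX_IRQ));	/* same context again: rejected */
	recursive_unlock(&b);			/* drops CTX_IRQ (lowest bit)   */
	recursive_unlock(&b);			/* drops CTX_NORMAL             */
	printf("context mask = %#x\n", b.current_context);	/* 0 */
	return 0;
}
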
3224 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_start() local
3230 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3232 cpu_buffer->nest += NESTED_BITS; in ring_buffer_nest_start()
3244 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_nest_end() local
3249 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3251 cpu_buffer->nest -= NESTED_BITS; in ring_buffer_nest_end()
3267 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_unlock_commit() local
3270 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
3272 rb_commit(cpu_buffer, event); in ring_buffer_unlock_commit()
3274 rb_wakeups(buffer, cpu_buffer); in ring_buffer_unlock_commit()
3276 trace_recursive_unlock(cpu_buffer); in ring_buffer_unlock_commit()
3285 __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer, in __rb_reserve_next() argument
3295 tail_page = info->tail_page = READ_ONCE(cpu_buffer->tail_page); in __rb_reserve_next()
3299 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3300 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3302 info->ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3327 /*B*/ rb_time_set(&cpu_buffer->before_stamp, info->ts); in __rb_reserve_next()
3339 b_ok = rb_time_read(&cpu_buffer->before_stamp, &info->before); in __rb_reserve_next()
3340 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3342 (void)rb_time_cmpxchg(&cpu_buffer->before_stamp, in __rb_reserve_next()
3344 return rb_move_tail(cpu_buffer, tail, info); in __rb_reserve_next()
3352 /*D*/ rb_time_set(&cpu_buffer->write_stamp, info->ts); in __rb_reserve_next()
3354 /*E*/ s_ok = rb_time_read(&cpu_buffer->before_stamp, &save_before); in __rb_reserve_next()
3355 RB_WARN_ON(cpu_buffer, !s_ok); in __rb_reserve_next()
3367 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3368 RB_WARN_ON(cpu_buffer, !a_ok); in __rb_reserve_next()
3376 (void)rb_time_cmpxchg(&cpu_buffer->write_stamp, in __rb_reserve_next()
3383 a_ok = rb_time_read(&cpu_buffer->write_stamp, &info->after); in __rb_reserve_next()
3385 RB_WARN_ON(cpu_buffer, !a_ok); in __rb_reserve_next()
3386 ts = rb_time_stamp(cpu_buffer->buffer); in __rb_reserve_next()
3390 rb_time_cmpxchg(&cpu_buffer->write_stamp, in __rb_reserve_next()
3420 rb_update_event(cpu_buffer, event, info); in __rb_reserve_next()
3432 local_add(info->length, &cpu_buffer->entries_bytes); in __rb_reserve_next()
3439 struct ring_buffer_per_cpu *cpu_buffer, in rb_reserve_next_event() argument
3453 rb_start_commit(cpu_buffer); in rb_reserve_next_event()
3464 if (unlikely(READ_ONCE(cpu_buffer->buffer) != buffer)) { in rb_reserve_next_event()
3465 local_dec(&cpu_buffer->committing); in rb_reserve_next_event()
3466 local_dec(&cpu_buffer->commits); in rb_reserve_next_event()
3473 if (ring_buffer_time_stamp_abs(cpu_buffer->buffer)) { in rb_reserve_next_event()
3495 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000)) in rb_reserve_next_event()
3498 event = __rb_reserve_next(cpu_buffer, &info); in rb_reserve_next_event()
3509 rb_end_commit(cpu_buffer); in rb_reserve_next_event()
3531 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_lock_reserve() local
3546 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
3548 if (unlikely(atomic_read(&cpu_buffer->record_disabled))) in ring_buffer_lock_reserve()
3554 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_lock_reserve()
3557 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_lock_reserve()
3564 trace_recursive_unlock(cpu_buffer); in ring_buffer_lock_reserve()
3578 rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer, in rb_decrement_entry() argument
3582 struct buffer_page *bpage = cpu_buffer->commit_page; in rb_decrement_entry()
3597 rb_inc_page(cpu_buffer, &bpage); in rb_decrement_entry()
3604 rb_inc_page(cpu_buffer, &bpage); in rb_decrement_entry()
3608 RB_WARN_ON(cpu_buffer, 1); in rb_decrement_entry()
3633 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_discard_commit() local
3640 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3647 RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing)); in ring_buffer_discard_commit()
3649 rb_decrement_entry(cpu_buffer, event); in ring_buffer_discard_commit()
3650 if (rb_try_to_discard(cpu_buffer, event)) in ring_buffer_discard_commit()
3654 rb_end_commit(cpu_buffer); in ring_buffer_discard_commit()
3656 trace_recursive_unlock(cpu_buffer); in ring_buffer_discard_commit()
3680 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_write() local
3696 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
3698 if (atomic_read(&cpu_buffer->record_disabled)) in ring_buffer_write()
3704 if (unlikely(trace_recursive_lock(cpu_buffer))) in ring_buffer_write()
3707 event = rb_reserve_next_event(buffer, cpu_buffer, length); in ring_buffer_write()
3715 rb_commit(cpu_buffer, event); in ring_buffer_write()
3717 rb_wakeups(buffer, cpu_buffer); in ring_buffer_write()
3722 trace_recursive_unlock(cpu_buffer); in ring_buffer_write()
3731 static bool rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) in rb_per_cpu_empty() argument
3733 struct buffer_page *reader = cpu_buffer->reader_page; in rb_per_cpu_empty()
3734 struct buffer_page *head = rb_set_head_page(cpu_buffer); in rb_per_cpu_empty()
3735 struct buffer_page *commit = cpu_buffer->commit_page; in rb_per_cpu_empty()
3880 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_disable_cpu() local
3885 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
3886 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_record_disable_cpu()
3900 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_record_enable_cpu() local
3905 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
3906 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_record_enable_cpu()
3917 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) in rb_num_of_entries() argument
3919 return local_read(&cpu_buffer->entries) - in rb_num_of_entries()
3920 (local_read(&cpu_buffer->overrun) + cpu_buffer->read); in rb_num_of_entries()
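
rb_num_of_entries() above is pure bookkeeping: events still unread = events written - (events overwritten + events already read). A tiny sketch of the same arithmetic on stand-in counters:

#include <stdio.h>

/* Simplified per-cpu counters, mirroring only the arithmetic. */
struct pcpu_buf {
	unsigned long entries;	/* events successfully written */
	unsigned long overrun;	/* events lost to overwrite    */
	unsigned long read;	/* events already consumed     */
};

/* Events still sitting unread in the buffer. */
static unsigned long num_of_entries(const struct pcpu_buf *b)
{
	return b->entries - (b->overrun + b->read);
}

int main(void)
{
	struct pcpu_buf b = { .entries = 100, .overrun = 12, .read = 40 };

	printf("unread entries = %lu\n", num_of_entries(&b));	/* prints 48 */
	return 0;
}
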
3931 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_oldest_event_ts() local
3938 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
3939 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
3944 if (cpu_buffer->tail_page == cpu_buffer->reader_page) in ring_buffer_oldest_event_ts()
3945 bpage = cpu_buffer->reader_page; in ring_buffer_oldest_event_ts()
3947 bpage = rb_set_head_page(cpu_buffer); in ring_buffer_oldest_event_ts()
3950 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_oldest_event_ts()
3963 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_bytes_cpu() local
3969 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
3970 ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; in ring_buffer_bytes_cpu()
3983 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries_cpu() local
3988 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
3990 return rb_num_of_entries(cpu_buffer); in ring_buffer_entries_cpu()
4002 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overrun_cpu() local
4008 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4009 ret = local_read(&cpu_buffer->overrun); in ring_buffer_overrun_cpu()
4025 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_commit_overrun_cpu() local
4031 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4032 ret = local_read(&cpu_buffer->commit_overrun); in ring_buffer_commit_overrun_cpu()
4047 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_dropped_events_cpu() local
4053 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4054 ret = local_read(&cpu_buffer->dropped_events); in ring_buffer_dropped_events_cpu()
4068 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_events_cpu() local
4073 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4074 return cpu_buffer->read; in ring_buffer_read_events_cpu()
4087 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_entries() local
4093 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
4094 entries += rb_num_of_entries(cpu_buffer); in ring_buffer_entries()
4110 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_overruns() local
4116 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
4117 overruns += local_read(&cpu_buffer->overrun); in ring_buffer_overruns()
4126 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in rb_iter_reset() local
4129 iter->head_page = cpu_buffer->reader_page; in rb_iter_reset()
4130 iter->head = cpu_buffer->reader_page->read; in rb_iter_reset()
4134 iter->cache_read = cpu_buffer->read; in rb_iter_reset()
4135 iter->cache_pages_removed = cpu_buffer->pages_removed; in rb_iter_reset()
4138 iter->read_stamp = cpu_buffer->read_stamp; in rb_iter_reset()
4139 iter->page_stamp = cpu_buffer->reader_page->page->time_stamp; in rb_iter_reset()
4155 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_reset() local
4161 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_reset()
4163 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4165 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_reset()
4175 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_iter_empty() local
4184 cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_empty()
4185 reader = cpu_buffer->reader_page; in ring_buffer_iter_empty()
4186 head_page = cpu_buffer->head_page; in ring_buffer_iter_empty()
4187 commit_page = cpu_buffer->commit_page; in ring_buffer_iter_empty()
4201 curr_commit_page = READ_ONCE(cpu_buffer->commit_page); in ring_buffer_iter_empty()
4213 iter->head == rb_page_commit(cpu_buffer->reader_page))); in ring_buffer_iter_empty()
4218 rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer, in rb_update_read_stamp() argument
4229 cpu_buffer->read_stamp += delta; in rb_update_read_stamp()
4234 cpu_buffer->read_stamp = delta; in rb_update_read_stamp()
4238 cpu_buffer->read_stamp += event->time_delta; in rb_update_read_stamp()
4242 RB_WARN_ON(cpu_buffer, 1); in rb_update_read_stamp()
4272 RB_WARN_ON(iter->cpu_buffer, 1); in rb_update_iter_read_stamp()
4278 rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer) in rb_get_reader_page() argument
4287 arch_spin_lock(&cpu_buffer->lock); in rb_get_reader_page()
4296 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) { in rb_get_reader_page()
4301 reader = cpu_buffer->reader_page; in rb_get_reader_page()
4304 if (cpu_buffer->reader_page->read < rb_page_size(reader)) in rb_get_reader_page()
4308 if (RB_WARN_ON(cpu_buffer, in rb_get_reader_page()
4309 cpu_buffer->reader_page->read > rb_page_size(reader))) in rb_get_reader_page()
4314 if (cpu_buffer->commit_page == cpu_buffer->reader_page) in rb_get_reader_page()
4318 if (rb_num_of_entries(cpu_buffer) == 0) in rb_get_reader_page()
4324 local_set(&cpu_buffer->reader_page->write, 0); in rb_get_reader_page()
4325 local_set(&cpu_buffer->reader_page->entries, 0); in rb_get_reader_page()
4326 local_set(&cpu_buffer->reader_page->page->commit, 0); in rb_get_reader_page()
4327 cpu_buffer->reader_page->real_end = 0; in rb_get_reader_page()
4333 reader = rb_set_head_page(cpu_buffer); in rb_get_reader_page()
4336 cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next); in rb_get_reader_page()
4337 cpu_buffer->reader_page->list.prev = reader->list.prev; in rb_get_reader_page()
4344 cpu_buffer->pages = reader->list.prev; in rb_get_reader_page()
4347 rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list); in rb_get_reader_page()
4359 overwrite = local_read(&(cpu_buffer->overrun)); in rb_get_reader_page()
4372 ret = rb_head_page_replace(reader, cpu_buffer->reader_page); in rb_get_reader_page()
4385 rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list; in rb_get_reader_page()
4386 rb_inc_page(cpu_buffer, &cpu_buffer->head_page); in rb_get_reader_page()
4388 local_inc(&cpu_buffer->pages_read); in rb_get_reader_page()
4391 cpu_buffer->reader_page = reader; in rb_get_reader_page()
4392 cpu_buffer->reader_page->read = 0; in rb_get_reader_page()
4394 if (overwrite != cpu_buffer->last_overrun) { in rb_get_reader_page()
4395 cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun; in rb_get_reader_page()
4396 cpu_buffer->last_overrun = overwrite; in rb_get_reader_page()
4404 cpu_buffer->read_stamp = reader->page->time_stamp; in rb_get_reader_page()
4406 arch_spin_unlock(&cpu_buffer->lock); in rb_get_reader_page()
4426 if (RB_WARN_ON(cpu_buffer, nr_loops == USECS_WAIT)) in rb_get_reader_page()
4444 static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer) in rb_advance_reader() argument
4450 reader = rb_get_reader_page(cpu_buffer); in rb_advance_reader()
4453 if (RB_WARN_ON(cpu_buffer, !reader)) in rb_advance_reader()
4456 event = rb_reader_event(cpu_buffer); in rb_advance_reader()
4459 cpu_buffer->read++; in rb_advance_reader()
4461 rb_update_read_stamp(cpu_buffer, event); in rb_advance_reader()
4464 cpu_buffer->reader_page->read += length; in rb_advance_reader()
4465 cpu_buffer->read_bytes += length; in rb_advance_reader()
4470 struct ring_buffer_per_cpu *cpu_buffer; in rb_advance_iter() local
4472 cpu_buffer = iter->cpu_buffer; in rb_advance_iter()
4488 if (iter->head_page == cpu_buffer->commit_page) in rb_advance_iter()
4497 static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer) in rb_lost_events() argument
4499 return cpu_buffer->lost_events; in rb_lost_events()
4503 rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts, in rb_buffer_peek() argument
4519 if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2)) in rb_buffer_peek()
4522 reader = rb_get_reader_page(cpu_buffer); in rb_buffer_peek()
4526 event = rb_reader_event(cpu_buffer); in rb_buffer_peek()
4531 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
4544 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
4550 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4551 cpu_buffer->cpu, ts); in rb_buffer_peek()
4554 rb_advance_reader(cpu_buffer); in rb_buffer_peek()
4559 *ts = cpu_buffer->read_stamp + event->time_delta; in rb_buffer_peek()
4560 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_buffer_peek()
4561 cpu_buffer->cpu, ts); in rb_buffer_peek()
4564 *lost_events = rb_lost_events(cpu_buffer); in rb_buffer_peek()
4568 RB_WARN_ON(cpu_buffer, 1); in rb_buffer_peek()
4579 struct ring_buffer_per_cpu *cpu_buffer; in rb_iter_peek() local
4586 cpu_buffer = iter->cpu_buffer; in rb_iter_peek()
4587 buffer = cpu_buffer->buffer; in rb_iter_peek()
4594 if (unlikely(iter->cache_read != cpu_buffer->read || in rb_iter_peek()
4595 iter->cache_reader_page != cpu_buffer->reader_page || in rb_iter_peek()
4596 iter->cache_pages_removed != cpu_buffer->pages_removed)) in rb_iter_peek()
4613 if (rb_per_cpu_empty(cpu_buffer)) in rb_iter_peek()
4642 ring_buffer_normalize_time_stamp(cpu_buffer->buffer, in rb_iter_peek()
4643 cpu_buffer->cpu, ts); in rb_iter_peek()
4653 cpu_buffer->cpu, ts); in rb_iter_peek()
4658 RB_WARN_ON(cpu_buffer, 1); in rb_iter_peek()
4665 static inline bool rb_reader_lock(struct ring_buffer_per_cpu *cpu_buffer) in rb_reader_lock() argument
4668 raw_spin_lock(&cpu_buffer->reader_lock); in rb_reader_lock()
4681 if (raw_spin_trylock(&cpu_buffer->reader_lock)) in rb_reader_lock()
4685 atomic_inc(&cpu_buffer->record_disabled); in rb_reader_lock()
4690 rb_reader_unlock(struct ring_buffer_per_cpu *cpu_buffer, bool locked) in rb_reader_unlock() argument
4693 raw_spin_unlock(&cpu_buffer->reader_lock); in rb_reader_unlock()
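
rb_reader_lock()/rb_reader_unlock() show the NMI-safe pattern: normally the reader takes reader_lock outright, but from an NMI-like context it may only trylock, and on failure it bumps record_disabled and reads unlocked; recording is left disabled afterwards, since an unlocked read may corrupt the buffer. A sketch of that fallback with a pthread mutex and a C11 atomic standing in for the raw spinlock and atomic_t (in_nmi is passed in rather than detected):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct pcpu_buf {
	pthread_mutex_t reader_lock;
	atomic_int record_disabled;
};

/* Returns true if the lock was taken; false means "read unlocked". */
static bool reader_lock(struct pcpu_buf *b, bool in_nmi)
{
	if (!in_nmi) {
		pthread_mutex_lock(&b->reader_lock);
		return true;
	}
	/* Blocking here could deadlock, so only try the lock. */
	if (pthread_mutex_trylock(&b->reader_lock) == 0)
		return true;

	/* Could not take the lock: disable writers and read anyway. */
	atomic_fetch_add(&b->record_disabled, 1);
	return false;
}

static void reader_unlock(struct pcpu_buf *b, bool locked)
{
	if (locked)
		pthread_mutex_unlock(&b->reader_lock);
	/* else: recording stays disabled; the unlocked read may have
	 * corrupted the buffer, so writing is not re-enabled here. */
}

int main(void)
{
	struct pcpu_buf b = { PTHREAD_MUTEX_INITIALIZER, 0 };
	bool locked = reader_lock(&b, /*in_nmi=*/false);

	printf("locked=%d record_disabled=%d\n",
	       locked, atomic_load(&b.record_disabled));
	reader_unlock(&b, locked);
	return 0;
}
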
4711 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek() local
4721 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_peek()
4722 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_peek()
4724 rb_advance_reader(cpu_buffer); in ring_buffer_peek()
4725 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_peek()
4759 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_peek() local
4764 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
4766 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_peek()
4789 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_consume() local
4801 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
4803 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_consume()
4805 event = rb_buffer_peek(cpu_buffer, ts, lost_events); in ring_buffer_consume()
4807 cpu_buffer->lost_events = 0; in ring_buffer_consume()
4808 rb_advance_reader(cpu_buffer); in ring_buffer_consume()
4811 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_consume()
4848 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_prepare() local
4865 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
4867 iter->cpu_buffer = cpu_buffer; in ring_buffer_read_prepare()
4869 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_read_prepare()
4903 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_read_start() local
4909 cpu_buffer = iter->cpu_buffer; in ring_buffer_read_start()
4911 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
4912 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_read_start()
4914 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_read_start()
4915 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_start()
4929 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_read_finish() local
4938 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
4939 rb_check_pages(cpu_buffer); in ring_buffer_read_finish()
4940 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_finish()
4942 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_read_finish()
4957 struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; in ring_buffer_iter_advance() local
4960 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
4964 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_iter_advance()
4997 rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) in rb_reset_cpu() argument
5001 rb_head_page_deactivate(cpu_buffer); in rb_reset_cpu()
5003 cpu_buffer->head_page in rb_reset_cpu()
5004 = list_entry(cpu_buffer->pages, struct buffer_page, list); in rb_reset_cpu()
5005 rb_clear_buffer_page(cpu_buffer->head_page); in rb_reset_cpu()
5006 list_for_each_entry(page, cpu_buffer->pages, list) { in rb_reset_cpu()
5010 cpu_buffer->tail_page = cpu_buffer->head_page; in rb_reset_cpu()
5011 cpu_buffer->commit_page = cpu_buffer->head_page; in rb_reset_cpu()
5013 INIT_LIST_HEAD(&cpu_buffer->reader_page->list); in rb_reset_cpu()
5014 INIT_LIST_HEAD(&cpu_buffer->new_pages); in rb_reset_cpu()
5015 rb_clear_buffer_page(cpu_buffer->reader_page); in rb_reset_cpu()
5017 local_set(&cpu_buffer->entries_bytes, 0); in rb_reset_cpu()
5018 local_set(&cpu_buffer->overrun, 0); in rb_reset_cpu()
5019 local_set(&cpu_buffer->commit_overrun, 0); in rb_reset_cpu()
5020 local_set(&cpu_buffer->dropped_events, 0); in rb_reset_cpu()
5021 local_set(&cpu_buffer->entries, 0); in rb_reset_cpu()
5022 local_set(&cpu_buffer->committing, 0); in rb_reset_cpu()
5023 local_set(&cpu_buffer->commits, 0); in rb_reset_cpu()
5024 local_set(&cpu_buffer->pages_touched, 0); in rb_reset_cpu()
5025 local_set(&cpu_buffer->pages_lost, 0); in rb_reset_cpu()
5026 local_set(&cpu_buffer->pages_read, 0); in rb_reset_cpu()
5027 cpu_buffer->last_pages_touch = 0; in rb_reset_cpu()
5028 cpu_buffer->shortest_full = 0; in rb_reset_cpu()
5029 cpu_buffer->read = 0; in rb_reset_cpu()
5030 cpu_buffer->read_bytes = 0; in rb_reset_cpu()
5032 rb_time_set(&cpu_buffer->write_stamp, 0); in rb_reset_cpu()
5033 rb_time_set(&cpu_buffer->before_stamp, 0); in rb_reset_cpu()
5035 cpu_buffer->lost_events = 0; in rb_reset_cpu()
5036 cpu_buffer->last_overrun = 0; in rb_reset_cpu()
5038 rb_head_page_activate(cpu_buffer); in rb_reset_cpu()
5039 cpu_buffer->pages_removed = 0; in rb_reset_cpu()
5043 static void reset_disabled_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer) in reset_disabled_cpu_buffer() argument
5047 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5049 if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) in reset_disabled_cpu_buffer()
5052 arch_spin_lock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5054 rb_reset_cpu(cpu_buffer); in reset_disabled_cpu_buffer()
5056 arch_spin_unlock(&cpu_buffer->lock); in reset_disabled_cpu_buffer()
5059 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in reset_disabled_cpu_buffer()
5069 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu() local
5077 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5078 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5083 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset_cpu()
5085 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_cpu()
5086 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset_cpu()
5102 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset_online_cpus() local
5109 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5111 atomic_add(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5112 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5119 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5125 if (!(atomic_read(&cpu_buffer->resize_disabled) & RESET_BIT)) in ring_buffer_reset_online_cpus()
5128 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset_online_cpus()
5130 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset_online_cpus()
5131 atomic_sub(RESET_BIT, &cpu_buffer->resize_disabled); in ring_buffer_reset_online_cpus()
5143 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_reset() local
5150 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5152 atomic_inc(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5153 atomic_inc(&cpu_buffer->record_disabled); in ring_buffer_reset()
5160 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5162 reset_disabled_cpu_buffer(cpu_buffer); in ring_buffer_reset()
5164 atomic_dec(&cpu_buffer->record_disabled); in ring_buffer_reset()
5165 atomic_dec(&cpu_buffer->resize_disabled); in ring_buffer_reset()
5178 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty() local
5186 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
5188 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty()
5189 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty()
5190 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty()
5208 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_empty_cpu() local
5216 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
5218 dolock = rb_reader_lock(cpu_buffer); in ring_buffer_empty_cpu()
5219 ret = rb_per_cpu_empty(cpu_buffer); in ring_buffer_empty_cpu()
5220 rb_reader_unlock(cpu_buffer, dolock); in ring_buffer_empty_cpu()
5330 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_alloc_read_page() local
5338 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
5340 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5342 if (cpu_buffer->free_page) { in ring_buffer_alloc_read_page()
5343 bpage = cpu_buffer->free_page; in ring_buffer_alloc_read_page()
5344 cpu_buffer->free_page = NULL; in ring_buffer_alloc_read_page()
5347 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_alloc_read_page()
5377 struct ring_buffer_per_cpu *cpu_buffer; in ring_buffer_free_read_page() local
5385 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
5392 arch_spin_lock(&cpu_buffer->lock); in ring_buffer_free_read_page()
5394 if (!cpu_buffer->free_page) { in ring_buffer_free_read_page()
5395 cpu_buffer->free_page = bpage; in ring_buffer_free_read_page()
5399 arch_spin_unlock(&cpu_buffer->lock); in ring_buffer_free_read_page()
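
ring_buffer_alloc_read_page()/ring_buffer_free_read_page() keep a single spare page per CPU in free_page so readers usually avoid the page allocator, with the slot protected by the arch spinlock. A userspace sketch of that one-slot cache, with a pthread mutex and calloc standing in for the lock and the page allocation:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096

/* One-slot cache for reader pages: freeing parks the page in free_page
 * if the slot is empty, allocating reuses it when possible. */
struct pcpu_buf {
	pthread_mutex_t lock;
	void *free_page;	/* NULL or one cached page */
};

static void *alloc_read_page(struct pcpu_buf *b)
{
	void *page = NULL;

	pthread_mutex_lock(&b->lock);
	if (b->free_page) {		/* fast path: reuse the cached page */
		page = b->free_page;
		b->free_page = NULL;
	}
	pthread_mutex_unlock(&b->lock);

	if (!page)			/* slow path: really allocate */
		page = calloc(1, PAGE_SIZE);
	return page;
}

static void free_read_page(struct pcpu_buf *b, void *page)
{
	pthread_mutex_lock(&b->lock);
	if (!b->free_page) {		/* park it for the next reader */
		b->free_page = page;
		page = NULL;
	}
	pthread_mutex_unlock(&b->lock);

	free(page);			/* free(NULL) is a no-op */
}

int main(void)
{
	struct pcpu_buf b = { PTHREAD_MUTEX_INITIALIZER, NULL };
	void *p = alloc_read_page(&b);

	free_read_page(&b, p);		/* cached, not freed */
	printf("cached page reused: %s\n",
	       alloc_read_page(&b) == p ? "yes" : "no");
	return 0;
}
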
5443 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page() local
5473 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()
5475 reader = rb_get_reader_page(cpu_buffer); in ring_buffer_read_page()
5479 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
5485 missed_events = cpu_buffer->lost_events; in ring_buffer_read_page()
5495 cpu_buffer->reader_page == cpu_buffer->commit_page) { in ring_buffer_read_page()
5496 struct buffer_data_page *rpage = cpu_buffer->reader_page->page; in ring_buffer_read_page()
5509 cpu_buffer->reader_page == cpu_buffer->commit_page)) in ring_buffer_read_page()
5522 save_timestamp = cpu_buffer->read_stamp; in ring_buffer_read_page()
5537 rb_advance_reader(cpu_buffer); in ring_buffer_read_page()
5544 event = rb_reader_event(cpu_buffer); in ring_buffer_read_page()
5557 cpu_buffer->read += rb_page_entries(reader); in ring_buffer_read_page()
5558 cpu_buffer->read_bytes += rb_page_commit(reader); in ring_buffer_read_page()
5579 cpu_buffer->lost_events = 0; in ring_buffer_read_page()
5605 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); in ring_buffer_read_page()