
Lines Matching refs:cpu

272 #define for_each_buffer_cpu(buffer, cpu)		\  argument
273 for_each_cpu(cpu, buffer->cpumask)
275 #define for_each_online_buffer_cpu(buffer, cpu) \ argument
276 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
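These two macros are the file's canonical per-CPU walks: the first visits every CPU the buffer was allocated for, the second only those that are also currently online. A minimal sketch of what they expand to (do_something() is a hypothetical placeholder, not part of the source):

	int cpu;

	/* every CPU present in the buffer's cpumask */
	for_each_cpu(cpu, buffer->cpumask)
		do_something(buffer->buffers[cpu]);

	/* the same walk restricted to CPUs that are currently online */
	for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
		do_something(buffer->buffers[cpu]);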
459 int cpu; member
814 size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_pages() argument
816 return buffer->buffers[cpu]->nr_pages; in ring_buffer_nr_pages()
826 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_dirty_pages() argument
832 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
833 lost = local_read(&buffer->buffers[cpu]->pages_lost); in ring_buffer_nr_dirty_pages()
834 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
850 static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full) in full_hit() argument
852 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
865 dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1; in full_hit()
895 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) in ring_buffer_wake_waiters() argument
903 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_wake_waiters()
906 for_each_buffer_cpu(buffer, cpu) in ring_buffer_wake_waiters()
907 ring_buffer_wake_waiters(buffer, cpu); in ring_buffer_wake_waiters()
913 if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) in ring_buffer_wake_waiters()
916 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
941 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) in ring_buffer_wait() argument
954 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_wait()
959 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
961 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
1003 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) in ring_buffer_wait()
1006 if (cpu != RING_BUFFER_ALL_CPUS && in ring_buffer_wait()
1007 !ring_buffer_empty_cpu(buffer, cpu)) { in ring_buffer_wait()
1017 done = !pagebusy && full_hit(buffer, cpu, full); in ring_buffer_wait()
1058 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, in ring_buffer_poll_wait() argument
1064 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_poll_wait()
1068 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
1071 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1102 return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0; in ring_buffer_poll_wait()
1104 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || in ring_buffer_poll_wait()
1105 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) in ring_buffer_poll_wait()
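Both waiters above take either a specific CPU or RING_BUFFER_ALL_CPUS, plus a "full" watermark (0 means wake on any data; otherwise a percentage of dirty pages checked via full_hit()). A hedged usage sketch of the blocking variant, error handling trimmed:

	int ret;

	/* block until any per-CPU buffer has data */
	ret = ring_buffer_wait(buffer, RING_BUFFER_ALL_CPUS, 0);
	if (ret)
		return ret;	/* nonzero: interrupted or invalid CPU */

	/* or: wait until one CPU's buffer is at least 50% dirty */
	ret = ring_buffer_wait(buffer, cpu, 50);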
1156 int cpu, u64 *ts) in ring_buffer_normalize_time_stamp() argument
1597 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
1605 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0); in __rb_allocate_pages()
1656 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu) in rb_allocate_cpu_buffer() argument
1664 GFP_KERNEL, cpu_to_node(cpu)); in rb_allocate_cpu_buffer()
1668 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
1680 GFP_KERNEL, cpu_to_node(cpu)); in rb_allocate_cpu_buffer()
1687 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0); in rb_allocate_cpu_buffer()
1758 int cpu; in __ring_buffer_alloc() local
1790 cpu = raw_smp_processor_id(); in __ring_buffer_alloc()
1791 cpumask_set_cpu(cpu, buffer->cpumask); in __ring_buffer_alloc()
1792 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in __ring_buffer_alloc()
1793 if (!buffer->buffers[cpu]) in __ring_buffer_alloc()
1805 for_each_buffer_cpu(buffer, cpu) { in __ring_buffer_alloc()
1806 if (buffer->buffers[cpu]) in __ring_buffer_alloc()
1807 rb_free_cpu_buffer(buffer->buffers[cpu]); in __ring_buffer_alloc()
1847 int cpu; in ring_buffer_free() local
1855 for_each_buffer_cpu(buffer, cpu) in ring_buffer_free()
1856 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
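The allocation and teardown paths above create one ring_buffer_per_cpu per CPU and later free them all. From a caller's point of view this is reached through the usual ring_buffer_alloc()/ring_buffer_free() pair; a sketch with an illustrative size:

	struct trace_buffer *buffer;

	buffer = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE);	/* ~1 MB per CPU */
	if (!buffer)
		return -ENOMEM;

	/* ... produce and consume events ... */

	ring_buffer_free(buffer);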
2120 int cpu, err; in ring_buffer_resize() local
2151 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2152 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2160 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2161 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2191 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2192 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2197 if (!cpu_online(cpu)) { in ring_buffer_resize()
2201 schedule_work_on(cpu, in ring_buffer_resize()
2207 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2208 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2212 if (cpu_online(cpu)) in ring_buffer_resize()
2277 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2278 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2289 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2292 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
3147 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3278 int cpu; in ring_buffer_nest_start() local
3282 cpu = raw_smp_processor_id(); in ring_buffer_nest_start()
3283 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3298 int cpu; in ring_buffer_nest_end() local
3301 cpu = raw_smp_processor_id(); in ring_buffer_nest_end()
3302 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3321 int cpu = raw_smp_processor_id(); in ring_buffer_unlock_commit() local
3323 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
3468 cpu_buffer->cpu, in check_buffer()
3732 int cpu; in ring_buffer_lock_reserve() local
3740 cpu = raw_smp_processor_id(); in ring_buffer_lock_reserve()
3742 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
3745 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
3833 int cpu; in ring_buffer_discard_commit() local
3841 cpu = smp_processor_id(); in ring_buffer_discard_commit()
3842 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3886 int cpu; in ring_buffer_write() local
3893 cpu = raw_smp_processor_id(); in ring_buffer_write()
3895 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
3898 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
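All three producer entry points above (reserve/commit, discard, and write) pick the destination buffer from the CPU the caller happens to be running on. The simplest, ring_buffer_write(), copies a finished payload in one call; a sketch (my_payload is a hypothetical struct):

	struct my_payload { int val; } data = { .val = 42 };

	/* lands in the current CPU's buffer; nonzero return means it was dropped */
	if (ring_buffer_write(buffer, sizeof(data), &data))
		pr_warn("ring buffer write failed\n");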
4086 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_disable_cpu() argument
4090 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
4093 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4106 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_enable_cpu() argument
4110 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
4113 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4136 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) in ring_buffer_oldest_event_ts() argument
4143 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
4146 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4169 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_bytes_cpu() argument
4174 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
4177 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4189 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_entries_cpu() argument
4193 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
4196 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4208 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_overrun_cpu() argument
4213 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
4216 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4231 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_commit_overrun_cpu() argument
4236 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
4239 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4253 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_dropped_events_cpu() argument
4258 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
4261 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4274 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_read_events_cpu() argument
4278 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
4281 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4297 int cpu; in ring_buffer_entries() local
4300 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_entries()
4301 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
4320 int cpu; in ring_buffer_overruns() local
4323 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_overruns()
4324 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
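ring_buffer_entries() and ring_buffer_overruns() above simply fold the per-CPU counters over every allocated buffer. Roughly the same sum, written by hand against the exported per-CPU getter (only usable where buffer->cpumask is visible, i.e. inside ring_buffer.c):

	unsigned long total = 0;
	int cpu;

	for_each_cpu(cpu, buffer->cpumask)
		total += ring_buffer_entries_cpu(buffer, cpu);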
4541 if (cpu_buffer->buffer->ext_cb->swap_reader(cpu_buffer->cpu)) { in rb_swap_reader_page_ext()
4885 cpu_buffer->cpu, ts); in rb_buffer_peek()
4895 cpu_buffer->cpu, ts); in rb_buffer_peek()
4977 cpu_buffer->cpu, ts); in rb_iter_peek()
4987 cpu_buffer->cpu, ts); in rb_iter_peek()
5042 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_peek() argument
5045 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
5050 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
5120 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_consume() argument
5132 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
5135 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
5169 cpu_buffer->buffer->ext_cb->update_footers(cpu_buffer->cpu); in ring_buffer_update_view()
5210 int ring_buffer_poke(struct trace_buffer *buffer, int cpu) in ring_buffer_poke() argument
5214 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poke()
5217 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poke()
5247 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) in ring_buffer_read_prepare() argument
5252 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_prepare()
5266 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
5376 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) in ring_buffer_size() argument
5384 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
5387 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
5471 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_reset_cpu() argument
5473 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
5475 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
5507 int cpu; in ring_buffer_reset_online_cpus() local
5512 for_each_online_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5513 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5522 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5523 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5548 int cpu; in ring_buffer_reset() local
5553 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5554 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5563 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5564 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5585 int cpu; in ring_buffer_empty() local
5589 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_empty()
5590 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
5610 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_empty_cpu() argument
5617 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
5620 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
5644 struct trace_buffer *buffer_b, int cpu) in ring_buffer_swap_cpu() argument
5653 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || in ring_buffer_swap_cpu()
5654 !cpumask_test_cpu(cpu, buffer_b->cpumask)) in ring_buffer_swap_cpu()
5657 cpu_buffer_a = buffer_a->buffers[cpu]; in ring_buffer_swap_cpu()
5658 cpu_buffer_b = buffer_b->buffers[cpu]; in ring_buffer_swap_cpu()
5702 buffer_a->buffers[cpu] = cpu_buffer_b; in ring_buffer_swap_cpu()
5703 buffer_b->buffers[cpu] = cpu_buffer_a; in ring_buffer_swap_cpu()
5735 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) in ring_buffer_alloc_read_page() argument
5742 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
5745 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
5760 page = alloc_pages_node(cpu_to_node(cpu), in ring_buffer_alloc_read_page()
5782 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) in ring_buffer_free_read_page() argument
5789 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) in ring_buffer_free_read_page()
5792 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
5848 void **data_page, size_t len, int cpu, int full) in ring_buffer_read_page() argument
5850 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
5861 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
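The three read_page helpers above make up the zero-copy reader path: allocate a spare page, let ring_buffer_read_page() swap a filled buffer page into it, then recycle it. A hedged sketch of the calling pattern (the real users are the trace read/splice code):

	void *page = ring_buffer_alloc_read_page(buffer, cpu);

	if (!IS_ERR_OR_NULL(page)) {
		/* full == 0: accept a partially filled page */
		if (ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 0) >= 0) {
			/* 'page' now holds a snapshot of one buffer page */
		}
		ring_buffer_free_read_page(buffer, cpu, page);
	}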
6025 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node) in trace_rb_cpu_prepare() argument
6033 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
6051 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
6052 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in trace_rb_cpu_prepare()
6053 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
6055 cpu); in trace_rb_cpu_prepare()
6059 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
6069 int cpu; in trace_buffer_pack_size() local
6071 for_each_buffer_cpu(trace_buffer, cpu) { in trace_buffer_pack_size()
6072 struct ring_buffer_per_cpu *rb = trace_buffer->buffers[cpu]; in trace_buffer_pack_size()
6086 int cpu = -1, pack_cpu, j; in trace_buffer_pack() local
6099 cpu = cpumask_next(cpu, trace_buffer->cpumask); in trace_buffer_pack()
6100 if (cpu > nr_cpu_ids) { in trace_buffer_pack()
6105 rb = trace_buffer->buffers[cpu]; in trace_buffer_pack()
6115 cpu_pack->cpu = cpu; in trace_buffer_pack()
6163 int cpu; member
6270 int cpu = smp_processor_id(); in rb_ipi() local
6272 data = &rb_data[cpu]; in rb_ipi()
6293 int cpu; in test_ringbuffer() local
6310 for_each_online_cpu(cpu) { in test_ringbuffer()
6311 rb_data[cpu].buffer = buffer; in test_ringbuffer()
6312 rb_data[cpu].cpu = cpu; in test_ringbuffer()
6313 rb_data[cpu].cnt = cpu; in test_ringbuffer()
6314 rb_threads[cpu] = kthread_run_on_cpu(rb_test, &rb_data[cpu], in test_ringbuffer()
6315 cpu, "rbtester/%u"); in test_ringbuffer()
6316 if (WARN_ON(IS_ERR(rb_threads[cpu]))) { in test_ringbuffer()
6318 ret = PTR_ERR(rb_threads[cpu]); in test_ringbuffer()
6351 for_each_online_cpu(cpu) { in test_ringbuffer()
6352 if (!rb_threads[cpu]) in test_ringbuffer()
6354 kthread_stop(rb_threads[cpu]); in test_ringbuffer()
6363 for_each_online_cpu(cpu) { in test_ringbuffer()
6365 struct rb_test_data *data = &rb_data[cpu]; in test_ringbuffer()
6389 pr_info("CPU %d:\n", cpu); in test_ringbuffer()
6402 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { in test_ringbuffer()
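That last line is the canonical consume loop the self-test uses to drain each CPU's buffer; the same pattern suits any consumer (payload handling here is illustrative):

	struct ring_buffer_event *event;
	unsigned long lost;

	while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) {
		void *item = ring_buffer_event_data(event);
		/* process 'item'; 'lost' counts events dropped by overruns */
	}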