Lines matching refs:cpu in kernel/trace/ring_buffer.c. Each entry gives the file line number, the source line, a tag (argument / member / local), and the enclosing function.

280 #define for_each_buffer_cpu(buffer, cpu)		\  argument
281 for_each_cpu(cpu, buffer->cpumask)
283 #define for_each_online_buffer_cpu(buffer, cpu) \ argument
284 for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask)
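
The two macros above are the file's standard per-CPU iteration idiom: for_each_buffer_cpu() visits every CPU that owns a per-CPU buffer, and for_each_online_buffer_cpu() additionally intersects with cpu_online_mask. A minimal sketch of the expansion, assuming the file-internal struct trace_buffer layout with its cpumask and buffers[] fields:

        int cpu;

        /* for_each_buffer_cpu(): one iteration per bit set in buffer->cpumask */
        for_each_cpu(cpu, buffer->cpumask) {
                struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
                /* ... operate on cpu_buffer ... */
        }

        /* for_each_online_buffer_cpu(): same walk, but offline CPUs are skipped */
        for_each_cpu_and(cpu, buffer->cpumask, cpu_online_mask) {
                /* ... */
        }
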
488 int cpu; member
809 size_t ring_buffer_nr_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_pages() argument
811 return buffer->buffers[cpu]->nr_pages; in ring_buffer_nr_pages()
821 size_t ring_buffer_nr_dirty_pages(struct trace_buffer *buffer, int cpu) in ring_buffer_nr_dirty_pages() argument
827 read = local_read(&buffer->buffers[cpu]->pages_read); in ring_buffer_nr_dirty_pages()
828 lost = local_read(&buffer->buffers[cpu]->pages_lost); in ring_buffer_nr_dirty_pages()
829 cnt = local_read(&buffer->buffers[cpu]->pages_touched); in ring_buffer_nr_dirty_pages()
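
The three counters read at lines 827-829 drive a simple bookkeeping rule: a page is dirty if a writer touched it and it was neither lost to overwrite nor already consumed by a reader. A sketch of the arithmetic; the zero-clamping on counter races is an assumption about the elided lines:

        /* dirty = touched - lost - read, never going negative */
        static size_t nr_dirty_sketch(size_t touched, size_t lost, size_t read)
        {
                if (touched < lost)             /* counters raced; report empty */
                        return 0;
                touched -= lost;
                if (touched < read)             /* reader may re-read an empty page */
                        return 0;
                return touched - read;
        }
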
845 static __always_inline bool full_hit(struct trace_buffer *buffer, int cpu, int full) in full_hit() argument
847 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in full_hit()
860 dirty = ring_buffer_nr_dirty_pages(buffer, cpu) + 1; in full_hit()
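
Here full is a watermark in percent, and full_hit() reports whether the dirty-page count has crossed that fraction of the ring; the +1 at line 860 plausibly accounts for the page a writer is currently filling. The comparison is done in integer arithmetic, with no division; a sketch:

        /* "dirty / nr_pages > full / 100", rearranged to avoid division */
        static bool full_hit_sketch(size_t dirty, size_t nr_pages, int full)
        {
                if (!nr_pages || !full)         /* no watermark requested */
                        return true;
                return dirty * 100 > (size_t)full * nr_pages;
        }
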
890 void ring_buffer_wake_waiters(struct trace_buffer *buffer, int cpu) in ring_buffer_wake_waiters() argument
898 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_wake_waiters()
901 for_each_buffer_cpu(buffer, cpu) in ring_buffer_wake_waiters()
902 ring_buffer_wake_waiters(buffer, cpu); in ring_buffer_wake_waiters()
908 if (WARN_ON_ONCE(cpu >= nr_cpu_ids)) in ring_buffer_wake_waiters()
911 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wake_waiters()
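
Note the fan-out at lines 898-902: passing RING_BUFFER_ALL_CPUS makes the function recurse once per buffer CPU. A usage sketch for a hypothetical teardown path:

        #include <linux/ring_buffer.h>

        /* Kick every reader blocked in ring_buffer_wait() loose before
         * the buffer goes away.
         */
        static void kick_all_readers(struct trace_buffer *buffer)
        {
                ring_buffer_wake_waiters(buffer, RING_BUFFER_ALL_CPUS);
        }
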
936 int ring_buffer_wait(struct trace_buffer *buffer, int cpu, int full) in ring_buffer_wait() argument
949 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_wait()
954 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_wait()
956 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_wait()
998 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) in ring_buffer_wait()
1001 if (cpu != RING_BUFFER_ALL_CPUS && in ring_buffer_wait()
1002 !ring_buffer_empty_cpu(buffer, cpu)) { in ring_buffer_wait()
1012 done = !pagebusy && full_hit(buffer, cpu, full); in ring_buffer_wait()
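
ring_buffer_wait() blocks until the chosen buffer holds data, or, with a nonzero full, until full_hit() says the watermark is reached (line 1012). A usage sketch matching the three-argument signature at line 936; the helper name is hypothetical:

        /* Block until CPU 0's buffer is at least half full, then drain it.
         * Returns 0 on success or a -errno (e.g. -EINTR on a signal).
         */
        static int wait_half_full(struct trace_buffer *buffer)
        {
                int ret;

                ret = ring_buffer_wait(buffer, 0, 50);  /* cpu 0, full = 50% */
                if (ret)
                        return ret;
                /* ... consume events ... */
                return 0;
        }
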
1053 __poll_t ring_buffer_poll_wait(struct trace_buffer *buffer, int cpu, in ring_buffer_poll_wait() argument
1059 if (cpu == RING_BUFFER_ALL_CPUS) { in ring_buffer_poll_wait()
1063 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_poll_wait()
1066 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_poll_wait()
1097 return full_hit(buffer, cpu, full) ? EPOLLIN | EPOLLRDNORM : 0; in ring_buffer_poll_wait()
1099 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || in ring_buffer_poll_wait()
1100 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) in ring_buffer_poll_wait()
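
The poll path mirrors the wait path: with a nonzero full (only meaningful for a single CPU, per lines 1059 and 1097) readiness means the watermark is hit; otherwise any data suffices. A sketch of wiring this into a file's .poll handler, assuming the five-argument form whose filp and poll_table parameters are elided from line 1053, and with my_private as a hypothetical per-file struct:

        static __poll_t my_trace_poll(struct file *filp, poll_table *wait)
        {
                struct my_private *priv = filp->private_data;

                /* 0 = report readable on any data, no percent watermark */
                return ring_buffer_poll_wait(priv->buffer, priv->cpu,
                                             filp, wait, 0);
        }
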
1151 int cpu, u64 *ts) in ring_buffer_normalize_time_stamp() argument
1592 mflags, cpu_to_node(cpu_buffer->cpu)); in __rb_allocate_pages()
1600 page = alloc_pages_node(cpu_to_node(cpu_buffer->cpu), mflags, 0); in __rb_allocate_pages()
1651 rb_allocate_cpu_buffer(struct trace_buffer *buffer, long nr_pages, int cpu) in rb_allocate_cpu_buffer() argument
1659 GFP_KERNEL, cpu_to_node(cpu)); in rb_allocate_cpu_buffer()
1663 cpu_buffer->cpu = cpu; in rb_allocate_cpu_buffer()
1675 GFP_KERNEL, cpu_to_node(cpu)); in rb_allocate_cpu_buffer()
1682 page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0); in rb_allocate_cpu_buffer()
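
Lines 1592-1682 all follow one NUMA rule: every per-CPU structure and every data page is allocated on the memory node local to its CPU (cpu_to_node()), so the write-side hot path never crosses a NUMA link. The pattern in isolation:

        #include <linux/gfp.h>
        #include <linux/topology.h>

        /* Allocate one order-0 page on the node that owns 'cpu'. */
        static void *alloc_local_page(int cpu)
        {
                struct page *page;

                page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
                if (!page)
                        return NULL;
                return page_address(page);
        }
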
1753 int cpu; in __ring_buffer_alloc() local
1785 cpu = raw_smp_processor_id(); in __ring_buffer_alloc()
1786 cpumask_set_cpu(cpu, buffer->cpumask); in __ring_buffer_alloc()
1787 buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in __ring_buffer_alloc()
1788 if (!buffer->buffers[cpu]) in __ring_buffer_alloc()
1800 for_each_buffer_cpu(buffer, cpu) { in __ring_buffer_alloc()
1801 if (buffer->buffers[cpu]) in __ring_buffer_alloc()
1802 rb_free_cpu_buffer(buffer->buffers[cpu]); in __ring_buffer_alloc()
1822 int cpu; in ring_buffer_free() local
1828 for_each_buffer_cpu(buffer, cpu) in ring_buffer_free()
1829 rb_free_cpu_buffer(buffer->buffers[cpu]); in ring_buffer_free()
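
__ring_buffer_alloc() builds the buffers[] array (note the unwind at lines 1800-1802, which frees only the per-CPU buffers that were successfully created), and ring_buffer_free() tears them all down. A usage sketch via the public ring_buffer_alloc() wrapper; the requested size determines the page count of each per-CPU ring:

        #include <linux/ring_buffer.h>

        static struct trace_buffer *buf;

        static int __init my_init(void)
        {
                buf = ring_buffer_alloc(1 << 20, RB_FL_OVERWRITE); /* ~1 MiB/CPU */
                return buf ? 0 : -ENOMEM;
        }

        static void __exit my_exit(void)
        {
                ring_buffer_free(buf);
        }
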
2089 int cpu, err; in ring_buffer_resize() local
2118 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2119 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2127 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2128 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2158 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2159 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2164 if (!cpu_online(cpu)) { in ring_buffer_resize()
2168 schedule_work_on(cpu, in ring_buffer_resize()
2174 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2175 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2179 if (cpu_online(cpu)) in ring_buffer_resize()
2244 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2245 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
2256 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_resize()
2259 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_resize()
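
The resize sequence above runs in stages: precompute the per-CPU page delta (2118-2128), then perform the page shuffling on the owning CPU via schedule_work_on() when that CPU is online, or directly when it is not (2158-2168), and finally wait for the queued work to complete (2174-2179). Callers only see the simple entry point; a usage sketch:

        /* Grow every per-CPU ring to ~2 MiB. Returns 0 or a -errno. */
        static int grow_buffer(struct trace_buffer *buffer)
        {
                return ring_buffer_resize(buffer, 2 << 20, RING_BUFFER_ALL_CPUS);
        }
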
3107 if (!full_hit(buffer, cpu_buffer->cpu, cpu_buffer->shortest_full)) in rb_wakeups()
3238 int cpu; in ring_buffer_nest_start() local
3242 cpu = raw_smp_processor_id(); in ring_buffer_nest_start()
3243 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_start()
3258 int cpu; in ring_buffer_nest_end() local
3261 cpu = raw_smp_processor_id(); in ring_buffer_nest_end()
3262 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_nest_end()
3281 int cpu = raw_smp_processor_id(); in ring_buffer_unlock_commit() local
3283 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_unlock_commit()
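
ring_buffer_nest_start()/ring_buffer_nest_end() (3238-3262) raise the current CPU's nesting level so that a write issued from inside another write context on the same buffer is not flagged as recursion; ring_buffer_unlock_commit() (3281-3283) likewise resolves its per-CPU buffer from the current CPU. A hypothetical nested-write sketch, assuming nest_start itself keeps the task on one CPU for the duration of the pair:

        ring_buffer_nest_start(buffer);
        ring_buffer_write(buffer, sizeof(inner_ev), &inner_ev); /* nested event */
        ring_buffer_nest_end(buffer);           /* must pair with nest_start */
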
3428 cpu_buffer->cpu, in check_buffer()
3692 int cpu; in ring_buffer_lock_reserve() local
3700 cpu = raw_smp_processor_id(); in ring_buffer_lock_reserve()
3702 if (unlikely(!cpumask_test_cpu(cpu, buffer->cpumask))) in ring_buffer_lock_reserve()
3705 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_lock_reserve()
3793 int cpu; in ring_buffer_discard_commit() local
3798 cpu = smp_processor_id(); in ring_buffer_discard_commit()
3799 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_discard_commit()
3843 int cpu; in ring_buffer_write() local
3850 cpu = raw_smp_processor_id(); in ring_buffer_write()
3852 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_write()
3855 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_write()
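
Writers resolve their per-CPU buffer from raw_smp_processor_id() (3692-3705 for the reserve/commit API, 3843-3855 for the one-shot write). A sketch using the one-shot API, with a hypothetical payload type:

        #include <linux/ring_buffer.h>
        #include <linux/sched/clock.h>

        struct my_event {               /* hypothetical payload */
                u64 ts;
                int value;
        };

        /* ring_buffer_write() reserves room on the current CPU's ring,
         * copies the payload, and commits, all in one call; 0 = success.
         */
        static int log_value(struct trace_buffer *buffer, int value)
        {
                struct my_event ev = {
                        .ts     = sched_clock(),
                        .value  = value,
                };

                return ring_buffer_write(buffer, sizeof(ev), &ev);
        }
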
4037 void ring_buffer_record_disable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_disable_cpu() argument
4041 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_disable_cpu()
4044 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_disable_cpu()
4057 void ring_buffer_record_enable_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_record_enable_cpu() argument
4061 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_record_enable_cpu()
4064 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_record_enable_cpu()
4087 u64 ring_buffer_oldest_event_ts(struct trace_buffer *buffer, int cpu) in ring_buffer_oldest_event_ts() argument
4094 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_oldest_event_ts()
4097 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_oldest_event_ts()
4120 unsigned long ring_buffer_bytes_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_bytes_cpu() argument
4125 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_bytes_cpu()
4128 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_bytes_cpu()
4140 unsigned long ring_buffer_entries_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_entries_cpu() argument
4144 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_entries_cpu()
4147 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries_cpu()
4159 unsigned long ring_buffer_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_overrun_cpu() argument
4164 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_overrun_cpu()
4167 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overrun_cpu()
4182 ring_buffer_commit_overrun_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_commit_overrun_cpu() argument
4187 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_commit_overrun_cpu()
4190 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_commit_overrun_cpu()
4204 ring_buffer_dropped_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_dropped_events_cpu() argument
4209 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_dropped_events_cpu()
4212 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_dropped_events_cpu()
4225 ring_buffer_read_events_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_read_events_cpu() argument
4229 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_events_cpu()
4232 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_events_cpu()
4248 int cpu; in ring_buffer_entries() local
4251 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_entries()
4252 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_entries()
4271 int cpu; in ring_buffer_overruns() local
4274 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_overruns()
4275 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_overruns()
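
Lines 4037-4275 are the statistics and control surface: per-CPU record enable/disable, plus counters for bytes, entries, overruns, commit overruns, dropped events, and events read, with ring_buffer_entries()/ring_buffer_overruns() summing across CPUs. Each per-CPU accessor validates cpu against buffer->cpumask and returns 0 when there is no such buffer. A dump sketch:

        static void dump_stats(struct trace_buffer *buffer)
        {
                int cpu;

                pr_info("total: %lu entries, %lu overruns\n",
                        ring_buffer_entries(buffer),
                        ring_buffer_overruns(buffer));

                for_each_online_cpu(cpu)
                        pr_info("cpu%d: %lu entries, %lu overrun, %lu bytes\n",
                                cpu,
                                ring_buffer_entries_cpu(buffer, cpu),
                                ring_buffer_overrun_cpu(buffer, cpu),
                                ring_buffer_bytes_cpu(buffer, cpu));
        }
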
4710 cpu_buffer->cpu, ts); in rb_buffer_peek()
4720 cpu_buffer->cpu, ts); in rb_buffer_peek()
4802 cpu_buffer->cpu, ts); in rb_iter_peek()
4812 cpu_buffer->cpu, ts); in rb_iter_peek()
4867 ring_buffer_peek(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_peek() argument
4870 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_peek()
4875 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_peek()
4945 ring_buffer_consume(struct trace_buffer *buffer, int cpu, u64 *ts, in ring_buffer_consume() argument
4957 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_consume()
4960 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_consume()
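
ring_buffer_peek() looks at the next event without consuming it; ring_buffer_consume() removes it. A drain sketch; payload decoding is left out:

        static void drain_cpu(struct trace_buffer *buffer, int cpu)
        {
                struct ring_buffer_event *event;
                unsigned long lost;
                u64 ts;

                /* NULL means empty; 'lost' counts events overwritten since
                 * the previous read on this CPU.
                 */
                while ((event = ring_buffer_consume(buffer, cpu, &ts, &lost))) {
                        void *payload = ring_buffer_event_data(event);
                        /* ... decode payload; ts is the event timestamp ... */
                }
        }
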
5005 ring_buffer_read_prepare(struct trace_buffer *buffer, int cpu, gfp_t flags) in ring_buffer_read_prepare() argument
5010 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_prepare()
5024 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_prepare()
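
ring_buffer_read_prepare() is the first step of the non-consuming iterator protocol. A sketch of the full sequence, assuming the companion calls ring_buffer_read_prepare_sync(), ring_buffer_read_start(), and ring_buffer_read_finish() that pair with it in this API:

        struct ring_buffer_iter *iter;

        iter = ring_buffer_read_prepare(buffer, cpu, GFP_KERNEL);
        if (!iter)
                return;
        ring_buffer_read_prepare_sync();        /* wait out in-flight writers */
        ring_buffer_read_start(iter);

        /* ... ring_buffer_iter_peek(iter, &ts) and
         *     ring_buffer_iter_advance(iter) to walk events ... */

        ring_buffer_read_finish(iter);          /* re-enable and free */
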
5132 unsigned long ring_buffer_size(struct trace_buffer *buffer, int cpu) in ring_buffer_size() argument
5140 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_size()
5143 return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; in ring_buffer_size()
5228 void ring_buffer_reset_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_reset_cpu() argument
5230 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_cpu()
5232 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_reset_cpu()
5264 int cpu; in ring_buffer_reset_online_cpus() local
5269 for_each_online_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5270 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5279 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset_online_cpus()
5280 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset_online_cpus()
5305 int cpu; in ring_buffer_reset() local
5310 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5311 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
5320 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_reset()
5321 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_reset()
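
Three reset granularities appear above: one CPU (5228), online CPUs only (5264-5280), and everything (5305-5321). A hedged usage sketch of the quiesce-then-reset pattern; the disable/synchronize steps are an assumption about what callers do before clearing a live ring:

        ring_buffer_record_disable(buffer);     /* stop new writers */
        synchronize_rcu();                      /* let in-flight commits drain */
        ring_buffer_reset(buffer);              /* or ring_buffer_reset_cpu() */
        ring_buffer_record_enable(buffer);
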
5342 int cpu; in ring_buffer_empty() local
5346 for_each_buffer_cpu(buffer, cpu) { in ring_buffer_empty()
5347 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty()
5367 bool ring_buffer_empty_cpu(struct trace_buffer *buffer, int cpu) in ring_buffer_empty_cpu() argument
5374 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_empty_cpu()
5377 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_empty_cpu()
5401 struct trace_buffer *buffer_b, int cpu) in ring_buffer_swap_cpu() argument
5407 if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || in ring_buffer_swap_cpu()
5408 !cpumask_test_cpu(cpu, buffer_b->cpumask)) in ring_buffer_swap_cpu()
5411 cpu_buffer_a = buffer_a->buffers[cpu]; in ring_buffer_swap_cpu()
5412 cpu_buffer_b = buffer_b->buffers[cpu]; in ring_buffer_swap_cpu()
5456 buffer_a->buffers[cpu] = cpu_buffer_b; in ring_buffer_swap_cpu()
5457 buffer_b->buffers[cpu] = cpu_buffer_a; in ring_buffer_swap_cpu()
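
ring_buffer_swap_cpu() exchanges one CPU's ring between two trace_buffers (5456-5457), which is how latency tracers capture a snapshot without copying. Both buffers must have a ring for that cpu (5407-5408). A sketch:

        /* Swap CPU 'cpu' of the live buffer with the snapshot buffer.
         * Returns 0 on success or a -errno (sizes must match, and the
         * ring must not be mid-commit).
         */
        static int snapshot_cpu(struct trace_buffer *live,
                                struct trace_buffer *snap, int cpu)
        {
                return ring_buffer_swap_cpu(snap, live, cpu);
        }
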
5489 void *ring_buffer_alloc_read_page(struct trace_buffer *buffer, int cpu) in ring_buffer_alloc_read_page() argument
5496 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_alloc_read_page()
5499 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_alloc_read_page()
5514 page = alloc_pages_node(cpu_to_node(cpu), in ring_buffer_alloc_read_page()
5536 void ring_buffer_free_read_page(struct trace_buffer *buffer, int cpu, void *data) in ring_buffer_free_read_page() argument
5543 if (!buffer || !buffer->buffers || !buffer->buffers[cpu]) in ring_buffer_free_read_page()
5546 cpu_buffer = buffer->buffers[cpu]; in ring_buffer_free_read_page()
5602 void **data_page, size_t len, int cpu, int full) in ring_buffer_read_page() argument
5604 struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; in ring_buffer_read_page()
5615 if (!cpumask_test_cpu(cpu, buffer->cpumask)) in ring_buffer_read_page()
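
The read-page API is the bulk path behind splice-style readers: borrow a page, let ring_buffer_read_page() fill it (or swap in a whole sub-buffer), then return it. A sketch; the exact return-value convention is hedged to "negative means no data was transferred":

        static void read_one_page(struct trace_buffer *buffer, int cpu)
        {
                void *page;
                int ret;

                page = ring_buffer_alloc_read_page(buffer, cpu);
                if (IS_ERR_OR_NULL(page))
                        return;

                /* full = 1: only take a page the writer has finished with */
                ret = ring_buffer_read_page(buffer, &page, PAGE_SIZE, cpu, 1);
                if (ret >= 0) {
                        /* page now holds a buffer_data_page of events */
                }

                ring_buffer_free_read_page(buffer, cpu, page);
        }
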
5778 int trace_rb_cpu_prepare(unsigned int cpu, struct hlist_node *node) in trace_rb_cpu_prepare() argument
5786 if (cpumask_test_cpu(cpu, buffer->cpumask)) in trace_rb_cpu_prepare()
5804 buffer->buffers[cpu] = in trace_rb_cpu_prepare()
5805 rb_allocate_cpu_buffer(buffer, nr_pages, cpu); in trace_rb_cpu_prepare()
5806 if (!buffer->buffers[cpu]) { in trace_rb_cpu_prepare()
5808 cpu); in trace_rb_cpu_prepare()
5812 cpumask_set_cpu(cpu, buffer->cpumask); in trace_rb_cpu_prepare()
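
trace_rb_cpu_prepare() is a CPU-hotplug callback: when a new CPU comes up, it sizes a fresh per-CPU ring to match the existing ones (5804-5805) and marks the CPU in the cpumask (5812). A registration sketch; the state constant usage is real, but the name string and the instance-add step are illustrative of how multi-instance prepare callbacks are generally installed, not copied from the tracing init code:

        ret = cpuhp_setup_state_multi(CPUHP_TRACE_RB_PREPARE,
                                      "trace/RB:prepare",
                                      trace_rb_cpu_prepare, NULL);
        /* each buffer then registers its hlist node, e.g.:
         * cpuhp_state_add_instance(CPUHP_TRACE_RB_PREPARE, &buffer->node);
         */
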
5848 int cpu; member
5955 int cpu = smp_processor_id(); in rb_ipi() local
5957 data = &rb_data[cpu]; in rb_ipi()
5978 int cpu; in test_ringbuffer() local
5995 for_each_online_cpu(cpu) { in test_ringbuffer()
5996 rb_data[cpu].buffer = buffer; in test_ringbuffer()
5997 rb_data[cpu].cpu = cpu; in test_ringbuffer()
5998 rb_data[cpu].cnt = cpu; in test_ringbuffer()
5999 rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu], in test_ringbuffer()
6000 "rbtester/%d", cpu); in test_ringbuffer()
6001 if (WARN_ON(IS_ERR(rb_threads[cpu]))) { in test_ringbuffer()
6003 ret = PTR_ERR(rb_threads[cpu]); in test_ringbuffer()
6007 kthread_bind(rb_threads[cpu], cpu); in test_ringbuffer()
6008 wake_up_process(rb_threads[cpu]); in test_ringbuffer()
6039 for_each_online_cpu(cpu) { in test_ringbuffer()
6040 if (!rb_threads[cpu]) in test_ringbuffer()
6042 kthread_stop(rb_threads[cpu]); in test_ringbuffer()
6051 for_each_online_cpu(cpu) { in test_ringbuffer()
6053 struct rb_test_data *data = &rb_data[cpu]; in test_ringbuffer()
6077 pr_info("CPU %d:\n", cpu); in test_ringbuffer()
6090 while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { in test_ringbuffer()
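
The self-test (5848-6090) is a producer/consumer smoke test: one kthread per online CPU, pinned with kthread_bind(), hammers the buffer; after stopping the threads (6039-6042), the main thread consumes each CPU's ring (6090) and cross-checks the counts. The per-CPU thread pattern in isolation; worker_fn and data are hypothetical stand-ins for rb_test and rb_data above:

        #include <linux/kthread.h>

        for_each_online_cpu(cpu) {
                task = kthread_create(worker_fn, &data[cpu], "worker/%d", cpu);
                if (WARN_ON(IS_ERR(task)))
                        break;
                kthread_bind(task, cpu);        /* pin before first wakeup */
                wake_up_process(task);
        }
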