/kernel/irq/ |
D | matrix.c |
  181  void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,   in irq_matrix_assign_system() argument
  186  BUG_ON(bit > m->matrix_bits);   in irq_matrix_assign_system()
  189  set_bit(bit, m->system_map);   in irq_matrix_assign_system()
  191  BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));   in irq_matrix_assign_system()
  195  if (bit >= m->alloc_start && bit < m->alloc_end)   in irq_matrix_assign_system()
  198  trace_irq_matrix_assign_system(bit, m);   in irq_matrix_assign_system()
  216  unsigned int bit;   in irq_matrix_reserve_managed() local
  218  bit = matrix_alloc_area(m, cm, 1, true);   in irq_matrix_reserve_managed()
  219  if (bit >= m->alloc_end)   in irq_matrix_reserve_managed()
  226  trace_irq_matrix_reserve_managed(bit, cpu, m, cm);   in irq_matrix_reserve_managed()
  [all …]
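The hits above show irq_matrix_assign_system() marking a vector bit as system-reserved and shrinking the allocatable range when the bit falls inside it. A minimal userspace sketch of that bookkeeping, with a simplified stand-in struct (not the kernel's struct irq_matrix):

    /* Sketch of the system-vector bookkeeping above; the struct and
     * names are simplified stand-ins, not the kernel's. */
    #include <assert.h>
    #include <limits.h>
    #include <stdio.h>

    #define MATRIX_BITS 64
    #define WORD_BITS   (sizeof(unsigned long) * CHAR_BIT)

    struct matrix {
        unsigned long system_map[MATRIX_BITS / WORD_BITS];
        unsigned int alloc_start, alloc_end, available;
    };

    static void set_map_bit(unsigned long *map, unsigned int bit)
    {
        map[bit / WORD_BITS] |= 1UL << (bit % WORD_BITS);
    }

    /* Reserve @bit for system use: mark it and, if it falls inside the
     * dynamic allocation range, shrink the allocatable vector count. */
    static void assign_system(struct matrix *m, unsigned int bit)
    {
        assert(bit < MATRIX_BITS);          /* kernel uses BUG_ON() here */
        set_map_bit(m->system_map, bit);
        if (bit >= m->alloc_start && bit < m->alloc_end)
            m->available--;
    }

    int main(void)
    {
        struct matrix m = { .alloc_start = 32, .alloc_end = 48, .available = 16 };

        assign_system(&m, 33);
        printf("available after reserving bit 33: %u\n", m.available); /* 15 */
        return 0;
    }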
|
/kernel/sched/ |
D | wait_bit.c |
  12   wait_queue_head_t *bit_waitqueue(void *word, int bit)   in bit_waitqueue() argument
  15   unsigned long val = (unsigned long)word << shift | bit;   in bit_waitqueue()
  58   int __sched out_of_line_wait_on_bit(void *word, int bit,   in out_of_line_wait_on_bit() argument
  61   struct wait_queue_head *wq_head = bit_waitqueue(word, bit);   in out_of_line_wait_on_bit()
  62   DEFINE_WAIT_BIT(wq_entry, word, bit);   in out_of_line_wait_on_bit()
  69   void *word, int bit, wait_bit_action_f *action,   in out_of_line_wait_on_bit_timeout() argument
  72   struct wait_queue_head *wq_head = bit_waitqueue(word, bit);   in out_of_line_wait_on_bit_timeout()
  73   DEFINE_WAIT_BIT(wq_entry, word, bit);   in out_of_line_wait_on_bit_timeout()
  111  int __sched out_of_line_wait_on_bit_lock(void *word, int bit,   in out_of_line_wait_on_bit_lock() argument
  114  struct wait_queue_head *wq_head = bit_waitqueue(word, bit);   in out_of_line_wait_on_bit_lock()
  [all …]
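bit_waitqueue() folds the word's address and the bit number into a single key (the low bits of a word-aligned pointer are free to hold the bit) and hashes that key to pick a wait queue head. A userspace sketch of the folding, with an illustrative shift, hash, and table size:

    /* Sketch of folding a (word, bit) pair into one hash key, as
     * bit_waitqueue() does above; shift and table size are illustrative. */
    #include <stdio.h>

    #define TABLE_BITS 8
    #define TABLE_SIZE (1u << TABLE_BITS)

    static unsigned int wait_table_slot(void *word, int bit)
    {
        /* Word-aligned pointers have zero low bits, so shifting the
         * address left and OR-ing in the bit number keeps both distinct. */
        unsigned long key = (unsigned long)word << 3 | bit;

        return (key * 2654435761ul >> (32 - TABLE_BITS)) & (TABLE_SIZE - 1);
    }

    int main(void)
    {
        unsigned long flags = 0;

        printf("slot for bit 0: %u\n", wait_table_slot(&flags, 0));
        printf("slot for bit 1: %u\n", wait_table_slot(&flags, 1));
        return 0;
    }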
|
/kernel/time/ |
D | tick-sched.c |
  420  enum tick_dep_bits bit)   in tick_nohz_dep_set_all() argument
  424  prev = atomic_fetch_or(BIT(bit), dep);   in tick_nohz_dep_set_all()
  433  void tick_nohz_dep_set(enum tick_dep_bits bit)   in tick_nohz_dep_set() argument
  435  tick_nohz_dep_set_all(&tick_dep_mask, bit);   in tick_nohz_dep_set()
  438  void tick_nohz_dep_clear(enum tick_dep_bits bit)   in tick_nohz_dep_clear() argument
  440  atomic_andnot(BIT(bit), &tick_dep_mask);   in tick_nohz_dep_clear()
  447  void tick_nohz_dep_set_cpu(int cpu, enum tick_dep_bits bit)   in tick_nohz_dep_set_cpu() argument
  454  prev = atomic_fetch_or(BIT(bit), &ts->tick_dep_mask);   in tick_nohz_dep_set_cpu()
  470  void tick_nohz_dep_clear_cpu(int cpu, enum tick_dep_bits bit)   in tick_nohz_dep_clear_cpu() argument
  474  atomic_andnot(BIT(bit), &ts->tick_dep_mask);   in tick_nohz_dep_clear_cpu()
  [all …]
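The tick code sets dependency bits with atomic_fetch_or() and clears them with atomic_andnot(), using the returned previous value to detect the empty-to-nonempty transition (the point where the CPUs must be kicked). A userspace sketch with C11 atomics standing in for the kernel's atomic_t helpers:

    /* Sketch of the dependency-mask pattern above; names are illustrative. */
    #include <stdatomic.h>
    #include <stdio.h>

    #define BIT(n) (1u << (n))

    static atomic_uint dep_mask;

    /* Set a dependency bit; report whether the mask was previously empty. */
    static int dep_set(unsigned int bit)
    {
        unsigned int prev = atomic_fetch_or(&dep_mask, BIT(bit));

        return prev == 0;
    }

    static void dep_clear(unsigned int bit)
    {
        atomic_fetch_and(&dep_mask, ~BIT(bit));  /* kernel: atomic_andnot() */
    }

    int main(void)
    {
        printf("first set was 0 -> nonzero: %d\n", dep_set(3)); /* 1 */
        printf("second set transition: %d\n", dep_set(5));      /* 0 */
        dep_clear(3);
        dep_clear(5);
        return 0;
    }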
|
/kernel/trace/ |
D | trace_functions.c |
  178  int bit;   in function_trace_call() local
  184  bit = ftrace_test_recursion_trylock(ip, parent_ip);   in function_trace_call()
  185  if (bit < 0)   in function_trace_call()
  195  ftrace_test_recursion_unlock(bit);   in function_trace_call()
  289  int bit;   in function_no_repeats_trace_call() local
  295  bit = ftrace_test_recursion_trylock(ip, parent_ip);   in function_no_repeats_trace_call()
  296  if (bit < 0)   in function_no_repeats_trace_call()
  322  ftrace_test_recursion_unlock(bit);   in function_no_repeats_trace_call()
  396  func_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)   in func_set_flag() argument
  402  if (!!set == !!(func_flags.val & bit))   in func_set_flag()
  [all …]
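These handlers, like the fprobe, perf, ftrace, and livepatch hits further down, all follow one shape: claim a recursion bit, bail out if it is already held, do the work, release it. A userspace sketch of that guard, simplified to a single per-thread level (the kernel's ftrace_test_recursion_trylock() tracks one bit per interrupt context):

    /* Sketch of the recursion guard used by the handlers above. */
    #include <stdio.h>

    static _Thread_local unsigned long recursion_mask;

    static int recursion_trylock(int bit)
    {
        if (recursion_mask & (1UL << bit))
            return -1;               /* already inside: refuse to recurse */
        recursion_mask |= 1UL << bit;
        return bit;
    }

    static void recursion_unlock(int bit)
    {
        recursion_mask &= ~(1UL << bit);
    }

    static void traced_callback(void)
    {
        int bit = recursion_trylock(0);

        if (bit < 0)
            return;                  /* same shape as the handlers above */

        /* ... do the actual tracing work here ... */

        recursion_unlock(bit);
    }

    int main(void)
    {
        traced_callback();
        printf("mask after callback: %lu\n", recursion_mask); /* 0 */
        return 0;
    }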
|
D | trace_nop.c |
  64  static int nop_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)   in nop_set_flag() argument
  70  if (bit == TRACE_NOP_OPT_ACCEPT) {   in nop_set_flag()
  77  if (bit == TRACE_NOP_OPT_REFUSE) {   in nop_set_flag()
|
D | fprobe.c |
  30  int bit;   in fprobe_handler() local
  36  bit = ftrace_test_recursion_trylock(ip, parent_ip);   in fprobe_handler()
  37  if (bit < 0) {   in fprobe_handler()
  61  ftrace_test_recursion_unlock(bit);   in fprobe_handler()
|
D | trace_event_perf.c |
  441  int bit;   in perf_ftrace_function_call() local
  446  bit = ftrace_test_recursion_trylock(ip, parent_ip);   in perf_ftrace_function_call()
  447  if (bit < 0)   in perf_ftrace_function_call()
  481  ftrace_test_recursion_unlock(bit);   in perf_ftrace_function_call()
|
D | pid_list.c |
  88  int bit = find_first_bit((unsigned long *)chunk->data,   in upper_empty() local
  90  return bit >= sizeof(chunk->data) * 8;   in upper_empty()
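upper_empty() treats a chunk as empty when find_first_bit() finds no set bit, i.e. returns the bitmap size. A userspace sketch with a minimal find_first_bit() reimplementation:

    /* Sketch of the emptiness test above: "no set bit found" is signalled
     * by returning nbits, so >= nbits means empty. */
    #include <limits.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define WORD_BITS (sizeof(unsigned long) * CHAR_BIT)

    static unsigned int find_first_bit(const unsigned long *map,
                                       unsigned int nbits)
    {
        for (unsigned int i = 0; i < nbits; i++)
            if (map[i / WORD_BITS] & (1UL << (i % WORD_BITS)))
                return i;
        return nbits;                /* no set bit found */
    }

    static bool bitmap_empty(const unsigned long *map, unsigned int nbits)
    {
        return find_first_bit(map, nbits) >= nbits;
    }

    int main(void)
    {
        unsigned long map[2] = { 0, 0 };

        printf("empty: %d\n", bitmap_empty(map, 128));  /* 1 */
        map[1] |= 1UL << 3;
        printf("empty: %d\n", bitmap_empty(map, 128));  /* 0 */
        return 0;
    }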
|
D | trace_functions_graph.c |
  1251  func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)   in func_graph_set_flag() argument
  1253  if (bit == TRACE_GRAPH_PRINT_IRQS)   in func_graph_set_flag()
  1256  if (bit == TRACE_GRAPH_SLEEP_TIME)   in func_graph_set_flag()
  1259  if (bit == TRACE_GRAPH_GRAPH_TIME)   in func_graph_set_flag()
|
D | trace.h |
  489  u32 bit; /* Mask assigned in val field in tracer_flags */   member
  503  #define TRACER_OPT(s, b) .name = #s, .bit = b
  561  u32 old_flags, u32 bit, int set);
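trace.h pairs each tracer option name with a single mask bit, and TRACER_OPT() stringizes the name so it is written only once. A sketch of that table idiom with illustrative option names (the braces around the initializer are added here for a standalone array):

    /* Sketch of the option-table idiom behind TRACER_OPT above. */
    #include <stdint.h>
    #include <stdio.h>

    struct tracer_opt {
        const char *name;
        uint32_t bit;        /* mask assigned in the flags' val field */
    };

    #define TRACER_OPT(s, b) { .name = #s, .bit = (b) }

    static struct tracer_opt demo_opts[] = {
        TRACER_OPT(verbose, 1u << 0),
        TRACER_OPT(raw,     1u << 1),
        TRACER_OPT(end,     0),      /* zero bit terminates the table */
    };

    int main(void)
    {
        for (int i = 0; demo_opts[i].bit; i++)
            printf("%-8s -> 0x%x\n", demo_opts[i].name,
                   (unsigned)demo_opts[i].bit);
        return 0;
    }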
|
D | ring_buffer.c |
  3229  int bit = interrupt_context_level();   in trace_recursive_lock() local
  3231  bit = RB_CTX_NORMAL - bit;   in trace_recursive_lock()
  3233  if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {   in trace_recursive_lock()
  3239  bit = RB_CTX_TRANSITION;   in trace_recursive_lock()
  3240  if (val & (1 << (bit + cpu_buffer->nest))) {   in trace_recursive_lock()
  3246  val |= (1 << (bit + cpu_buffer->nest));   in trace_recursive_lock()
|
D | ftrace.c |
  7546  int bit;   in __ftrace_ops_list_func() local
  7553  bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);   in __ftrace_ops_list_func()
  7554  if (bit < 0)   in __ftrace_ops_list_func()
  7580  trace_clear_recursion(bit);   in __ftrace_ops_list_func()
  7621  int bit;   in ftrace_ops_assist_func() local
  7623  bit = trace_test_and_set_recursion(ip, parent_ip, TRACE_LIST_START);   in ftrace_ops_assist_func()
  7624  if (bit < 0)   in ftrace_ops_assist_func()
  7630  trace_clear_recursion(bit);   in ftrace_ops_assist_func()
|
D | blktrace.c |
  1559  blk_tracer_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)   in blk_tracer_set_flag() argument
  1562  if (bit == TRACE_BLK_OPT_CLASSIC) {   in blk_tracer_set_flag()
|
D | trace.c |
  101   dummy_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)   in dummy_set_flag() argument
  5299  if (tracer_flags & trace_opts[i].bit)   in tracing_trace_options_show()
  5316  ret = trace->set_flag(tr, tracer_flags->val, opts->bit, !neg);   in __set_tracer_option()
  5321  tracer_flags->val &= ~opts->bit;   in __set_tracer_option()
  5323  tracer_flags->val |= opts->bit;   in __set_tracer_option()
  8829  if (topt->flags->val & topt->opt->bit)   in trace_options_read()
  8852  if (!!(topt->flags->val & topt->opt->bit) != val) {   in trace_options_write()
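Both __set_tracer_option() and trace_options_write() normalize the current bit and the requested state with a double negation before comparing, so a no-op request touches nothing. A sketch of that toggle, without the per-tracer set_flag() callback:

    /* Sketch of the option-toggle logic above. */
    #include <stdint.h>
    #include <stdio.h>

    static int set_option(uint32_t *val, uint32_t bit, int enable)
    {
        if (!!enable == !!(*val & bit))
            return 0;                /* already in the requested state */
        if (enable)
            *val |= bit;
        else
            *val &= ~bit;
        return 1;                    /* state changed */
    }

    int main(void)
    {
        uint32_t flags = 0;
        int changed = set_option(&flags, 1u << 2, 1);

        printf("changed: %d, flags: 0x%x\n", changed, flags); /* 1, 0x4 */
        changed = set_option(&flags, 1u << 2, 1);
        printf("changed: %d, flags: 0x%x\n", changed, flags); /* 0, 0x4 */
        return 0;
    }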
|
D | Kconfig |
  553  Either of the above profilers adds a bit of overhead to the system.
  559  No branch profiling. Branch profiling adds a bit of overhead.
  977  with the event enabled. This adds a bit more time for kernel boot
|
/kernel/power/ |
D | snapshot.c |
  816  unsigned int bit;   in memory_bm_set_bit() local
  819  error = memory_bm_find_bit(bm, pfn, &addr, &bit);   in memory_bm_set_bit()
  821  set_bit(bit, addr);   in memory_bm_set_bit()
  827  unsigned int bit;   in mem_bm_set_bit_check() local
  830  error = memory_bm_find_bit(bm, pfn, &addr, &bit);   in mem_bm_set_bit_check()
  832  set_bit(bit, addr);   in mem_bm_set_bit_check()
  840  unsigned int bit;   in memory_bm_clear_bit() local
  843  error = memory_bm_find_bit(bm, pfn, &addr, &bit);   in memory_bm_clear_bit()
  845  clear_bit(bit, addr);   in memory_bm_clear_bit()
  850  int bit;   in memory_bm_clear_current() local
  [all …]
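Each helper above first resolves a pfn to a (word address, bit offset) pair via memory_bm_find_bit() and only touches the bit when that lookup succeeds. A userspace sketch of the lookup-then-set shape, with a flat bitmap standing in for the kernel's per-zone bitmaps:

    /* Sketch of the find-then-set pattern above; layout is illustrative. */
    #include <limits.h>
    #include <stdio.h>

    #define WORD_BITS (sizeof(unsigned long) * CHAR_BIT)
    #define MAX_PFN   1024u

    static unsigned long bitmap[MAX_PFN / WORD_BITS];

    static int find_bit(unsigned int pfn, unsigned long **addr,
                        unsigned int *bit)
    {
        if (pfn >= MAX_PFN)
            return -1;               /* kernel returns an error here */
        *addr = &bitmap[pfn / WORD_BITS];
        *bit = pfn % WORD_BITS;
        return 0;
    }

    static void bm_set_bit(unsigned int pfn)
    {
        unsigned long *addr;
        unsigned int bit;

        if (!find_bit(pfn, &addr, &bit))
            *addr |= 1UL << bit;
    }

    int main(void)
    {
        bm_set_bit(130);             /* word 2, bit 2 with 64-bit longs */
        printf("word 2: 0x%lx\n", bitmap[2]);
        return 0;
    }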
|
/kernel/livepatch/ |
D | patch.c |
  48   int bit;   in klp_ftrace_handler() local
  58   bit = ftrace_test_recursion_trylock(ip, parent_ip);   in klp_ftrace_handler()
  59   if (WARN_ON_ONCE(bit < 0))   in klp_ftrace_handler()
  124  ftrace_test_recursion_unlock(bit);   in klp_ftrace_handler()
|
/kernel/ |
D | watch_queue.c |
  63  unsigned int bit;   in watch_queue_pipe_buf_release() local
  69  bit = buf->offset + buf->len;   in watch_queue_pipe_buf_release()
  70  if ((bit & (WATCH_QUEUE_NOTE_SIZE - 1)) == 0)   in watch_queue_pipe_buf_release()
  71  bit -= WATCH_QUEUE_NOTE_SIZE;   in watch_queue_pipe_buf_release()
  72  bit /= WATCH_QUEUE_NOTE_SIZE;   in watch_queue_pipe_buf_release()
  75  bit += page->index;   in watch_queue_pipe_buf_release()
  77  set_bit(bit, wqueue->notes_bitmap);   in watch_queue_pipe_buf_release()
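watch_queue_pipe_buf_release() turns a buffer's end offset into a note index: an end that lands exactly on a note boundary is rounded back into the note containing the last byte, the result is divided by the note size, and the page's base index is added. A sketch of that arithmetic with an illustrative note size and page base:

    /* Sketch of the note-index arithmetic above. */
    #include <stdio.h>

    #define NOTE_SIZE 128u  /* power of two, like WATCH_QUEUE_NOTE_SIZE */

    static unsigned int note_bit(unsigned int offset, unsigned int len,
                                 unsigned int page_base)
    {
        unsigned int bit = offset + len;

        if ((bit & (NOTE_SIZE - 1)) == 0)  /* end on an exact boundary */
            bit -= NOTE_SIZE;              /* step back into the last note */
        bit /= NOTE_SIZE;
        return bit + page_base;            /* kernel adds page->index here */
    }

    int main(void)
    {
        printf("bit: %u\n", note_bit(256, 128, 0));  /* end 384 -> note 2 */
        printf("bit: %u\n", note_bit(256, 100, 0));  /* end 356 -> note 2 */
        return 0;
    }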
|
D | auditfilter.c |
  285  int bit = AUDIT_BITMASK_SIZE * 32 - i - 1;   in audit_to_entry_common() local
  286  __u32 *p = &entry->rule.mask[AUDIT_WORD(bit)];   in audit_to_entry_common()
  289  if (!(*p & AUDIT_BIT(bit)))   in audit_to_entry_common()
  291  *p &= ~AUDIT_BIT(bit);   in audit_to_entry_common()
|
D | auditsc.c |
  794  int word, bit;   in audit_in_mask() local
  803  bit = AUDIT_BIT(val);   in audit_in_mask()
  805  return rule->mask[word] & bit;   in audit_in_mask()
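Both audit files split a syscall number into a 32-bit word index (AUDIT_WORD()) and a single-bit mask within that word (AUDIT_BIT()). A userspace sketch of the same decomposition; the array size is an illustrative stand-in for AUDIT_BITMASK_SIZE:

    /* Sketch of the word/bit decomposition used by the audit code above. */
    #include <stdint.h>
    #include <stdio.h>

    #define AUDIT_WORD(nr) ((nr) / 32)
    #define AUDIT_BIT(nr)  (1u << ((nr) & 31))

    #define MASK_WORDS 8

    static int in_mask(const uint32_t *mask, unsigned int nr)
    {
        if (AUDIT_WORD(nr) >= MASK_WORDS)
            return 0;
        return !!(mask[AUDIT_WORD(nr)] & AUDIT_BIT(nr));
    }

    int main(void)
    {
        uint32_t mask[MASK_WORDS] = { 0 };

        mask[AUDIT_WORD(69)] |= AUDIT_BIT(69);          /* mark syscall 69 */
        printf("69 in mask: %d\n", in_mask(mask, 69));  /* 1 */
        printf("70 in mask: %d\n", in_mask(mask, 70));  /* 0 */
        return 0;
    }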
|
/kernel/locking/ |
D | lockdep.c |
  668   static inline unsigned long lock_flag(enum lock_usage_bit bit)   in lock_flag() argument
  670   return 1UL << bit;   in lock_flag()
  673   static char get_usage_char(struct lock_class *class, enum lock_usage_bit bit)   in get_usage_char() argument
  689   if (class->usage_mask & lock_flag(bit + LOCK_USAGE_DIR_MASK)) {   in get_usage_char()
  691   if (class->usage_mask & lock_flag(bit))   in get_usage_char()
  693   } else if (class->usage_mask & lock_flag(bit))   in get_usage_char()
  2323  int bit;   in print_lock_class_header() local
  2332  for (bit = 0; bit < LOCK_TRACE_STATES; bit++) {   in print_lock_class_header()
  2333  if (class->usage_mask & (1 << bit)) {   in print_lock_class_header()
  2336  len += printk("%*s %s", depth, "", usage_str[bit]);   in print_lock_class_header()
  [all …]
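get_usage_char() reads two related bits out of usage_mask: the state bit itself and a second bit offset by LOCK_USAGE_DIR_MASK, and the combination selects one of four characters. A sketch of that selection with an illustrative direction offset (the real mask value and character semantics live in lockdep internals):

    /* Sketch of the usage-character selection in get_usage_char() above. */
    #include <stdio.h>

    #define DIR_MASK 2   /* stands in for LOCK_USAGE_DIR_MASK */

    static unsigned long lock_flag(int bit)
    {
        return 1UL << bit;
    }

    static char usage_char(unsigned long usage_mask, int bit)
    {
        char c = '.';

        if (usage_mask & lock_flag(bit + DIR_MASK)) {
            c = '-';
            if (usage_mask & lock_flag(bit))
                c = '?';             /* both bits seen */
        } else if (usage_mask & lock_flag(bit)) {
            c = '+';
        }
        return c;
    }

    int main(void)
    {
        printf("%c %c %c %c\n",
               usage_char(0, 0),                            /* . */
               usage_char(lock_flag(0), 0),                 /* + */
               usage_char(lock_flag(2), 0),                 /* - */
               usage_char(lock_flag(0) | lock_flag(2), 0)); /* ? */
        return 0;
    }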
|
/kernel/cgroup/ |
D | cpuset.c |
  1322  static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,
  2258  static int update_flag(cpuset_flagbits_t bit, struct cpuset *cs,   in update_flag() argument
  2271  set_bit(bit, &trialcs->flags);   in update_flag()
  2273  clear_bit(bit, &trialcs->flags);   in update_flag()
|
/kernel/dma/ |
D | Kconfig |
  98  # The only thing that is really required is a way to set an uncached bit
|
/kernel/events/ |
D | core.c |
  6675  int bit;   in perf_output_sample_regs() local
  6679  for_each_set_bit(bit, _mask, sizeof(mask) * BITS_PER_BYTE) {   in perf_output_sample_regs()
  6682  val = perf_reg_value(regs, bit);   in perf_output_sample_regs()
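perf_output_sample_regs() walks only the set bits of the sample register mask with for_each_set_bit() and reads one register value per bit. A userspace sketch with the loop written out longhand; read_reg() is a hypothetical stand-in for perf_reg_value():

    /* Sketch of the for_each_set_bit() walk above. */
    #include <stdint.h>
    #include <stdio.h>

    static uint64_t read_reg(int bit)
    {
        return 0x1000 + bit;         /* dummy register contents */
    }

    int main(void)
    {
        uint64_t mask = (1ULL << 0) | (1ULL << 3) | (1ULL << 7);

        /* for_each_set_bit(bit, &mask, 64) expands to a loop like this */
        for (int bit = 0; bit < 64; bit++) {
            if (!(mask >> bit & 1))
                continue;            /* skip clear bits */
            printf("reg %d = 0x%llx\n", bit,
                   (unsigned long long)read_reg(bit));
        }
        return 0;
    }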
|