/kernel/
  softirq.c
       85  static bool ksoftirqd_running(unsigned long pending)  in ksoftirqd_running() argument
       89  if (pending & SOFTIRQ_NOW_MASK)  in ksoftirqd_running()
      256  __u32 pending;  in __do_softirq() local
      266  pending = local_softirq_pending();  in __do_softirq()
      280  while ((softirq_bit = ffs(pending))) {  in __do_softirq()
      301  pending >>= softirq_bit;  in __do_softirq()
      308  pending = local_softirq_pending();  in __do_softirq()
      309  if (pending) {  in __do_softirq()
      326  __u32 pending;  in do_softirq() local
      334  pending = local_softirq_pending();  in do_softirq()
      [all …]
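The __do_softirq() hits follow one idiom: snapshot the per-CPU mask with local_softirq_pending(), then peel set bits off lowest-first with ffs(), shifting the snapshot down after each handler. A minimal user-space sketch of that drain loop (handle() and the 0x212 mask are made-up stand-ins, not kernel values):

    #include <stdio.h>
    #include <strings.h>                /* ffs() */

    static void handle(int nr) { printf("softirq %d\n", nr); }

    int main(void)
    {
        unsigned int pending = 0x212;   /* stand-in for local_softirq_pending() */
        int softirq_bit, nr = 0;

        while ((softirq_bit = ffs(pending))) {
            nr += softirq_bit - 1;      /* absolute index of the lowest set bit */
            handle(nr);
            nr += 1;                    /* step past the bit just handled */
            pending >>= softirq_bit;    /* drop it and the zeros below it */
        }
        return 0;
    }

The kernel loop drains the whole snapshot before re-reading local_softirq_pending() (lines 308-309 above) to catch softirqs raised in the meantime.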
  async.c
       90  if (!list_empty(&domain->pending))  in lowest_in_progress()
       91  first = list_first_entry(&domain->pending,  in lowest_in_progress()
      201  list_add_tail(&entry->domain_list, &domain->pending);  in async_schedule_node_domain()
      260  WARN_ON(!domain->registered || !list_empty(&domain->pending));  in async_unregister_domain()
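These async.c hits are plain intrusive-list usage: async_schedule_node_domain() queues new entries at the tail of domain->pending, and lowest_in_progress() peeks the oldest one with list_first_entry(). A self-contained user-space sketch of the helpers involved (struct async_entry_sketch and its cookie field are hypothetical stand-ins for the kernel's struct async_entry):

    #include <stdio.h>
    #include <stddef.h>

    /* Minimal stand-ins for the kernel's intrusive list helpers. */
    struct list_head { struct list_head *prev, *next; };

    #define LIST_HEAD_INIT(name) { &(name), &(name) }
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
    #define list_first_entry(head, type, member) \
        container_of((head)->next, type, member)

    static int list_empty(const struct list_head *head)
    {
        return head->next == head;
    }

    static void list_add_tail(struct list_head *entry, struct list_head *head)
    {
        entry->prev = head->prev;
        entry->next = head;
        head->prev->next = entry;
        head->prev = entry;
    }

    struct async_entry_sketch {                /* hypothetical stand-in */
        struct list_head domain_list;
        unsigned long long cookie;
    };

    int main(void)
    {
        struct list_head pending = LIST_HEAD_INIT(pending);
        struct async_entry_sketch a = { .cookie = 1 }, b = { .cookie = 2 };

        list_add_tail(&a.domain_list, &pending);   /* as in async_schedule_node_domain() */
        list_add_tail(&b.domain_list, &pending);

        if (!list_empty(&pending))                 /* as in lowest_in_progress() */
            printf("oldest pending cookie: %llu\n",
                   list_first_entry(&pending, struct async_entry_sketch,
                                    domain_list)->cookie);
        return 0;
    }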
  signal.c
      157  PENDING(&t->pending, &t->blocked) ||  in recalc_sigpending_tsk()
      208  int next_signal(struct sigpending *pending, sigset_t *mask)  in next_signal() argument
      213  s = pending->signal.sig;  in next_signal()
      476  flush_sigqueue(&t->pending);  in flush_signals()
      483  static void __flush_itimer_signals(struct sigpending *pending)  in __flush_itimer_signals() argument
      488  signal = pending->signal;  in __flush_itimer_signals()
      491  list_for_each_entry_safe(q, n, &pending->list, list) {  in __flush_itimer_signals()
      503  sigorsets(&pending->signal, &signal, &retain);  in __flush_itimer_signals()
      512  __flush_itimer_signals(&tsk->pending);  in flush_itimer_signals()
      607  static int __dequeue_signal(struct sigpending *pending, sigset_t *mask,  in __dequeue_signal() argument
      [all …]
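Behind next_signal() (line 208 above) the core step is bitmask arithmetic: mask the pending set against the blocked set and take the lowest remaining bit, since bit n-1 of a sigset represents signal n. A sketch assuming single-word masks (a real sigset_t may span several words, which next_signal() walks word by word):

    #include <stdio.h>
    #include <strings.h>                       /* ffs() */

    static int next_signal_sketch(unsigned int pending, unsigned int blocked)
    {
        unsigned int x = pending & ~blocked;   /* deliverable signals only */

        return x ? ffs((int)x) : 0;            /* bit n-1 holds signal n */
    }

    int main(void)
    {
        /* SIGHUP (1) and SIGINT (2) pending, SIGHUP blocked -> SIGINT is next. */
        unsigned int pending = (1u << 0) | (1u << 1);
        unsigned int blocked = (1u << 0);

        printf("next signal: %d\n", next_signal_sketch(pending, blocked));
        return 0;
    }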
  futex.c
     3680  struct robust_list __user *entry, *next_entry, *pending;  in exit_robust_list() local
     3704  if (fetch_robust_entry(&pending, &head->list_op_pending, &pip))  in exit_robust_list()
     3718  if (entry != pending) {  in exit_robust_list()
     3736  if (pending) {  in exit_robust_list()
     3737  handle_futex_death((void __user *)pending + futex_offset,  in exit_robust_list()
     3980  struct robust_list __user *entry, *next_entry, *pending;  in compat_exit_robust_list() local
     4005  if (compat_fetch_robust_entry(&upending, &pending,  in compat_exit_robust_list()
     4021  if (entry != pending) {  in compat_exit_robust_list()
     4041  if (pending) {  in compat_exit_robust_list()
     4042  void __user *uaddr = futex_uaddr(pending, futex_offset);  in compat_exit_robust_list()
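exit_robust_list() is the kernel half of the robust-futex protocol: at task death it walks the list the task registered earlier and calls handle_futex_death() on each held lock, with list_op_pending covering a lock or unlock that was mid-flight. The user-space half is a set_robust_list(2) call. A minimal registration sketch (normally glibc registers a per-thread head for you, so running this replaces that registration):

    #include <linux/futex.h>        /* struct robust_list_head */
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <stdio.h>

    int main(void)
    {
        static struct robust_list_head head;

        head.list.next = &head.list; /* empty list: head points at itself */
        head.futex_offset = 0;       /* offset from list entry to futex word */
        head.list_op_pending = NULL; /* set around lock/unlock, cleared after */

        if (syscall(SYS_set_robust_list, &head, sizeof(head)) != 0) {
            perror("set_robust_list");
            return 1;
        }
        /* On exit, even after a crash, the kernel walks this list in
         * exit_robust_list() and wakes waiters on any futexes still held. */
        return 0;
    }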
  ptrace.c
      711  struct sigpending *pending;  in ptrace_peek_siginfo() local
      731  pending = &child->signal->shared_pending;  in ptrace_peek_siginfo()
      733  pending = &child->pending;  in ptrace_peek_siginfo()
      741  list_for_each_entry(q, &pending->list, list) {  in ptrace_peek_siginfo()
  exit.c
      164  flush_sigqueue(&tsk->pending);  in __exit_signal()

  fork.c
     1915  init_sigpending(&p->pending);  in copy_process()
/kernel/irq/
  irq_sim.c
       56  while (!bitmap_empty(work_ctx->pending, sim->irq_count)) {  in irq_sim_handle_irq()
       57  offset = find_next_bit(work_ctx->pending,  in irq_sim_handle_irq()
       59  clear_bit(offset, work_ctx->pending);  in irq_sim_handle_irq()
       89  sim->work_ctx.pending = bitmap_zalloc(num_irqs, GFP_KERNEL);  in irq_sim_init()
       90  if (!sim->work_ctx.pending) {  in irq_sim_init()
      122  bitmap_free(sim->work_ctx.pending);  in irq_sim_fini()
      177  set_bit(offset, sim->work_ctx.pending);  in irq_sim_fire()
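irq_sim keeps its pending interrupts in a bitmap sized at init time: irq_sim_fire() sets a bit, and the handler drains bits in a find/clear loop. A single-word user-space sketch of the same fire/drain cycle (the kernel version allocates an arbitrary-width bitmap with bitmap_zalloc() and scans it with find_next_bit(); __builtin_ctzl is a GCC/Clang builtin standing in here):

    #include <stdio.h>

    static unsigned long pending;              /* stand-in for work_ctx->pending */

    static void irq_sim_fire_sketch(int offset)
    {
        pending |= 1UL << offset;              /* set_bit() */
    }

    static void irq_sim_handle_sketch(void)
    {
        while (pending) {                      /* !bitmap_empty() */
            int offset = __builtin_ctzl(pending);  /* lowest pending IRQ */

            pending &= ~(1UL << offset);       /* clear_bit() */
            printf("handling simulated irq %d\n", offset);
        }
    }

    int main(void)
    {
        irq_sim_fire_sketch(3);
        irq_sim_fire_sketch(17);
        irq_sim_handle_sketch();
        return 0;
    }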
  Kconfig
       46  # Tasklet based software resend for pending interrupts on enable_irq()
/kernel/locking/
  qspinlock.c
      149  WRITE_ONCE(lock->pending, 0);  in clear_pending()

  qspinlock_paravirt.h
      111  WRITE_ONCE(lock->pending, 1);  in set_pending()
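Both hits touch the same field: the pending bit lets the first contender spin on the lock word itself instead of queueing an MCS node, and set_pending()/clear_pending() flip it. A portable C11 sketch of those two helpers, assuming the upstream layout (locked byte in bits 0-7, pending at bit 8); the kernel can use the plain WRITE_ONCE() byte stores shown above because pending occupies a byte of its own:

    #include <stdatomic.h>
    #include <stdio.h>

    #define _Q_PENDING_VAL  (1U << 8)   /* assumed bit position */

    static _Atomic unsigned int lock_val;

    static void set_pending(void)   { atomic_fetch_or(&lock_val, _Q_PENDING_VAL); }
    static void clear_pending(void) { atomic_fetch_and(&lock_val, ~_Q_PENDING_VAL); }

    int main(void)
    {
        set_pending();
        printf("after set_pending:   0x%x\n", atomic_load(&lock_val));
        clear_pending();
        printf("after clear_pending: 0x%x\n", atomic_load(&lock_val));
        return 0;
    }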
/kernel/printk/
  printk.c
     2950  int pending = __this_cpu_xchg(printk_pending, 0);  in wake_up_klogd_work_func() local
     2952  if (pending & PRINTK_PENDING_OUTPUT) {  in wake_up_klogd_work_func()
     2958  if (pending & PRINTK_PENDING_WAKEUP)  in wake_up_klogd_work_func()
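wake_up_klogd_work_func() atomically swaps the per-CPU printk_pending word to zero, then acts on whichever bits were set; the fetch-and-clear means flags raised concurrently are never lost, only deferred to the next run. A single-variable sketch of the idiom (C11 atomics standing in for __this_cpu_xchg(); the bit values mirror printk.c but treat them as assumptions):

    #include <stdatomic.h>
    #include <stdio.h>

    #define PRINTK_PENDING_WAKEUP  (1 << 0)
    #define PRINTK_PENDING_OUTPUT  (1 << 1)

    static _Atomic int printk_pending;     /* per-CPU in the kernel */

    static void wake_up_klogd_work_sketch(void)
    {
        /* Grab-and-clear: flags raised after this xchg wait for the next run. */
        int pending = atomic_exchange(&printk_pending, 0);

        if (pending & PRINTK_PENDING_OUTPUT)
            printf("would flush the consoles\n");
        if (pending & PRINTK_PENDING_WAKEUP)
            printf("would wake klogd\n");
    }

    int main(void)
    {
        atomic_fetch_or(&printk_pending, PRINTK_PENDING_OUTPUT);
        wake_up_klogd_work_sketch();
        return 0;
    }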
/kernel/events/
  ring_buffer.c
       25  irq_work_queue(&handle->event->pending);  in perf_output_wakeup()

  core.c
     2288  irq_work_queue(&event->pending);  in perf_event_disable_inatomic()
     4539  irq_work_sync(&event->pending);  in _free_event()
     6009  irq_work_queue_on(&event->pending, cpu);  in perf_pending_event_disable()
     6014  struct perf_event *event = container_of(entry, struct perf_event, pending);  in perf_pending_event()
     8208  irq_work_queue(&event->pending);  in __perf_event_overflow()
    10456  init_irq_work(&event->pending, perf_pending_event);  in perf_event_alloc()
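The perf hits (including ring_buffer.c's perf_output_wakeup() above) trace the whole irq_work lifecycle: init_irq_work() binds the callback at allocation, irq_work_queue()/irq_work_queue_on() raise it from contexts that cannot sleep, and irq_work_sync() drains it before the event is freed. The callback receives only the embedded irq_work pointer and recovers its perf_event with container_of(). A user-space sketch of that recovery step (the *_sketch types are hypothetical stand-ins):

    #include <stdio.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct irq_work_sketch { void (*func)(struct irq_work_sketch *); };

    struct perf_event_sketch {
        int id;
        struct irq_work_sketch pending;    /* embedded, as in struct perf_event */
    };

    static void pending_cb(struct irq_work_sketch *entry)
    {
        /* Same recovery as perf_pending_event(): member pointer -> container. */
        struct perf_event_sketch *event =
            container_of(entry, struct perf_event_sketch, pending);

        printf("servicing pending work for event %d\n", event->id);
    }

    int main(void)
    {
        struct perf_event_sketch ev = { .id = 42, .pending = { .func = pending_cb } };

        ev.pending.func(&ev.pending);      /* what irq_work dispatch would do */
        return 0;
    }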
/kernel/rcu/
  tree.c
     2119  long pending, tlimit = 0;  in rcu_do_batch() local
     2141  pending = rcu_segcblist_n_cbs(&rdp->cblist);  in rcu_do_batch()
     2142  bl = max(rdp->blimit, pending >> rcu_divisor);  in rcu_do_batch()
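The batch sizing at line 2142 scales with queue depth: bl = max(rdp->blimit, pending >> rcu_divisor). Assuming the usual defaults of blimit = 10 and rcu_divisor = 7 (both are tunables; the values here are assumptions), a queue of 4096 callbacks gives max(10, 4096 >> 7) = max(10, 32) = 32, so deep callback queues are drained in proportionally larger batches while shallow ones keep the small fixed limit.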