Home
last modified time | relevance | path

Searched refs:pending (Results 1 – 15 of 15) sorted by relevance

/kernel/irq/
Dirq_sim.c 17 unsigned long *pending; member
65 *state = test_bit(hwirq, irq_ctx->work_ctx->pending); in irq_sim_get_irqchip_state()
83 assign_bit(hwirq, irq_ctx->work_ctx->pending, state); in irq_sim_set_irqchip_state()
112 while (!bitmap_empty(work_ctx->pending, work_ctx->irq_count)) { in irq_sim_handle_irq()
113 offset = find_next_bit(work_ctx->pending, in irq_sim_handle_irq()
115 clear_bit(offset, work_ctx->pending); in irq_sim_handle_irq()
177 work_ctx->pending = bitmap_zalloc(num_irqs, GFP_KERNEL); in irq_domain_create_sim()
178 if (!work_ctx->pending) in irq_domain_create_sim()
193 bitmap_free(work_ctx->pending); in irq_domain_create_sim()
212 bitmap_free(work_ctx->pending); in irq_domain_remove_sim()
DKconfig 50 # Tasklet based software resend for pending interrupts on enable_irq()
/kernel/
Dsoftirq.c 250 #define softirq_deferred_for_rt(pending) \ argument
254 deferred = pending & LONG_SOFTIRQ_MASK; \
255 pending &= ~LONG_SOFTIRQ_MASK; \
268 __u32 pending; in __do_softirq() local
278 pending = local_softirq_pending(); in __do_softirq()
279 deferred = softirq_deferred_for_rt(pending); in __do_softirq()
287 __this_cpu_write(active_softirqs, pending); in __do_softirq()
293 while ((softirq_bit = ffs(pending))) { in __do_softirq()
314 pending >>= softirq_bit; in __do_softirq()
322 pending = local_softirq_pending(); in __do_softirq()
[all …]
Dasync.c 90 if (!list_empty(&domain->pending)) in lowest_in_progress()
91 first = list_first_entry(&domain->pending, in lowest_in_progress()
168 list_add_tail(&entry->domain_list, &domain->pending); in __async_schedule_node_domain()
298 WARN_ON(!domain->registered || !list_empty(&domain->pending)); in async_unregister_domain()
Dsignal.c 160 PENDING(&t->pending, &t->blocked) || in recalc_sigpending_tsk()
211 int next_signal(struct sigpending *pending, sigset_t *mask) in next_signal() argument
216 s = pending->signal.sig; in next_signal()
485 flush_sigqueue(&t->pending); in flush_signals()
492 static void __flush_itimer_signals(struct sigpending *pending) in __flush_itimer_signals() argument
497 signal = pending->signal; in __flush_itimer_signals()
500 list_for_each_entry_safe(q, n, &pending->list, list) { in __flush_itimer_signals()
512 sigorsets(&pending->signal, &signal, &retain); in __flush_itimer_signals()
521 __flush_itimer_signals(&tsk->pending); in flush_itimer_signals()
616 static int __dequeue_signal(struct sigpending *pending, sigset_t *mask, in __dequeue_signal() argument
[all …]
Dptrace.c 738 struct sigpending *pending; in ptrace_peek_siginfo() local
758 pending = &child->signal->shared_pending; in ptrace_peek_siginfo()
760 pending = &child->pending; in ptrace_peek_siginfo()
768 list_for_each_entry(q, &pending->list, list) { in ptrace_peek_siginfo()
Dexit.c 204 flush_sigqueue(&tsk->pending); in __exit_signal()
Dfork.c 2072 init_sigpending(&p->pending); in copy_process()
/kernel/futex/
Dcore.c 3565 struct robust_list __user *entry, *next_entry, *pending; in exit_robust_list() local
3589 if (fetch_robust_entry(&pending, &head->list_op_pending, &pip)) in exit_robust_list()
3603 if (entry != pending) { in exit_robust_list()
3621 if (pending) { in exit_robust_list()
3622 handle_futex_death((void __user *)pending + futex_offset, in exit_robust_list()
3867 struct robust_list __user *entry, *next_entry, *pending; in compat_exit_robust_list() local
3892 if (compat_fetch_robust_entry(&upending, &pending, in compat_exit_robust_list()
3908 if (entry != pending) { in compat_exit_robust_list()
3928 if (pending) { in compat_exit_robust_list()
3929 void __user *uaddr = futex_uaddr(pending, futex_offset); in compat_exit_robust_list()
/kernel/locking/
Dqspinlock.c 150 WRITE_ONCE(lock->pending, 0); in clear_pending()
Dqspinlock_paravirt.h 111 WRITE_ONCE(lock->pending, 1); in set_pending()
/kernel/printk/
Dprintk.c 3064 int pending = __this_cpu_xchg(printk_pending, 0); in wake_up_klogd_work_func() local
3066 if (pending & PRINTK_PENDING_OUTPUT) { in wake_up_klogd_work_func()
3072 if (pending & PRINTK_PENDING_WAKEUP) in wake_up_klogd_work_func()
/kernel/events/
Dring_buffer.c 25 irq_work_queue(&handle->event->pending); in perf_output_wakeup()
Dcore.c 2489 irq_work_queue(&event->pending); in perf_event_disable_inatomic()
4884 irq_work_sync(&event->pending); in _free_event()
6423 irq_work_queue_on(&event->pending, cpu); in perf_pending_event_disable()
6428 struct perf_event *event = container_of(entry, struct perf_event, pending); in perf_pending_event()
8994 irq_work_queue(&event->pending); in __perf_event_overflow()
11309 init_irq_work(&event->pending, perf_pending_event); in perf_event_alloc()
/kernel/rcu/
Dtree.c 2443 long pending, tlimit = 0; in rcu_do_batch() local
2464 pending = rcu_segcblist_n_cbs(&rdp->cblist); in rcu_do_batch()
2467 bl = max(rdp->blimit, pending >> div); in rcu_do_batch()