/kernel/time/
D | tick-internal.h
      56  ktime_t expires, bool force);
     103  extern int tick_program_event(ktime_t expires, int force);
     118  static inline int tick_program_event(ktime_t expires, int force) { return 0; }    in tick_program_event() argument

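The pairing of lines 103 and 118 is the usual Kconfig stub pattern: when oneshot tick support is compiled out, tick_program_event() collapses to a static inline that returns 0, so call sites build unchanged with no #ifdefs. A minimal standalone model of that pattern (CONFIG_FEATURE and feature_do() are illustrative names, not kernel identifiers):

#include <stdio.h>

/* Toggle this to model the config option being set or unset. */
#define CONFIG_FEATURE 1

#if CONFIG_FEATURE
/* Real implementation, normally living in a .c file. */
static int feature_do(int arg) { return arg * 2; }
#else
/* Compiled-out stub: callers build and run unchanged. */
static inline int feature_do(int arg) { (void)arg; return 0; }
#endif

int main(void)
{
    /* The call site is identical either way; no #ifdef needed here. */
    printf("feature_do(21) = %d\n", feature_do(21));
    return 0;
}
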
D | tick-oneshot.c
      23  int tick_program_event(ktime_t expires, int force)    in tick_program_event() argument
      44  return clockevents_program_event(dev, expires, force);    in tick_program_event()

D | clockevents.c
     304  bool force)    in clockevents_program_event() argument
     328  return force ? clockevents_program_min_delta(dev) : -ETIME;    in clockevents_program_event()
     336  return (rc && force) ? clockevents_program_min_delta(dev) : rc;    in clockevents_program_event()

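Lines 328 and 336 show what force buys in clockevents_program_event(): a normal request whose expiry is too close (or already past) fails with -ETIME, while a forced one falls back to programming the device's minimum delta instead. A rough userspace sketch of that decision, with invented types and values standing in for the clockevent device state:

#include <stdio.h>
#include <errno.h>
#include <stdbool.h>

/* Illustrative stand-in for a clockevent device's minimum delta. */
#define MIN_DELTA_NS 1000

static long long now_ns = 0;   /* fake clock for the sketch */

static int program_min_delta(void)
{
    printf("programmed at now + min delta (%d ns)\n", MIN_DELTA_NS);
    return 0;
}

/* Models the force fallback in clockevents_program_event(). */
static int program_event(long long expires_ns, bool force)
{
    long long delta = expires_ns - now_ns;

    if (delta <= 0)
        /* Expiry already in the past: fail unless forced. */
        return force ? program_min_delta() : -ETIME;

    if (delta < MIN_DELTA_NS)
        /* Too close for the hardware: same force fallback. */
        return force ? program_min_delta() : -ETIME;

    printf("programmed for +%lld ns\n", delta);
    return 0;
}

int main(void)
{
    printf("%d\n", program_event(-5, false));   /* -ETIME */
    printf("%d\n", program_event(-5, true));    /* falls back */
    printf("%d\n", program_event(5000, false)); /* normal case */
    return 0;
}
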
/kernel/irq/
D | manage.c
     213  bool force)    in irq_do_set_affinity() argument
     267  if (!force && !cpumask_empty(&tmp_mask))    in irq_do_set_affinity()
     268  ret = chip->irq_set_affinity(data, &tmp_mask, force);    in irq_do_set_affinity()
     269  else if (force)    in irq_do_set_affinity()
     270  ret = chip->irq_set_affinity(data, mask, force);    in irq_do_set_affinity()
     310  const struct cpumask *dest, bool force)    in irq_try_set_affinity() argument
     312  int ret = irq_do_set_affinity(data, dest, force);    in irq_try_set_affinity()
     319  if (ret == -EBUSY && !force)    in irq_try_set_affinity()
     325  const struct cpumask *mask, bool force)    in irq_set_affinity_deactivated() argument
     349  bool force)    in irq_set_affinity_locked() argument
     [all …]

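Lines 267-270 capture the force semantics for IRQ affinity: an ordinary request is trimmed to the usable CPUs in tmp_mask, while a forced one (e.g. via irq_force_affinity()) hands the caller's mask to the chip driver untouched. A toy model of that branch, using a plain bitmask in place of struct cpumask (all names and values here are illustrative, not kernel code):

#include <stdio.h>
#include <stdbool.h>

typedef unsigned int mask_t;             /* toy bitmask, one bit per CPU */

static mask_t cpu_online_mask = 0x3;     /* pretend CPUs 0-1 are online */

static int chip_set_affinity(mask_t mask)
{
    printf("hardware programmed with mask 0x%x\n", mask);
    return 0;
}

/*
 * Models the force branch in irq_do_set_affinity(): without force the
 * request is trimmed to online CPUs; with force the caller's mask is
 * handed to the chip as-is.
 */
static int do_set_affinity(mask_t mask, bool force)
{
    mask_t tmp = mask & cpu_online_mask;

    if (!force && tmp != 0)
        return chip_set_affinity(tmp);
    else if (force)
        return chip_set_affinity(mask);
    return -1;  /* no usable CPU in mask and not forced */
}

int main(void)
{
    do_set_affinity(0xC, true);   /* offline CPUs, forced through */
    do_set_affinity(0x6, false);  /* trimmed to online CPU 1 */
    return 0;
}
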
D | chip.c
     193  bool force)    in __irq_startup_managed() argument
     209  if (WARN_ON_ONCE(force))    in __irq_startup_managed()
     230  bool force)    in __irq_startup_managed() argument
     255  int irq_startup(struct irq_desc *desc, bool resend, bool force)    in irq_startup() argument
     266  switch (__irq_startup_managed(desc, aff, force)) {    in irq_startup()
    1445  const struct cpumask *dest, bool force)    in irq_chip_set_affinity_parent() argument
    1449  return data->chip->irq_set_affinity(data, dest, force);    in irq_chip_set_affinity_parent()

D | spurious.c
      62  static int try_one_irq(struct irq_desc *desc, bool force)    in try_one_irq() argument
      82  if (irqd_irq_disabled(&desc->irq_data) && !force)    in try_one_irq()

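Line 82 is a simple guard: the spurious-IRQ poller skips interrupts that are currently disabled unless the caller explicitly forces the poll. A tiny model of that guard (struct fields here are illustrative, not the real irq_desc layout):

#include <stdio.h>
#include <stdbool.h>

struct irq_desc {
    bool disabled;
    const char *name;
};

/* Models the guard at the top of try_one_irq(): disabled interrupts
 * are skipped unless the caller explicitly forces a poll. */
static bool try_one_irq(struct irq_desc *desc, bool force)
{
    if (desc->disabled && !force) {
        printf("%s: disabled, skipping\n", desc->name);
        return false;
    }
    printf("%s: polling handler\n", desc->name);
    return true;
}

int main(void)
{
    struct irq_desc d = { .disabled = true, .name = "irq42" };

    try_one_irq(&d, false); /* skipped */
    try_one_irq(&d, true);  /* forced poll */
    return 0;
}
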
D | internals.h
      86  extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
     138  const struct cpumask *dest, bool force);

D | msi.c
     494  const struct cpumask *mask, bool force)    in msi_domain_set_affinity() argument
     500  ret = parent->chip->irq_set_affinity(parent, mask, force);    in msi_domain_set_affinity()

/kernel/
D | reboot.c
     849  static int __orderly_poweroff(bool force)    in __orderly_poweroff() argument
     855  if (ret && force) {    in __orderly_poweroff()
     886  void orderly_poweroff(bool force)    in orderly_poweroff() argument
     888  if (force) /* do not override the pending "true" */    in orderly_poweroff()
    1160  static struct kobj_attribute reboot_force_attr = __ATTR_RW(force);

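Line 855 shows the orderly-poweroff contract: the graceful userspace helper is tried first, and only when it fails AND the caller asked for force does the kernel fall back to powering off unconditionally. A userspace sketch of that control flow (the helper names below are stand-ins, not kernel functions):

#include <stdio.h>
#include <stdbool.h>

/* Stand-ins for running the userspace helper and the hard fallback. */
static int run_poweroff_helper(void)
{
    printf("exec'ing poweroff helper... (pretend it failed)\n");
    return -1;
}

static void emergency_poweroff(void)
{
    printf("forcing immediate poweroff\n");
}

/*
 * Models __orderly_poweroff(): try the graceful path first, and only
 * when that fails and force was requested, power off regardless.
 */
static int orderly_poweroff_model(bool force)
{
    int ret = run_poweroff_helper();

    if (ret && force)
        emergency_poweroff();
    return ret;
}

int main(void)
{
    orderly_poweroff_model(true);
    return 0;
}
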
D | signal.c
      83  static bool sig_task_ignored(struct task_struct *t, int sig, bool force)    in sig_task_ignored() argument
      94  handler == SIG_DFL && !(force && sig_kernel_only(sig)))    in sig_task_ignored()
      99  (handler == SIG_KTHREAD_KERNEL) && !force))    in sig_task_ignored()
     105  static bool sig_ignored(struct task_struct *t, int sig, bool force)    in sig_ignored() argument
     123  return sig_task_ignored(t, sig, force);    in sig_ignored()
     914  static bool prepare_signal(int sig, struct task_struct *p, bool force)    in prepare_signal() argument
     978  return !sig_ignored(p, sig, force);    in prepare_signal()
    1093  struct task_struct *t, enum pid_type type, bool force)    in __send_signal_locked() argument
    1103  if (!prepare_signal(sig, t, force))    in __send_signal_locked()
    1237  bool force = false;    in send_signal_locked() local
     [all …]

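Line 94 is the interesting condition: an unkillable task (such as global init) with a default handler ignores a signal unless it was both forced and one only the kernel may deliver (SIGKILL/SIGSTOP per sig_kernel_only()). A loose model of just that branch, not the full sig_task_ignored() logic:

#include <stdio.h>
#include <stdbool.h>
#include <signal.h>

/* As in the kernel: only SIGKILL and SIGSTOP are "kernel only". */
static bool sig_kernel_only(int sig)
{
    return sig == SIGKILL || sig == SIGSTOP;
}

/*
 * Loose model of the SIGNAL_UNKILLABLE branch of sig_task_ignored():
 * with a default handler, an unkillable task ignores everything
 * except a forced SIGKILL/SIGSTOP.
 */
static bool unkillable_ignores(int sig, bool force)
{
    return !(force && sig_kernel_only(sig));
}

int main(void)
{
    printf("SIGTERM forced:   ignored=%d\n", unkillable_ignores(SIGTERM, true));
    printf("SIGKILL unforced: ignored=%d\n", unkillable_ignores(SIGKILL, false));
    printf("SIGKILL forced:   ignored=%d\n", unkillable_ignores(SIGKILL, true));
    return 0;
}
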
D | kprobes.c
     725  static void unoptimize_kprobe(struct kprobe *p, bool force)    in unoptimize_kprobe() argument
     739  if (force) {    in unoptimize_kprobe()
     756  if (force) {    in unoptimize_kprobe()

D | cpu.c
     418  void __init cpu_smt_disable(bool force)    in cpu_smt_disable() argument
     423  if (force) {    in cpu_smt_disable()

/kernel/bpf/
D | bpf_lru_list.c
     383  bool force = false;    in __local_list_pop_pending() local
     389  if ((!bpf_lru_node_is_ref(node) || force) &&    in __local_list_pop_pending()
     396  if (!force) {    in __local_list_pop_pending()
     397  force = true;    in __local_list_pop_pending()

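Here force is a local, not a parameter: lines 383-397 implement a two-pass scan. The first pass takes only nodes whose reference bit is clear; if none qualify, force is flipped and the scan repeats, taking any node. A self-contained model of that retry loop (the node layout is invented for the sketch):

#include <stdio.h>
#include <stdbool.h>

struct node { bool referenced; int id; bool taken; };

/*
 * Models the two-pass loop in __local_list_pop_pending(): prefer an
 * unreferenced node; if the first pass finds none, retry with force
 * set and take the first node regardless of its reference bit.
 */
static struct node *pop_pending(struct node *nodes, int n)
{
    bool force = false;

again:
    for (int i = 0; i < n; i++) {
        struct node *node = &nodes[i];

        if (node->taken)
            continue;
        if (!node->referenced || force) {
            node->taken = true;
            return node;
        }
    }
    if (!force) {
        force = true;
        goto again;
    }
    return NULL;
}

int main(void)
{
    struct node nodes[4] = {
        { true, 0, false }, { true, 1, false },
        { true, 2, false }, { true, 3, false },
    };
    struct node *n = pop_pending(nodes, 4);

    /* All nodes referenced, so the forced second pass takes node 0. */
    printf("popped node %d\n", n ? n->id : -1);
    return 0;
}
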
/kernel/rcu/
D | tree_nocb.h
     217  bool force, unsigned long flags)    in __wake_nocb_gp() argument
     234  if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) {    in __wake_nocb_gp()
     250  static bool wake_nocb_gp(struct rcu_data *rdp, bool force)    in wake_nocb_gp() argument
     256  return __wake_nocb_gp(rdp_gp, rdp, force, flags);    in wake_nocb_gp()
    1725  static bool wake_nocb_gp(struct rcu_data *rdp, bool force)    in wake_nocb_gp() argument

D | Kconfig
      88  This option force-enables a task-based RCU implementation
     104  This option force-enables a task-based RCU implementation
     124  CPU hotplug code paths. It can force IPIs on online CPUs,
     261  be used to force the kthreads to run on whatever set of CPUs is

D | tree.h
     448  static bool wake_nocb_gp(struct rcu_data *rdp, bool force);

/kernel/module/
D | Kconfig
      30  --force). Forced module loading sets the 'F' (forced) taint flag and
      45  This option allows you to force a module to unload, even if the

/kernel/sched/
D | psi.c
     571  bool force)    in psi_schedule_poll_work() argument
     579  if (atomic_xchg(&group->poll_scheduled, 1) && !force)    in psi_schedule_poll_work()

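Line 579 packs the whole scheduling guard into one atomic_xchg(): the exchange both sets the "poll scheduled" flag and reports whether it was already set, so only the first caller, or a forced one, goes on to schedule the work. A standalone model using C11 atomics in place of the kernel's atomic_t:

#include <stdio.h>
#include <stdbool.h>
#include <stdatomic.h>

static atomic_int poll_scheduled = 0;

/*
 * Models the guard in psi_schedule_poll_work(): atomic_exchange() sets
 * the flag and returns the old value in one step, so only the first
 * caller (or a forced one) actually schedules the work.
 */
static bool schedule_poll_work(bool force)
{
    if (atomic_exchange(&poll_scheduled, 1) && !force) {
        printf("already scheduled, skipping\n");
        return false;
    }
    printf("scheduling poll work%s\n", force ? " (forced)" : "");
    return true;
}

int main(void)
{
    schedule_poll_work(false); /* first call wins */
    schedule_poll_work(false); /* skipped: already scheduled */
    schedule_poll_work(true);  /* force overrides the guard */
    return 0;
}

The exchange-and-test idiom avoids a separate read-then-set race: two concurrent callers cannot both see the flag clear.
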
/kernel/livepatch/
D | core.c
     426  static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);

/kernel/cgroup/
D | cpuset.c
    1598  bool force)    in update_cpumasks_hier() argument
    1641  if (!cp->partition_root_state && !force &&    in update_cpumasks_hier()

/kernel/locking/
D | lockdep.c
    1273  register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)    in register_lock_class() argument
    1365  if (!subclass || force)    in register_lock_class()