Searched refs:force (Results 1 – 20 of 20) sorted by relevance

/kernel/time/
tick-internal.h
56 ktime_t expires, bool force);
103 extern int tick_program_event(ktime_t expires, int force);
118 static inline int tick_program_event(ktime_t expires, int force) { return 0; } in tick_program_event() argument
tick-oneshot.c
23 int tick_program_event(ktime_t expires, int force) in tick_program_event() argument
44 return clockevents_program_event(dev, expires, force); in tick_program_event()
clockevents.c
304 bool force) in clockevents_program_event() argument
328 return force ? clockevents_program_min_delta(dev) : -ETIME; in clockevents_program_event()
336 return (rc && force) ? clockevents_program_min_delta(dev) : rc; in clockevents_program_event()
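
The clockevents.c matches above show what force buys: when programming the next event fails, or the requested delta is already in the past, only a forced request falls back to the hardware's minimum delta. Below is a minimal standalone C sketch of that pattern; program_hw() and program_min_delta() are hypothetical stand-ins, not kernel APIs.

/* Standalone model of the force-retry pattern visible in
 * clockevents_program_event(): a failed or too-late request is only
 * escalated to the minimum-delta fallback when force is set. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int program_hw(long delta_ns)
{
    /* Pretend deltas below 1000 ns are too short for the hardware. */
    return delta_ns < 1000 ? -ETIME : 0;
}

static int program_min_delta(void)
{
    /* Fallback: program the closest expiry the hardware can do. */
    return program_hw(1000);
}

static int program_event(long delta_ns, bool force)
{
    if (delta_ns <= 0)
        return force ? program_min_delta() : -ETIME;

    int rc = program_hw(delta_ns);
    /* Only a forced request may fall back to the minimum delta. */
    return (rc && force) ? program_min_delta() : rc;
}

int main(void)
{
    printf("non-forced: %d\n", program_event(10, false)); /* -ETIME */
    printf("forced:     %d\n", program_event(10, true));  /* 0 */
    return 0;
}
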
/kernel/
reboot.c
851 static int __orderly_poweroff(bool force) in __orderly_poweroff() argument
857 if (ret && force) { in __orderly_poweroff()
888 void orderly_poweroff(bool force) in orderly_poweroff() argument
890 if (force) /* do not override the pending "true" */ in orderly_poweroff()
1164 static struct kobj_attribute reboot_force_attr = __ATTR_RW(force);
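
In reboot.c, force turns a failed graceful shutdown into a hard one: __orderly_poweroff() takes the emergency path only when the orderly attempt failed and force was set, and orderly_poweroff() avoids downgrading a pending forced request. A small standalone sketch of the escalate-on-failure part follows; run_helper() and hard_poweroff() are hypothetical stand-ins.

/* Standalone model of the escalation in __orderly_poweroff(): try the
 * graceful path, and only fall back to the hard path when it fails
 * and the caller passed force. */
#include <stdbool.h>
#include <stdio.h>

static int run_helper(void)     { return -1; /* pretend the helper failed */ }
static void hard_poweroff(void) { puts("forcing power off"); }

static int orderly_poweroff(bool force)
{
    int ret = run_helper();

    if (ret && force) {
        /* Graceful path failed but the caller insisted: escalate. */
        hard_poweroff();
        ret = 0;
    }
    return ret;
}

int main(void)
{
    printf("force=false -> %d\n", orderly_poweroff(false));
    printf("force=true  -> %d\n", orderly_poweroff(true));
    return 0;
}
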
signal.c
85 static bool sig_task_ignored(struct task_struct *t, int sig, bool force) in sig_task_ignored() argument
96 handler == SIG_DFL && !(force && sig_kernel_only(sig))) in sig_task_ignored()
101 (handler == SIG_KTHREAD_KERNEL) && !force)) in sig_task_ignored()
107 static bool sig_ignored(struct task_struct *t, int sig, bool force) in sig_ignored() argument
125 return sig_task_ignored(t, sig, force); in sig_ignored()
916 static bool prepare_signal(int sig, struct task_struct *p, bool force) in prepare_signal() argument
980 return !sig_ignored(p, sig, force); in prepare_signal()
1094 struct task_struct *t, enum pid_type type, bool force) in __send_signal_locked() argument
1104 if (!prepare_signal(sig, t, force)) in __send_signal_locked()
1238 bool force = false; in send_signal_locked() local
[all …]
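
The signal.c matches use force to punch through the usual ignore rules: a forced kernel-only signal (the SIGKILL/SIGSTOP class) is delivered even where the default disposition would normally swallow it, and kthread-internal handlers ignore anything that is not forced. Below is a simplified standalone model of those two checks; the kernel's task-state context is reduced to plain booleans, and sig_kernel_only() is a stand-in covering only SIGKILL and SIGSTOP.

/* Simplified model of the two force checks in sig_task_ignored(). */
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>

static bool sig_kernel_only(int sig)
{
    return sig == SIGKILL || sig == SIGSTOP;
}

static bool sig_task_ignored(bool unkillable_dfl, bool kthread_handler,
                             int sig, bool force)
{
    /* An unkillable task with the default handler ignores the signal
     * unless it is a forced kernel-only signal. */
    if (unkillable_dfl && !(force && sig_kernel_only(sig)))
        return true;

    /* A kthread-internal handler ignores anything not forced. */
    if (kthread_handler && !force)
        return true;

    return false;
}

int main(void)
{
    printf("SIGTERM to unkillable task, forced: %d\n",
           sig_task_ignored(true, false, SIGTERM, true));
    printf("SIGKILL to unkillable task, forced: %d\n",
           sig_task_ignored(true, false, SIGKILL, true));
    return 0;
}
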
kprobes.c
725 static void unoptimize_kprobe(struct kprobe *p, bool force) in unoptimize_kprobe() argument
739 if (force) { in unoptimize_kprobe()
756 if (force) { in unoptimize_kprobe()
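
In kprobes.c the flag selects how urgently the work happens: unoptimize_kprobe() appears to queue the unoptimization for a later batched pass unless force is set, in which case it is carried out immediately. A tiny standalone sketch of that defer-or-do-now shape; queue_for_later() and do_immediately() are hypothetical stand-ins.

/* Standalone model of the defer-or-do-now shape in unoptimize_kprobe(). */
#include <stdbool.h>
#include <stdio.h>

static void queue_for_later(const char *what) { printf("queued: %s\n", what); }
static void do_immediately(const char *what)  { printf("now:    %s\n", what); }

static void unoptimize(const char *probe, bool force)
{
    if (force) {
        /* Forced callers cannot wait for the batched pass. */
        do_immediately(probe);
        return;
    }
    queue_for_later(probe);
}

int main(void)
{
    unoptimize("probe_a", false);
    unoptimize("probe_b", true);
    return 0;
}
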
cpu.c
600 void __init cpu_smt_disable(bool force) in cpu_smt_disable() argument
605 if (force) { in cpu_smt_disable()
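
cpu_smt_disable() uses force to pick a stronger, sticky "force disabled" state rather than a plain disable. A standalone sketch under that assumption; the enum mirrors the idea of the kernel's cpu_smt_control states but omits its guards.

/* Standalone model of the state selection in cpu_smt_disable(). */
#include <stdbool.h>
#include <stdio.h>

enum smt_control { SMT_ENABLED, SMT_DISABLED, SMT_FORCE_DISABLED };

static enum smt_control smt_control = SMT_ENABLED;

static void smt_disable(bool force)
{
    if (force) {
        puts("SMT: Force disabled");
        smt_control = SMT_FORCE_DISABLED;
    } else {
        puts("SMT: disabled");
        smt_control = SMT_DISABLED;
    }
}

int main(void)
{
    smt_disable(false);
    smt_disable(true);
    printf("final state: %d\n", smt_control);
    return 0;
}
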
/kernel/irq/
manage.c
218 bool force) in irq_do_set_affinity() argument
272 if (!force && !cpumask_empty(&tmp_mask)) in irq_do_set_affinity()
273 ret = chip->irq_set_affinity(data, &tmp_mask, force); in irq_do_set_affinity()
274 else if (force) in irq_do_set_affinity()
275 ret = chip->irq_set_affinity(data, mask, force); in irq_do_set_affinity()
315 const struct cpumask *dest, bool force) in irq_try_set_affinity() argument
317 int ret = irq_do_set_affinity(data, dest, force); in irq_try_set_affinity()
324 if (ret == -EBUSY && !force) in irq_try_set_affinity()
354 bool force) in irq_set_affinity_locked() argument
367 ret = irq_try_set_affinity(data, mask, force); in irq_set_affinity_locked()
[all …]
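
For IRQ affinity, force has two visible effects in manage.c: irq_do_set_affinity() applies the caller's mask as-is rather than the filtered tmp_mask, and irq_try_set_affinity() only lets a non-forced request fall back to a pending mechanism when the operation reports -EBUSY. A standalone sketch of the busy-fallback part follows; apply_now() and queue_pending() are hypothetical stand-ins.

/* Standalone model of the busy-fallback in irq_try_set_affinity(). */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int apply_now(int cpu)     { (void)cpu; return -EBUSY; /* pretend busy */ }
static int queue_pending(int cpu) { printf("queued move to CPU%d\n", cpu); return 0; }

static int try_set_affinity(int cpu, bool force)
{
    int ret = apply_now(cpu);

    /* Only non-forced requests may be parked as pending work. */
    if (ret == -EBUSY && !force)
        ret = queue_pending(cpu);
    return ret;
}

int main(void)
{
    printf("non-forced: %d\n", try_set_affinity(2, false));
    printf("forced:     %d\n", try_set_affinity(2, true));
    return 0;
}
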
chip.c
193 bool force) in __irq_startup_managed() argument
209 if (WARN_ON_ONCE(force)) in __irq_startup_managed()
230 bool force) in __irq_startup_managed() argument
255 int irq_startup(struct irq_desc *desc, bool resend, bool force) in irq_startup() argument
266 switch (__irq_startup_managed(desc, aff, force)) { in irq_startup()
1457 const struct cpumask *dest, bool force) in irq_chip_set_affinity_parent() argument
1461 return data->chip->irq_set_affinity(data, dest, force); in irq_chip_set_affinity_parent()
spurious.c
62 static int try_one_irq(struct irq_desc *desc, bool force) in try_one_irq() argument
82 if (irqd_irq_disabled(&desc->irq_data) && !force) in try_one_irq()
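
try_one_irq() in spurious.c shows the simplest use: a disabled interrupt line is normally skipped, but a forced poll inspects it anyway. A minimal standalone sketch; the descriptor struct here is a stand-in for struct irq_desc.

/* Standalone model of the skip-unless-forced check in try_one_irq(). */
#include <stdbool.h>
#include <stdio.h>

struct desc { int irq; bool disabled; };

static bool try_one(const struct desc *d, bool force)
{
    /* Don't poke disabled lines unless the caller forces the poll. */
    if (d->disabled && !force)
        return false;

    printf("polling IRQ %d\n", d->irq);
    return true;
}

int main(void)
{
    struct desc d = { .irq = 11, .disabled = true };
    printf("normal poll handled: %d\n", try_one(&d, false));
    printf("forced poll handled: %d\n", try_one(&d, true));
    return 0;
}
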
internals.h
89 extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
143 const struct cpumask *dest, bool force);
msi.c
648 const struct cpumask *mask, bool force) in msi_domain_set_affinity() argument
654 ret = parent->chip->irq_set_affinity(parent, mask, force); in msi_domain_set_affinity()
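
Both irq_chip_set_affinity_parent() (chip.c:1461) and msi_domain_set_affinity() above do not interpret force at all: the mask and the flag are handed unchanged to the parent chip's irq_set_affinity callback. A standalone sketch of that hierarchical forwarding; the chip struct and callbacks are simplified stand-ins.

/* Standalone model of forwarding force up a chip hierarchy. */
#include <stdbool.h>
#include <stdio.h>

struct chip {
    const char *name;
    struct chip *parent;
    int (*set_affinity)(struct chip *c, unsigned int mask, bool force);
};

static int parent_set_affinity(struct chip *c, unsigned int mask, bool force)
{
    printf("%s: mask=0x%x force=%d\n", c->name, mask, force);
    return 0;
}

static int child_set_affinity(struct chip *c, unsigned int mask, bool force)
{
    /* Forward both the mask and the force flag untouched. */
    return c->parent->set_affinity(c->parent, mask, force);
}

int main(void)
{
    struct chip root  = { .name = "root",  .set_affinity = parent_set_affinity };
    struct chip child = { .name = "child", .parent = &root,
                          .set_affinity = child_set_affinity };

    return child.set_affinity(&child, 0xf, true);
}
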
/kernel/bpf/
bpf_lru_list.c
383 bool force = false; in __local_list_pop_pending() local
389 if ((!bpf_lru_node_is_ref(node) || force) && in __local_list_pop_pending()
396 if (!force) { in __local_list_pop_pending()
397 force = true; in __local_list_pop_pending()
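
__local_list_pop_pending() in bpf_lru_list.c uses force for a two-pass scan: the first pass skips recently referenced nodes, and only if nothing qualifies is the scan repeated with force set, accepting any pending node. A standalone sketch of that shape, with an array standing in for the kernel's pending list.

/* Standalone model of the two-pass scan in __local_list_pop_pending(). */
#include <stdbool.h>
#include <stdio.h>

struct node { int key; bool referenced; };

static struct node *pop_pending(struct node *nodes, int n)
{
    bool force = false;

again:
    for (int i = n - 1; i >= 0; i--) {
        /* Referenced nodes are only eligible on the forced second pass. */
        if (!nodes[i].referenced || force)
            return &nodes[i];
    }

    if (!force) {
        force = true;
        goto again;
    }
    return NULL;
}

int main(void)
{
    struct node nodes[] = { { 1, true }, { 2, true } };
    struct node *victim = pop_pending(nodes, 2);

    printf("victim key: %d\n", victim ? victim->key : -1);
    return 0;
}
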
/kernel/rcu/
tree_nocb.h
217 bool force, unsigned long flags) in __wake_nocb_gp() argument
234 if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) { in __wake_nocb_gp()
253 static bool wake_nocb_gp(struct rcu_data *rdp, bool force) in wake_nocb_gp() argument
259 return __wake_nocb_gp(rdp_gp, rdp, force, flags); in wake_nocb_gp()
1762 static bool wake_nocb_gp(struct rcu_data *rdp, bool force) in wake_nocb_gp() argument
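
In the RCU no-CB code, force overrides the "only wake if sleeping" check: __wake_nocb_gp() wakes the grace-period kthread when the wake is forced or the kthread is recorded as sleeping. A standalone sketch of that decision; the sleeping flag and do_wake() are simplified stand-ins.

/* Standalone model of the wake decision in __wake_nocb_gp(). */
#include <stdbool.h>
#include <stdio.h>

struct gp_kthread { bool sleeping; };

static void do_wake(struct gp_kthread *k)
{
    k->sleeping = false;
    puts("waking GP kthread");
}

static bool wake_gp(struct gp_kthread *k, bool force)
{
    /* Skip the wakeup only when it is neither forced nor needed. */
    if (!force && !k->sleeping)
        return false;

    do_wake(k);
    return true;
}

int main(void)
{
    struct gp_kthread k = { .sleeping = false };

    printf("not forced: %d\n", wake_gp(&k, false));
    printf("forced:     %d\n", wake_gp(&k, true));
    return 0;
}
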
Kconfig
83 This option force-enables a task-based RCU implementation
99 This option force-enables a task-based RCU implementation
119 CPU hotplug code paths. It can force IPIs on online CPUs,
256 be used to force the kthreads to run on whatever set of CPUs is
tree.h
467 static bool wake_nocb_gp(struct rcu_data *rdp, bool force);
/kernel/module/
Kconfig
115 Enabling this will force a full stack trace for duplicate module
128 --force). Forced module loading sets the 'F' (forced) taint flag and
143 This option allows you to force a module to unload, even if the
/kernel/sched/
psi.c
619 bool force) in psi_schedule_rtpoll_work() argument
627 if (atomic_xchg(&group->rtpoll_scheduled, 1) && !force) in psi_schedule_rtpoll_work()
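
psi_schedule_rtpoll_work() marks the work as scheduled with an atomic exchange and bails out on an already-set flag only when force is clear, so a forced call re-arms the poll regardless. A standalone C11 sketch of that schedule-once-unless-forced pattern; arm_timer() is a hypothetical stand-in.

/* Standalone model of the gating in psi_schedule_rtpoll_work(). */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int rtpoll_scheduled = 0;

static void arm_timer(void) { puts("arming rtpoll timer"); }

static void schedule_rtpoll(bool force)
{
    /* The exchange is done unconditionally; only the result is gated. */
    if (atomic_exchange(&rtpoll_scheduled, 1) && !force)
        return;

    arm_timer();
}

int main(void)
{
    schedule_rtpoll(false);  /* arms the timer */
    schedule_rtpoll(false);  /* already scheduled: skipped */
    schedule_rtpoll(true);   /* forced: arms again */
    return 0;
}
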
/kernel/livepatch/
core.c
448 static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
/kernel/locking/
lockdep.c
1275 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) in register_lock_class() argument
1367 if (!subclass || force) in register_lock_class()