Searched refs:force (Results 1 – 18 of 18) sorted by relevance

/kernel/
reboot.c
459 static int __orderly_poweroff(bool force) in __orderly_poweroff() argument
465 if (ret && force) { in __orderly_poweroff()
496 void orderly_poweroff(bool force) in orderly_poweroff() argument
498 if (force) /* do not override the pending "true" */ in orderly_poweroff()
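
The reboot.c hits show the basic shape of this flag: run the orderly path first, and fall back to the forced action only when that path fails and the caller asked for force. A minimal, hypothetical userspace sketch of that pattern (not the kernel implementation):

    /* Hypothetical sketch of the "ret && force" fallback in __orderly_poweroff(). */
    #include <stdbool.h>
    #include <stdio.h>

    static int run_orderly_cmd(void)  { return -1; }   /* pretend the userspace helper failed */
    static void forced_poweroff(void) { puts("forced poweroff"); }

    static void orderly_poweroff_sketch(bool force)
    {
        int ret = run_orderly_cmd();

        if (ret && force)       /* mirrors: if (ret && force) */
            forced_poweroff();
    }

    int main(void)
    {
        orderly_poweroff_sketch(true);
        return 0;
    }
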
signal.c
79 static bool sig_task_ignored(struct task_struct *t, int sig, bool force) in sig_task_ignored() argument
90 handler == SIG_DFL && !(force && sig_kernel_only(sig))) in sig_task_ignored()
95 (handler == SIG_KTHREAD_KERNEL) && !force)) in sig_task_ignored()
101 static bool sig_ignored(struct task_struct *t, int sig, bool force) in sig_ignored() argument
119 return sig_task_ignored(t, sig, force); in sig_ignored()
894 static bool prepare_signal(int sig, struct task_struct *p, bool force) in prepare_signal() argument
956 return !sig_ignored(p, sig, force); in prepare_signal()
1066 enum pid_type type, bool force) in __send_signal() argument
1076 if (!prepare_signal(sig, t, force)) in __send_signal()
1207 bool force = false; in send_signal() local
[all …]
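
In signal.c the flag is threaded from __send_signal() down through prepare_signal() to sig_task_ignored(), where it keeps a force-sent, kernel-only signal from being treated as ignored. A hypothetical stand-alone sketch of that override, with plain booleans standing in for the task state:

    /* Hypothetical sketch of the force override in sig_task_ignored(): a task that
     * would normally ignore the signal still cannot ignore a forced SIGKILL/SIGSTOP. */
    #include <signal.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool sig_kernel_only_sketch(int sig)
    {
        return sig == SIGKILL || sig == SIGSTOP;
    }

    static bool sig_ignored_sketch(bool unkillable, bool default_handler,
                                   int sig, bool force)
    {
        /* mirrors: handler == SIG_DFL && !(force && sig_kernel_only(sig)) */
        if (unkillable && default_handler && !(force && sig_kernel_only_sketch(sig)))
            return true;        /* treated as ignored, signal is dropped */
        return false;
    }

    int main(void)
    {
        printf("ignored without force: %d\n", sig_ignored_sketch(true, true, SIGKILL, false));
        printf("ignored with force:    %d\n", sig_ignored_sketch(true, true, SIGKILL, true));
        return 0;
    }
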
kprobes.c
657 static void unoptimize_kprobe(struct kprobe *p, bool force) in unoptimize_kprobe() argument
667 if (force && !list_empty(&op->list)) { in unoptimize_kprobe()
686 if (force) in unoptimize_kprobe()
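
The unoptimize_kprobe() hits suggest the deferred-versus-immediate variant of the flag: work is normally queued for a later pass, but a forced call handles it right away even if it is already queued. A hypothetical sketch of that shape, with a single flag standing in for the kprobe list:

    /* Hypothetical "queue by default, act now when forced" sketch. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool queued;

    static void do_work_now(void)     { puts("processed immediately"); }
    static void queue_for_later(void) { queued = true; puts("queued for the deferred pass"); }

    static void unoptimize_sketch(bool force)
    {
        if (force && queued) {      /* mirrors: if (force && !list_empty(&op->list)) */
            queued = false;         /* drop the deferral and act now */
            do_work_now();
            return;
        }
        if (force)
            do_work_now();
        else
            queue_for_later();
    }

    int main(void)
    {
        queue_for_later();
        unoptimize_sketch(true);
        return 0;
    }
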
cpu.c
393 void __init cpu_smt_disable(bool force) in cpu_smt_disable() argument
398 if (force) { in cpu_smt_disable()
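
cpu_smt_disable() takes force at early boot. The sketch below assumes the flag selects a stronger, sticky disable mode rather than the soft one; the mode names are illustrative, not the kernel's:

    /* Hypothetical sketch: force picks a disable mode that cannot be undone later. */
    #include <stdbool.h>

    enum smt_mode { SMT_ENABLED, SMT_DISABLED, SMT_FORCE_DISABLED };

    static enum smt_mode smt_control = SMT_ENABLED;

    static void smt_disable_sketch(bool force)
    {
        if (force)
            smt_control = SMT_FORCE_DISABLED;       /* sticky: no later re-enable */
        else if (smt_control != SMT_FORCE_DISABLED)
            smt_control = SMT_DISABLED;
    }

    int main(void)
    {
        smt_disable_sketch(true);
        return smt_control == SMT_FORCE_DISABLED ? 0 : 1;
    }
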
/kernel/time/
tick-internal.h
57 ktime_t expires, bool force);
103 extern int tick_program_event(ktime_t expires, int force);
118 static inline int tick_program_event(ktime_t expires, int force) { return 0; } in tick_program_event() argument
tick-oneshot.c
23 int tick_program_event(ktime_t expires, int force) in tick_program_event() argument
44 return clockevents_program_event(dev, expires, force); in tick_program_event()
clockevents.c
304 bool force) in clockevents_program_event() argument
328 return force ? clockevents_program_min_delta(dev) : -ETIME; in clockevents_program_event()
336 return (rc && force) ? clockevents_program_min_delta(dev) : rc; in clockevents_program_event()
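
The tick/clockevents chain passes force from tick_program_event() into clockevents_program_event(), where it decides whether a request that cannot be honoured is answered with the minimum programmable delta or with -ETIME. A simplified, hypothetical sketch of those two return paths (the device model here is invented):

    /* Hypothetical sketch of the force paths in clockevents_program_event(). */
    #include <stdbool.h>
    #include <stdio.h>

    #define ETIME_SKETCH 62

    static long long now_ns = 1000;

    static int program_min_delta(void)           { puts("programmed min delta"); return 0; }
    static int program_delta(long long delta_ns) { return delta_ns > 0 ? 0 : -1; }

    static int program_event_sketch(long long expires_ns, bool force)
    {
        long long delta = expires_ns - now_ns;
        int rc;

        if (delta <= 0)     /* expiry already in the past */
            return force ? program_min_delta() : -ETIME_SKETCH;

        rc = program_delta(delta);
        return (rc && force) ? program_min_delta() : rc;    /* mirrors line 336 above */
    }

    int main(void)
    {
        return program_event_sketch(500, true);     /* forced: falls back to min delta */
    }
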
/kernel/irq/
chip.c
194 __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force) in __irq_startup_managed() argument
210 if (WARN_ON_ONCE(force)) in __irq_startup_managed()
230 __irq_startup_managed(struct irq_desc *desc, struct cpumask *aff, bool force) in __irq_startup_managed() argument
255 int irq_startup(struct irq_desc *desc, bool resend, bool force) in irq_startup() argument
266 switch (__irq_startup_managed(desc, aff, force)) { in irq_startup()
1438 const struct cpumask *dest, bool force) in irq_chip_set_affinity_parent() argument
1442 return data->chip->irq_set_affinity(data, dest, force); in irq_chip_set_affinity_parent()
manage.c
211 bool force) in irq_do_set_affinity() argument
220 ret = chip->irq_set_affinity(data, mask, force); in irq_do_set_affinity()
254 const struct cpumask *dest, bool force) in irq_try_set_affinity() argument
256 int ret = irq_do_set_affinity(data, dest, force); in irq_try_set_affinity()
263 if (ret == -EBUSY && !force) in irq_try_set_affinity()
269 bool force) in irq_set_affinity_locked() argument
279 ret = irq_try_set_affinity(data, mask, force); in irq_set_affinity_locked()
294 int __irq_set_affinity(unsigned int irq, const struct cpumask *mask, bool force) in __irq_set_affinity() argument
304 ret = irq_set_affinity_locked(irq_desc_get_irq_data(desc), mask, force); in __irq_set_affinity()
spurious.c
61 static int try_one_irq(struct irq_desc *desc, bool force) in try_one_irq() argument
81 if (irqd_irq_disabled(&desc->irq_data) && !force) in try_one_irq()
internals.h
82 extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
136 const struct cpumask *dest, bool force);
msi.c
102 const struct cpumask *mask, bool force) in msi_domain_set_affinity() argument
108 ret = parent->chip->irq_set_affinity(parent, mask, force); in msi_domain_set_affinity()
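
Across the irq code the flag simply rides along the irq_set_affinity() call chain: irq_chip_set_affinity_parent() and msi_domain_set_affinity() forward it to the parent chip, and irq_try_set_affinity() tolerates -EBUSY only for non-forced requests. A hypothetical sketch of that forwarding and of the -EBUSY rule (what "deferring" does is an assumption, not shown in these hits):

    /* Hypothetical sketch: the force flag is forwarded down a chip hierarchy,
     * and a busy result is only tolerated when the request was not forced. */
    #include <stdbool.h>
    #include <stdio.h>

    #define EBUSY_SKETCH 16

    struct chip {
        int (*set_affinity)(struct chip *c, unsigned int mask, bool force);
        struct chip *parent;
    };

    static int leaf_set_affinity(struct chip *c, unsigned int mask, bool force)
    {
        (void)c; (void)mask; (void)force;
        return -EBUSY_SKETCH;                 /* pretend the hardware is busy */
    }

    static int parent_fwd_set_affinity(struct chip *c, unsigned int mask, bool force)
    {
        /* like irq_chip_set_affinity_parent(): pass force through unchanged */
        return c->parent->set_affinity(c->parent, mask, force);
    }

    static int try_set_affinity_sketch(struct chip *c, unsigned int mask, bool force)
    {
        int ret = c->set_affinity(c, mask, force);

        if (ret == -EBUSY_SKETCH && !force) { /* mirrors: if (ret == -EBUSY && !force) */
            puts("busy: deferring the update");
            ret = 0;
        }
        return ret;
    }

    int main(void)
    {
        struct chip leaf = { leaf_set_affinity, NULL };
        struct chip top  = { parent_fwd_set_affinity, &leaf };

        return try_set_affinity_sketch(&top, 0x3, false);
    }
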
/kernel/bpf/
bpf_lru_list.c
378 bool force = false; in __local_list_pop_pending() local
384 if ((!bpf_lru_node_is_ref(node) || force) && in __local_list_pop_pending()
391 if (!force) { in __local_list_pop_pending()
392 force = true; in __local_list_pop_pending()
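
__local_list_pop_pending() uses a local force flag for a two-pass scan: the first pass takes only unreferenced nodes, and only if that finds nothing does the loop retry with force set so any node qualifies. A stand-alone sketch of the pattern, with a plain array in place of the LRU list:

    /* Hypothetical two-pass scan: prefer unreferenced nodes, then take anything. */
    #include <stdbool.h>
    #include <stdio.h>

    struct node { bool referenced; int id; };

    static struct node *pop_pending_sketch(struct node *nodes, int n)
    {
        bool force = false;
        int i;

    again:
        for (i = 0; i < n; i++) {
            if (!nodes[i].referenced || force)  /* mirrors: !bpf_lru_node_is_ref(node) || force */
                return &nodes[i];
        }
        if (!force) {
            force = true;
            goto again;
        }
        return NULL;
    }

    int main(void)
    {
        struct node nodes[] = { { true, 1 }, { true, 2 } };
        struct node *picked = pop_pending_sketch(nodes, 2);

        printf("picked node %d on the forced pass\n", picked ? picked->id : -1);
        return 0;
    }
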
/kernel/rcu/
Kconfig
207 to force the kthreads to run on whatever set of CPUs is desired.
tree_plugin.h
1641 static void wake_nocb_gp(struct rcu_data *rdp, bool force, in wake_nocb_gp() argument
1658 if (force || READ_ONCE(rdp_gp->nocb_gp_sleep)) { in wake_nocb_gp()
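
wake_nocb_gp() wakes the no-callbacks grace-period kthread when the caller forces it or when the thread's sleep flag is set. A hypothetical sketch with a boolean standing in for the RCU state:

    /* Hypothetical sketch of the "force || sleeping" wake-up condition. */
    #include <stdbool.h>
    #include <stdio.h>

    static bool gp_thread_sleeping = true;

    static void wake_gp_thread(void)
    {
        gp_thread_sleeping = false;
        puts("grace-period thread woken");
    }

    static void wake_nocb_gp_sketch(bool force)
    {
        if (force || gp_thread_sleeping)    /* mirrors: force || READ_ONCE(rdp_gp->nocb_gp_sleep) */
            wake_gp_thread();
    }

    int main(void)
    {
        wake_nocb_gp_sketch(false);     /* still wakes: the thread was flagged asleep */
        return 0;
    }
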
/kernel/livepatch/
core.c
404 static struct kobj_attribute force_kobj_attr = __ATTR_WO(force);
/kernel/sched/
fair.c
825 static void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) in update_tg_load_avg() argument
3149 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) in update_tg_load_avg() argument
3159 if (force || abs(delta) > cfs_rq->tg_load_avg_contrib / 64) { in update_tg_load_avg()
3442 static inline void update_tg_load_avg(struct cfs_rq *cfs_rq, int force) {} in update_tg_load_avg() argument
8098 static bool update_nohz_stats(struct rq *rq, bool force) in update_nohz_stats() argument
8109 if (!force && !time_after(jiffies, rq->last_blocked_load_update_tick)) in update_nohz_stats()
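
In fair.c the flag bypasses two filters: update_tg_load_avg() publishes the new contribution only when forced or when it moved by more than 1/64 of the previously published value, and update_nohz_stats() is rate-limited by a jiffies check unless forced. A stand-alone sketch of the 1/64 filter:

    /* Hypothetical sketch of the "force || abs(delta) > contrib / 64" filter. */
    #include <stdbool.h>
    #include <stdio.h>
    #include <stdlib.h>

    static long published = 6400;   /* last value written to the shared counter */

    static void update_contrib_sketch(long current_avg, bool force)
    {
        long delta = current_avg - published;

        if (force || labs(delta) > published / 64) {
            published += delta;                 /* propagate the change */
            printf("published %ld\n", published);
        }
    }

    int main(void)
    {
        update_contrib_sketch(6450, false);     /* 50 <= 6400/64: skipped */
        update_contrib_sketch(6450, true);      /* forced: published anyway */
        return 0;
    }
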
/kernel/locking/
lockdep.c
1176 register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force) in register_lock_class() argument
1261 if (!subclass || force) in register_lock_class()
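
register_lock_class() uses "!subclass || force" as a guard; the sketch below assumes it protects a per-lock cache slot, so only the base subclass (or a forced lookup) refreshes the cached class pointer:

    /* Hypothetical sketch of a cache update guarded by "!subclass || force". */
    #include <stdbool.h>
    #include <stdio.h>

    struct lock_class { int id; };
    struct lock_map   { struct lock_class *class_cache; };

    static void register_class_sketch(struct lock_map *lock, unsigned int subclass,
                                      bool force, struct lock_class *found)
    {
        if (!subclass || force)             /* mirrors: if (!subclass || force) */
            lock->class_cache = found;      /* refresh the fast-path cache */
    }

    int main(void)
    {
        struct lock_class c = { 1 };
        struct lock_map m = { NULL };

        register_class_sketch(&m, 2, false, &c);    /* nested subclass: cache untouched */
        register_class_sketch(&m, 2, true, &c);     /* forced: cache refreshed */
        printf("cached id %d\n", m.class_cache ? m.class_cache->id : 0);
        return 0;
    }
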