Searched refs:d (Results 1 – 25 of 26) sorted by relevance

/kernel/irq/
generic-chip.c
25 void irq_gc_noop(struct irq_data *d) in irq_gc_noop() argument
36 void irq_gc_mask_disable_reg(struct irq_data *d) in irq_gc_mask_disable_reg() argument
38 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); in irq_gc_mask_disable_reg()
39 struct irq_chip_type *ct = irq_data_get_chip_type(d); in irq_gc_mask_disable_reg()
40 u32 mask = d->mask; in irq_gc_mask_disable_reg()
55 void irq_gc_mask_set_bit(struct irq_data *d) in irq_gc_mask_set_bit() argument
57 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); in irq_gc_mask_set_bit()
58 struct irq_chip_type *ct = irq_data_get_chip_type(d); in irq_gc_mask_set_bit()
59 u32 mask = d->mask; in irq_gc_mask_set_bit()
75 void irq_gc_mask_clr_bit(struct irq_data *d) in irq_gc_mask_clr_bit() argument
[all …]
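
The generic-chip helpers above all follow one shape: resolve the irq_chip_generic and irq_chip_type from the irq_data, take the per-line bit d->mask, then update a cached mask register under irq_gc_lock(). A minimal userspace model of the set/clear pair (struct and field names are invented, and the kernel's locking and register I/O are reduced to plain assignments):

#include <stdint.h>
#include <stdio.h>

/* Toy stand-ins for the irq_chip_type mask cache and mask register. */
struct toy_gc {
	uint32_t mask_cache;    /* cached copy of the hardware register */
	uint32_t hw_mask_reg;   /* simulated hardware mask register */
};

/* Shape of irq_gc_mask_set_bit(): set the bit to mask the line. */
static void toy_mask_set_bit(struct toy_gc *gc, uint32_t mask)
{
	gc->mask_cache |= mask;           /* update the cache ... */
	gc->hw_mask_reg = gc->mask_cache; /* ... then write it out */
}

/* Shape of irq_gc_mask_clr_bit(): clear the bit to unmask it. */
static void toy_mask_clr_bit(struct toy_gc *gc, uint32_t mask)
{
	gc->mask_cache &= ~mask;
	gc->hw_mask_reg = gc->mask_cache;
}

int main(void)
{
	struct toy_gc gc = { 0, 0 };

	toy_mask_set_bit(&gc, 1u << 3);
	toy_mask_clr_bit(&gc, 1u << 3);
	printf("mask register: %#x\n", (unsigned)gc.hw_mask_reg);
	return 0;
}
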
cpuhotplug.c
19 static inline bool irq_needs_fixup(struct irq_data *d) in irq_needs_fixup() argument
21 const struct cpumask *m = irq_data_get_effective_affinity_mask(d); in irq_needs_fixup()
31 m = irq_data_get_affinity_mask(d); in irq_needs_fixup()
45 cpumask_pr_args(m), d->irq, cpu); in irq_needs_fixup()
54 struct irq_data *d = irq_desc_get_irq_data(desc); in migrate_one_irq() local
55 struct irq_chip *chip = irq_data_get_irq_chip(d); in migrate_one_irq()
56 bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d); in migrate_one_irq()
67 pr_debug("IRQ %u: Unable to migrate away\n", d->irq); in migrate_one_irq()
80 if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) { in migrate_one_irq()
106 affinity = irq_data_get_affinity_mask(d); in migrate_one_irq()
[all …]
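
migrate_one_irq() starts by ruling out interrupts that need no attention when a CPU goes down: per-CPU interrupts, interrupts that were never started, and interrupts whose affinity mask does not contain the outgoing CPU (irq_needs_fixup()). A rough standalone model of that early-exit test (the flag bits and the single-word cpumask are illustrative simplifications):

#include <stdbool.h>
#include <stdio.h>

enum { TOY_PER_CPU = 1 << 0, TOY_STARTED = 1 << 1 };

struct toy_irq {
	unsigned int flags;
	unsigned long affinity;  /* one bit per CPU, stand-in for a cpumask */
};

/* Model of irq_needs_fixup(): does the affinity include the dying CPU? */
static bool toy_needs_fixup(const struct toy_irq *d, int cpu)
{
	return d->affinity & (1ul << cpu);
}

/* Model of migrate_one_irq()'s early bail-out. */
static bool toy_should_migrate(const struct toy_irq *d, int dying_cpu)
{
	if ((d->flags & TOY_PER_CPU) || !(d->flags & TOY_STARTED) ||
	    !toy_needs_fixup(d, dying_cpu))
		return false;            /* nothing to do for this interrupt */
	return true;
}

int main(void)
{
	struct toy_irq irq = { .flags = TOY_STARTED, .affinity = 1ul << 2 };

	printf("migrate on CPU2 unplug: %d\n", toy_should_migrate(&irq, 2));
	printf("migrate on CPU5 unplug: %d\n", toy_should_migrate(&irq, 5));
	return 0;
}
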
irqdomain.c
38 static void debugfs_add_domain_dir(struct irq_domain *d);
39 static void debugfs_remove_domain_dir(struct irq_domain *d);
41 static inline void debugfs_add_domain_dir(struct irq_domain *d) { } in debugfs_add_domain_dir() argument
42 static inline void debugfs_remove_domain_dir(struct irq_domain *d) { } in debugfs_remove_domain_dir() argument
730 static int irq_domain_translate(struct irq_domain *d, in irq_domain_translate() argument
735 if (d->ops->translate) in irq_domain_translate()
736 return d->ops->translate(d, fwspec, hwirq, type); in irq_domain_translate()
738 if (d->ops->xlate) in irq_domain_translate()
739 return d->ops->xlate(d, to_of_node(fwspec->fwnode), in irq_domain_translate()
927 int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr, in irq_domain_xlate_onecell() argument
[all …]
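
irq_domain_translate() dispatches to the modern ->translate hook when the domain provides one and falls back to the legacy ->xlate hook otherwise. The same try-new-then-legacy callback idiom, reduced to a self-contained sketch (all names here are stand-ins):

#include <stdio.h>

struct toy_domain;

struct toy_ops {
	/* modern hook: may be NULL */
	int (*translate)(struct toy_domain *d, int in, int *out);
	/* legacy hook: may be NULL */
	int (*xlate)(struct toy_domain *d, int in, int *out);
};

struct toy_domain { const struct toy_ops *ops; };

static int toy_translate(struct toy_domain *d, int in, int *out)
{
	if (d->ops->translate)
		return d->ops->translate(d, in, out);
	if (d->ops->xlate)
		return d->ops->xlate(d, in, out);
	return -1;                       /* neither hook provided */
}

static int legacy_xlate(struct toy_domain *d, int in, int *out)
{
	(void)d;
	*out = in + 100;                 /* arbitrary demo mapping */
	return 0;
}

int main(void)
{
	const struct toy_ops ops = { .xlate = legacy_xlate };
	struct toy_domain dom = { .ops = &ops };
	int hwirq;

	if (!toy_translate(&dom, 5, &hwirq))
		printf("hwirq = %d\n", hwirq);
	return 0;
}
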
internals.h
195 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) argument
197 static inline unsigned int irqd_get(struct irq_data *d) in irqd_get() argument
199 return __irqd_to_state(d); in irqd_get()
205 static inline void irqd_set_move_pending(struct irq_data *d) in irqd_set_move_pending() argument
207 __irqd_to_state(d) |= IRQD_SETAFFINITY_PENDING; in irqd_set_move_pending()
210 static inline void irqd_clr_move_pending(struct irq_data *d) in irqd_clr_move_pending() argument
212 __irqd_to_state(d) &= ~IRQD_SETAFFINITY_PENDING; in irqd_clr_move_pending()
215 static inline void irqd_set_managed_shutdown(struct irq_data *d) in irqd_set_managed_shutdown() argument
217 __irqd_to_state(d) |= IRQD_MANAGED_SHUTDOWN; in irqd_set_managed_shutdown()
220 static inline void irqd_clr_managed_shutdown(struct irq_data *d) in irqd_clr_managed_shutdown() argument
[all …]
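
These internals.h helpers are one-line wrappers that set, clear, or read flag bits in the per-interrupt state word reached through __irqd_to_state(). A trivial standalone equivalent (flag values are made up for the demo):

#include <stdint.h>
#include <stdio.h>

#define TOY_SETAFFINITY_PENDING (1u << 0)
#define TOY_MANAGED_SHUTDOWN    (1u << 1)

struct toy_irq_data { uint32_t state; };

/* Counterparts of irqd_set_move_pending()/irqd_clr_move_pending(). */
static void toy_set_move_pending(struct toy_irq_data *d)
{
	d->state |= TOY_SETAFFINITY_PENDING;
}

static void toy_clr_move_pending(struct toy_irq_data *d)
{
	d->state &= ~TOY_SETAFFINITY_PENDING;
}

int main(void)
{
	struct toy_irq_data d = { 0 };

	toy_set_move_pending(&d);
	printf("pending: %u\n", d.state & TOY_SETAFFINITY_PENDING ? 1 : 0);
	toy_clr_move_pending(&d);
	printf("pending: %u\n", d.state & TOY_SETAFFINITY_PENDING ? 1 : 0);
	return 0;
}
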
chip.c
196 struct irq_data *d = irq_desc_get_irq_data(desc); in __irq_startup_managed() local
198 if (!irqd_affinity_is_managed(d)) in __irq_startup_managed()
201 irqd_clr_managed_shutdown(d); in __irq_startup_managed()
224 if (WARN_ON(irq_domain_activate_irq(d, false))) in __irq_startup_managed()
238 struct irq_data *d = irq_desc_get_irq_data(desc); in __irq_startup() local
242 WARN_ON_ONCE(!irqd_is_activated(d)); in __irq_startup()
244 if (d->chip->irq_startup) { in __irq_startup()
245 ret = d->chip->irq_startup(d); in __irq_startup()
257 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_startup() local
258 struct cpumask *aff = irq_data_get_affinity_mask(d); in irq_startup()
[all …]
manage.c
1183 struct irq_data *d = &desc->irq_data; in irq_request_resources() local
1184 struct irq_chip *c = d->chip; in irq_request_resources()
1186 return c->irq_request_resources ? c->irq_request_resources(d) : 0; in irq_request_resources()
1191 struct irq_data *d = &desc->irq_data; in irq_release_resources() local
1192 struct irq_chip *c = d->chip; in irq_release_resources()
1195 c->irq_release_resources(d); in irq_release_resources()
1200 struct irq_data *d = irq_desc_get_irq_data(desc); in irq_supports_nmi() local
1204 if (d->parent_data) in irq_supports_nmi()
1208 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock) in irq_supports_nmi()
1211 return d->chip->flags & IRQCHIP_SUPPORTS_NMI; in irq_supports_nmi()
[all …]
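
irq_supports_nmi() encodes three rules visible in the snippet: NMIs are only delivered by the root of an irq-domain hierarchy (no parent_data), never by chips behind a slow bus (no bus-lock callbacks), and only when the chip sets IRQCHIP_SUPPORTS_NMI. A standalone model of the check (toy types, same logic):

#include <stdbool.h>
#include <stdio.h>

#define TOY_CHIP_SUPPORTS_NMI (1u << 0)

struct toy_chip {
	unsigned int flags;
	void (*bus_lock)(void);          /* slow-bus hooks: may be NULL */
	void (*bus_sync_unlock)(void);
};

struct toy_irq_data {
	void *parent_data;               /* set in hierarchical setups */
	const struct toy_chip *chip;
};

/* Model of irq_supports_nmi(): root of the hierarchy only, no sleeping
 * bus in the way, and the chip must opt in. */
static bool toy_supports_nmi(const struct toy_irq_data *d)
{
	if (d->parent_data)
		return false;
	if (d->chip->bus_lock || d->chip->bus_sync_unlock)
		return false;
	return d->chip->flags & TOY_CHIP_SUPPORTS_NMI;
}

int main(void)
{
	const struct toy_chip chip = { .flags = TOY_CHIP_SUPPORTS_NMI };
	struct toy_irq_data d = { .chip = &chip };

	printf("NMI capable: %d\n", toy_supports_nmi(&d));
	return 0;
}
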
/kernel/
delayacct.c
85 int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) in __delayacct_add_tsk() argument
93 tmp = (s64)d->cpu_run_real_total; in __delayacct_add_tsk()
95 d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp; in __delayacct_add_tsk()
98 tmp = (s64)d->cpu_scaled_run_real_total; in __delayacct_add_tsk()
100 d->cpu_scaled_run_real_total = in __delayacct_add_tsk()
101 (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp; in __delayacct_add_tsk()
111 d->cpu_count += t1; in __delayacct_add_tsk()
113 tmp = (s64)d->cpu_delay_total + t2; in __delayacct_add_tsk()
114 d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp; in __delayacct_add_tsk()
116 tmp = (s64)d->cpu_run_virtual_total + t3; in __delayacct_add_tsk()
[all …]
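
Every counter in __delayacct_add_tsk() is updated through the same guard: add the delta in a temporary and, if the sum wrapped below the old value, restart the counter from zero instead of publishing a wrapped number. Here is the guard on its own (the kernel works through s64 temporaries; this sketch detects the wrap with an unsigned compare to stay within defined C behavior):

#include <stdint.h>
#include <stdio.h>

/* Overflow-guarded accumulate, as used for the taskstats counters:
 * if the sum wrapped past the old value, restart from zero. */
static uint64_t toy_sat_add(uint64_t total, uint64_t delta)
{
	uint64_t tmp = total + delta;    /* unsigned add wraps, never UB */

	return (tmp < total) ? 0 : tmp;  /* same guard as (tmp < old) ? 0 : tmp */
}

int main(void)
{
	printf("%llu\n", (unsigned long long)toy_sat_add(10, 5));         /* 15 */
	printf("%llu\n", (unsigned long long)toy_sat_add(UINT64_MAX, 2)); /* 0: wrapped */
	return 0;
}
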
audit_watch.c
351 struct dentry *d = kern_path_locked(watch->path, parent); in audit_get_nd() local
352 if (IS_ERR(d)) in audit_get_nd()
353 return PTR_ERR(d); in audit_get_nd()
354 if (d_is_positive(d)) { in audit_get_nd()
356 watch->dev = d->d_sb->s_dev; in audit_get_nd()
357 watch->ino = d_backing_inode(d)->i_ino; in audit_get_nd()
360 dput(d); in audit_get_nd()
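
audit_get_nd() relies on the kernel's error-pointer convention: kern_path_locked() returns either a valid dentry or a negative errno folded into the pointer value, unpacked by IS_ERR()/PTR_ERR(). A userspace re-creation of that encoding (the toy_* helpers are illustrations, not the kernel's implementation):

#include <errno.h>
#include <stdio.h>

/* Errno values live in the top page of the address space, where no
 * real object can be, so a pointer can carry either result. */
#define MAX_ERRNO 4095

static void *toy_err_ptr(long err)      { return (void *)err; }
static int   toy_is_err(const void *p)  { return (unsigned long)p >= (unsigned long)-MAX_ERRNO; }
static long  toy_ptr_err(const void *p) { return (long)p; }

/* Stand-in for kern_path_locked(): succeed or fail via the pointer. */
static void *toy_lookup(const char *path)
{
	static int dentry = 42;              /* fake "dentry" object */

	if (path[0] != '/')
		return toy_err_ptr(-EINVAL); /* error folded into pointer */
	return &dentry;
}

int main(void)
{
	void *d = toy_lookup("not-absolute");

	if (toy_is_err(d))
		printf("lookup failed: %ld\n", toy_ptr_err(d)); /* -22 */

	d = toy_lookup("/etc/passwd");
	if (!toy_is_err(d))
		printf("lookup ok\n");
	return 0;
}
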
auditsc.c
110 struct audit_aux_data d; member
121 struct audit_aux_data d; member
1768 const struct dentry *d, *parent; in handle_path() local
1778 d = dentry; in handle_path()
1782 struct inode *inode = d_backing_inode(d); in handle_path()
1793 parent = d->d_parent; in handle_path()
1794 if (parent == d) in handle_path()
1796 d = parent; in handle_path()
2413 axp->d.type = AUDIT_OBJ_PID; in audit_signal_info_syscall()
2414 axp->d.next = ctx->aux_pids; in audit_signal_info_syscall()
[all …]
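
The loop in handle_path() climbs from a dentry toward the filesystem root, stopping when it meets a node that is its own parent, which is how the root is marked. A self-contained model of that upward walk (struct layout invented for the demo):

#include <stdio.h>

/* Minimal stand-in for a dentry: the root points to itself. */
struct toy_dentry {
	const char *name;
	struct toy_dentry *parent;
};

/* Model of handle_path()'s walk: visit each ancestor and stop when a
 * node is its own parent. */
static void toy_walk_to_root(struct toy_dentry *d)
{
	for (;;) {
		struct toy_dentry *parent;

		printf("visiting %s\n", d->name);
		parent = d->parent;
		if (parent == d)
			break;           /* reached the self-parented root */
		d = parent;
	}
}

int main(void)
{
	struct toy_dentry root = { "/", &root };
	struct toy_dentry etc  = { "etc", &root };
	struct toy_dentry pwd  = { "passwd", &etc };

	toy_walk_to_root(&pwd);
	return 0;
}
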
audit.h
301 #define audit_watch_compare(w, i, d) 0 argument
307 #define audit_mark_compare(m, i, d) 0 argument
/kernel/power/
swap.c
593 struct crc_data *d = data; in crc32_threadfn() local
597 wait_event(d->go, atomic_read(&d->ready) || in crc32_threadfn()
600 d->thr = NULL; in crc32_threadfn()
601 atomic_set(&d->stop, 1); in crc32_threadfn()
602 wake_up(&d->done); in crc32_threadfn()
605 atomic_set(&d->ready, 0); in crc32_threadfn()
607 for (i = 0; i < d->run_threads; i++) in crc32_threadfn()
608 *d->crc32 = crc32_le(*d->crc32, in crc32_threadfn()
609 d->unc[i], *d->unc_len[i]); in crc32_threadfn()
610 atomic_set(&d->stop, 1); in crc32_threadfn()
[all …]
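
crc32_threadfn() is a worker loop: block until the coordinator raises ready (or asks the thread to stop), checksum the buffers handed over, raise stop, and wake whoever waits on done. A condensed pthread rendering of that handshake; the kernel uses wait_event()/wake_up() and atomics, and the "checksum" below is a fake:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_crc_data {
	pthread_mutex_t lock;
	pthread_cond_t go, done;     /* stand-ins for the d->go/d->done queues */
	bool ready, stop, exiting;
	const char *buf;             /* one buffer instead of d->unc[] */
	unsigned int crc;
};

static void *toy_crc_threadfn(void *arg)
{
	struct toy_crc_data *d = arg;

	for (;;) {
		pthread_mutex_lock(&d->lock);
		while (!d->ready && !d->exiting)      /* wait_event(d->go, ...) */
			pthread_cond_wait(&d->go, &d->lock);
		if (d->exiting) {                      /* kthread_should_stop() */
			pthread_mutex_unlock(&d->lock);
			return NULL;
		}
		d->ready = false;                      /* atomic_set(&d->ready, 0) */
		for (const char *p = d->buf; *p; p++)  /* toy "checksum" */
			d->crc = d->crc * 31 + (unsigned char)*p;
		d->stop = true;                        /* atomic_set(&d->stop, 1) */
		pthread_cond_signal(&d->done);         /* wake_up(&d->done) */
		pthread_mutex_unlock(&d->lock);
	}
}

int main(void)
{
	struct toy_crc_data d = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.go = PTHREAD_COND_INITIALIZER,
		.done = PTHREAD_COND_INITIALIZER,
		.buf = "page contents",
	};
	pthread_t thr;

	pthread_create(&thr, NULL, toy_crc_threadfn, &d);

	pthread_mutex_lock(&d.lock);
	d.ready = true;
	pthread_cond_signal(&d.go);                    /* hand work to the thread */
	while (!d.stop)
		pthread_cond_wait(&d.done, &d.lock);
	d.exiting = true;
	pthread_cond_signal(&d.go);                    /* let the worker exit */
	pthread_mutex_unlock(&d.lock);

	pthread_join(thr, NULL);
	printf("crc = %u\n", d.crc);
	return 0;
}
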
energy_model.c
32 struct dentry *d; in em_debug_create_cs() local
38 d = debugfs_create_dir(name, pd); in em_debug_create_cs()
39 debugfs_create_ulong("frequency", 0444, d, &cs->frequency); in em_debug_create_cs()
40 debugfs_create_ulong("power", 0444, d, &cs->power); in em_debug_create_cs()
41 debugfs_create_ulong("cost", 0444, d, &cs->cost); in em_debug_create_cs()
54 struct dentry *d; in em_debug_create_pd() local
61 d = debugfs_create_dir(name, rootdir); in em_debug_create_pd()
63 debugfs_create_file("cpus", 0444, d, pd->cpus, &em_debug_cpus_fops); in em_debug_create_pd()
67 em_debug_create_cs(&pd->table[i], d); in em_debug_create_pd()
qos.c
532 static int register_pm_qos_misc(struct pm_qos_object *qos, struct dentry *d) in register_pm_qos_misc() argument
538 debugfs_create_file(qos->name, S_IRUGO, d, (void *)qos, in register_pm_qos_misc()
634 struct dentry *d; in pm_qos_power_init() local
638 d = debugfs_create_dir("pm_qos", NULL); in pm_qos_power_init()
641 ret = register_pm_qos_misc(pm_qos_array[i], d); in pm_qos_power_init()
/kernel/time/
timeconst.bc
16 define fmul(b,n,d) {
17 return (2^b*n+d-1)/d;
22 define fadj(b,n,d) {
24 d = d/gcd(n,d);
25 v = 2^b*(d-1)/d;
33 define fmuls(b,n,d) {
36 m = fmul(s,n,d);
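
fmul(b,n,d) in timeconst.bc computes ceil(2^b * n / d): the multiplier that lets a runtime division by d become a multiply plus a b-bit right shift when generating timeconst.h. The same computation in C for one concrete case (the generated header additionally folds in fadj()'s rounding adjustment, omitted here):

#include <stdint.h>
#include <stdio.h>

/* C version of timeconst.bc's fmul(b,n,d): ceil(2^b * n / d). */
static uint64_t fmul(unsigned int b, uint64_t n, uint64_t d)
{
	return (((uint64_t)1 << b) * n + d - 1) / d;
}

int main(void)
{
	/* msecs -> jiffies at HZ=100 is "multiply by 100/1000": with b=32
	 * the constant replaces the division at runtime. */
	unsigned int b = 32;
	uint64_t m = fmul(b, 100, 1000);
	uint64_t msecs = 250;

	printf("multiplier: %#llx\n", (unsigned long long)m);
	printf("%llu ms -> %llu jiffies\n", (unsigned long long)msecs,
	       (unsigned long long)((msecs * m) >> b));    /* 25 */
	return 0;
}
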
tick-internal.h
79 static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { } in tick_do_periodic_broadcast() argument
/kernel/trace/
trace_events_filter_test.h
12 TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h),
14 TP_ARGS(a, b, c, d, e, f, g, h),
20 __field(int, d)
31 __entry->d = d;
39 __entry->a, __entry->b, __entry->c, __entry->d,
trace_events_filter.c
2107 .rec = { .a = va, .b = vb, .c = vc, .d = vd, \
2221 struct test_filter_data_t *d = &test_filter_data[i]; in ftrace_test_event_filter() local
2225 d->filter, false, &filter); in ftrace_test_event_filter()
2229 d->filter, err); in ftrace_test_event_filter()
2241 if (*d->not_visited) in ftrace_test_event_filter()
2242 update_pred_fn(filter, d->not_visited); in ftrace_test_event_filter()
2245 err = filter_match_preds(filter, &d->rec); in ftrace_test_event_filter()
2255 d->filter); in ftrace_test_event_filter()
2259 if (err != d->match) { in ftrace_test_event_filter()
2262 d->filter, d->match); in ftrace_test_event_filter()
trace_output.c
507 char trace_find_mark(unsigned long long d) in trace_find_mark() argument
513 if (d > mark[i].val) in trace_find_mark()
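
trace_find_mark() scans a descending table of latency thresholds and returns a one-character mark for the first threshold the duration exceeds. A standalone version of that lookup (the thresholds and symbols below only suggest the flavor; they are not copied from trace_output.c):

#include <stddef.h>
#include <stdio.h>

/* Descending threshold table mapping a duration to a severity mark. */
static const struct { unsigned long long ns; char sym; } marks[] = {
	{ 1000000000ULL, '$' },   /* over 1 s   */
	{ 1000000ULL,    '#' },   /* over 1 ms  */
	{ 10000ULL,      '+' },   /* over 10 us */
};

static char toy_find_mark(unsigned long long d)
{
	for (size_t i = 0; i < sizeof(marks) / sizeof(marks[0]); i++)
		if (d > marks[i].ns)
			return marks[i].sym;
	return ' ';               /* fast enough not to flag */
}

int main(void)
{
	printf("[%c] 2ms\n", toy_find_mark(2000000ULL));
	printf("[%c] 1us\n", toy_find_mark(1000ULL));
	return 0;
}
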
trace.h
1073 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { } in ftrace_init_tracefs() argument
1074 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { } in ftrace_init_tracefs_toplevel() argument
/kernel/sched/
topology.c
1223 static void __free_domain_allocs(struct s_data *d, enum s_alloc what, in __free_domain_allocs() argument
1228 if (!atomic_read(&d->rd->refcount)) in __free_domain_allocs()
1229 free_rootdomain(&d->rd->rcu); in __free_domain_allocs()
1232 free_percpu(d->sd); in __free_domain_allocs()
1243 __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map) in __visit_domain_allocation_hell() argument
1245 memset(d, 0, sizeof(*d)); in __visit_domain_allocation_hell()
1249 d->sd = alloc_percpu(struct sched_domain *); in __visit_domain_allocation_hell()
1250 if (!d->sd) in __visit_domain_allocation_hell()
1252 d->rd = alloc_rootdomain(); in __visit_domain_allocation_hell()
1253 if (!d->rd) in __visit_domain_allocation_hell()
[all …]
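
__free_domain_allocs() pairs with __visit_domain_allocation_hell(): allocation proceeds stage by stage, and on failure the caller passes the stage reached into a switch whose cases fall through, unwinding exactly what was allocated. A reduced model of that staged-unwind idiom (two stages instead of the scheduler's full set):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stages of a multi-step allocation, mirroring enum s_alloc. */
enum toy_alloc { TOY_NONE, TOY_SD, TOY_ROOTDOMAIN };

struct toy_data { void *sd; void *rd; };

/* Model of __free_domain_allocs(): cases fall through so that passing
 * the last stage reached unwinds everything allocated so far. */
static void toy_free(struct toy_data *d, enum toy_alloc what)
{
	switch (what) {
	case TOY_ROOTDOMAIN:
		free(d->rd);        /* fall through */
	case TOY_SD:
		free(d->sd);        /* fall through */
	case TOY_NONE:
		break;
	}
}

/* Model of __visit_domain_allocation_hell(): allocate stage by stage,
 * returning how far we got so the caller can unwind on failure. */
static enum toy_alloc toy_alloc_all(struct toy_data *d)
{
	memset(d, 0, sizeof(*d));
	d->sd = malloc(64);
	if (!d->sd)
		return TOY_NONE;
	d->rd = malloc(64);
	if (!d->rd)
		return TOY_SD;
	return TOY_ROOTDOMAIN;
}

int main(void)
{
	struct toy_data d;
	enum toy_alloc got = toy_alloc_all(&d);

	printf("reached stage %d\n", got);
	toy_free(&d, got);          /* frees exactly what was allocated */
	return 0;
}
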
rt.c
2482 struct rt_schedulable_data *d = data; in tg_rt_schedulable() local
2490 if (tg == d->tg) { in tg_rt_schedulable()
2491 period = d->rt_period; in tg_rt_schedulable()
2492 runtime = d->rt_runtime; in tg_rt_schedulable()
2522 if (child == d->tg) { in tg_rt_schedulable()
2523 period = d->rt_period; in tg_rt_schedulable()
2524 runtime = d->rt_runtime; in tg_rt_schedulable()
core.c
7551 struct cfs_schedulable_data *d) in normalize_cfs_quota() argument
7555 if (tg == d->tg) { in normalize_cfs_quota()
7556 period = d->period; in normalize_cfs_quota()
7557 quota = d->quota; in normalize_cfs_quota()
7572 struct cfs_schedulable_data *d = data; in tg_cfs_schedulable_down() local
7581 quota = normalize_cfs_quota(tg, d); in tg_cfs_schedulable_down()
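
tg_rt_schedulable() and normalize_cfs_quota() share one trick for validating a proposed bandwidth change: walk the task groups using each group's current period and runtime, except for the group being modified, where the tentative values carried in *d are substituted. A reduced model (the tree walk becomes an array scan):

#include <stdio.h>

struct toy_tg { int id; long period; long runtime; };

/* The pending change being validated, like rt_schedulable_data. */
struct toy_change { int tg_id; long period; long runtime; };

/* Pick the values to account for one group: its current settings,
 * unless it is the group whose change we are trying out. */
static void toy_tg_values(const struct toy_tg *tg, const struct toy_change *d,
			  long *period, long *runtime)
{
	if (tg->id == d->tg_id) {
		*period = d->period;     /* use the proposed values */
		*runtime = d->runtime;
	} else {
		*period = tg->period;
		*runtime = tg->runtime;
	}
}

int main(void)
{
	struct toy_tg groups[] = { { 1, 100, 50 }, { 2, 100, 30 } };
	struct toy_change d = { .tg_id = 2, .period = 100, .runtime = 40 };
	long total = 0;

	for (int i = 0; i < 2; i++) {
		long period, runtime;

		toy_tg_values(&groups[i], &d, &period, &runtime);
		total += runtime * 100 / period;   /* accumulate utilization % */
	}
	printf("proposed total utilization: %ld%%\n", total);  /* 90% */
	return 0;
}
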
/kernel/events/
uprobes.c
374 __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d) in __update_ref_ctr() argument
382 if (!vaddr || !d) in __update_ref_ctr()
398 if (unlikely(*ptr + d < 0)) { in __update_ref_ctr()
400 "curr val: %d, delta: %d\n", vaddr, *ptr, d); in __update_ref_ctr()
405 *ptr += d; in __update_ref_ctr()
414 struct mm_struct *mm, short d) in update_ref_ctr_warn() argument
418 d > 0 ? "increment" : "decrement", uprobe->inode->i_ino, in update_ref_ctr_warn()
424 short d) in update_ref_ctr() argument
434 ret = __update_ref_ctr(mm, rc_vaddr, d); in update_ref_ctr()
436 update_ref_ctr_warn(uprobe, mm, d); in update_ref_ctr()
[all …]
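
__update_ref_ctr() applies a signed delta to a ref-counter in user memory, refusing any update that would drive the counter negative; the kernel warns and bails out in that case. The guard, isolated (error handling simplified to a return code):

#include <stdio.h>

/* Model of __update_ref_ctr(): apply a signed delta to a reference
 * counter, refusing updates that would make it negative. */
static int toy_update_ref_ctr(short *ptr, short d)
{
	if (!d)
		return 0;                /* nothing to do */
	if (*ptr + d < 0) {              /* promoted to int: no overflow */
		fprintf(stderr, "ref_ctr going negative: curr %d, delta %d\n",
			*ptr, d);
		return -1;               /* the kernel warns and bails here */
	}
	*ptr += d;
	return 0;
}

int main(void)
{
	short ctr = 0;

	toy_update_ref_ctr(&ctr, 1);     /* probe installed: 0 -> 1 */
	toy_update_ref_ctr(&ctr, -1);    /* probe removed:  1 -> 0 */
	if (toy_update_ref_ctr(&ctr, -1))
		printf("underflow rejected, ctr = %d\n", ctr);
	return 0;
}
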
/kernel/locking/
rtmutex.h
22 #define debug_rt_mutex_deadlock(d, a ,l) do { } while (0) argument
/kernel/rcu/
tree.c
3423 ulong d; in rcu_init_geometry() local
3434 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; in rcu_init_geometry()
3436 jiffies_till_first_fqs = d; in rcu_init_geometry()
3438 jiffies_till_next_fqs = d; in rcu_init_geometry()
