
Searched refs:d (Results 1 – 20 of 20) sorted by relevance

/kernel/irq/
generic-chip.c
24 void irq_gc_noop(struct irq_data *d) in irq_gc_noop() argument
35 void irq_gc_mask_disable_reg(struct irq_data *d) in irq_gc_mask_disable_reg() argument
37 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); in irq_gc_mask_disable_reg()
38 struct irq_chip_type *ct = irq_data_get_chip_type(d); in irq_gc_mask_disable_reg()
39 u32 mask = d->mask; in irq_gc_mask_disable_reg()
54 void irq_gc_mask_set_bit(struct irq_data *d) in irq_gc_mask_set_bit() argument
56 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); in irq_gc_mask_set_bit()
57 struct irq_chip_type *ct = irq_data_get_chip_type(d); in irq_gc_mask_set_bit()
58 u32 mask = d->mask; in irq_gc_mask_set_bit()
74 void irq_gc_mask_clr_bit(struct irq_data *d) in irq_gc_mask_clr_bit() argument
[all …]
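
The generic-chip helpers above all follow one pattern: take the per-chip lock, use the precomputed bit in d->mask, write a register, and keep the cached mask coherent. A minimal userspace sketch of that pattern, with the memory-mapped disable register replaced by a plain variable and every name invented for the demo:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

struct demo_gc {
        pthread_mutex_t lock;   /* stands in for irq_gc_lock()/irq_gc_unlock() */
        uint32_t disable_reg;   /* stands in for the memory-mapped register */
        uint32_t mask_cache;
};

/* Same shape as irq_gc_mask_disable_reg() above: set the interrupt's bit
 * in the disable register and drop it from the cached mask, under the
 * chip lock. */
static void demo_mask_disable(struct demo_gc *gc, uint32_t mask)
{
        pthread_mutex_lock(&gc->lock);
        gc->disable_reg |= mask;
        gc->mask_cache &= ~mask;
        pthread_mutex_unlock(&gc->lock);
}

int main(void)
{
        struct demo_gc gc = { PTHREAD_MUTEX_INITIALIZER, 0, 0xff };

        demo_mask_disable(&gc, 1u << 3);
        printf("cache=0x%x disable=0x%x\n", gc.mask_cache, gc.disable_reg);
        return 0;
}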
internals.h
171 static inline void irqd_set_move_pending(struct irq_data *d) in irqd_set_move_pending() argument
173 d->state_use_accessors |= IRQD_SETAFFINITY_PENDING; in irqd_set_move_pending()
176 static inline void irqd_clr_move_pending(struct irq_data *d) in irqd_clr_move_pending() argument
178 d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING; in irqd_clr_move_pending()
181 static inline void irqd_clear(struct irq_data *d, unsigned int mask) in irqd_clear() argument
183 d->state_use_accessors &= ~mask; in irqd_clear()
186 static inline void irqd_set(struct irq_data *d, unsigned int mask) in irqd_set() argument
188 d->state_use_accessors |= mask; in irqd_set()
191 static inline bool irqd_has_set(struct irq_data *d, unsigned int mask) in irqd_has_set() argument
193 return d->state_use_accessors & mask; in irqd_has_set()
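
The internals.h accessors above are thin wrappers over bit operations on state_use_accessors. Reduced to a self-contained demo (the struct, flag, and function names below are simplified stand-ins, not the kernel's):

#include <stdbool.h>
#include <stdio.h>

#define DEMO_SETAFFINITY_PENDING (1u << 0)  /* stand-in for IRQD_SETAFFINITY_PENDING */

struct demo_irq_data {
        unsigned int state;  /* plays the role of state_use_accessors */
};

static inline void demo_set(struct demo_irq_data *d, unsigned int mask)
{
        d->state |= mask;
}

static inline void demo_clear(struct demo_irq_data *d, unsigned int mask)
{
        d->state &= ~mask;
}

static inline bool demo_has_set(struct demo_irq_data *d, unsigned int mask)
{
        return d->state & mask;
}

int main(void)
{
        struct demo_irq_data d = { 0 };

        demo_set(&d, DEMO_SETAFFINITY_PENDING);
        printf("pending: %d\n", demo_has_set(&d, DEMO_SETAFFINITY_PENDING));  /* 1 */
        demo_clear(&d, DEMO_SETAFFINITY_PENDING);
        printf("pending: %d\n", demo_has_set(&d, DEMO_SETAFFINITY_PENDING));  /* 0 */
        return 0;
}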
manage.c
356 setup_affinity(unsigned int irq, struct irq_desc *d, struct cpumask *mask) in setup_affinity() argument
935 struct irq_data *d = &desc->irq_data; in irq_request_resources() local
936 struct irq_chip *c = d->chip; in irq_request_resources()
938 return c->irq_request_resources ? c->irq_request_resources(d) : 0; in irq_request_resources()
943 struct irq_data *d = &desc->irq_data; in irq_release_resources() local
944 struct irq_chip *c = d->chip; in irq_release_resources()
947 c->irq_release_resources(d); in irq_release_resources()
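
irq_request_resources()/irq_release_resources() above show the optional-callback idiom: a chip method is invoked only if the driver supplied it, and the request path defaults to success. A stand-alone restatement with invented names:

#include <stdio.h>

struct demo_chip {
        int  (*request_resources)(void *ctx);  /* optional, may be NULL */
        void (*release_resources)(void *ctx);  /* optional, may be NULL */
};

/* Mirrors: return c->irq_request_resources ? c->irq_request_resources(d) : 0; */
static int demo_request(struct demo_chip *c, void *ctx)
{
        return c->request_resources ? c->request_resources(ctx) : 0;
}

static void demo_release(struct demo_chip *c, void *ctx)
{
        if (c->release_resources)
                c->release_resources(ctx);
}

int main(void)
{
        struct demo_chip bare = { 0 };  /* chip that provides no hooks */

        printf("request: %d\n", demo_request(&bare, NULL));  /* 0: default success */
        demo_release(&bare, NULL);                           /* quietly a no-op */
        return 0;
}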
irqdomain.c
653 int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr, in irq_domain_xlate_onecell() argument
672 int irq_domain_xlate_twocell(struct irq_domain *d, struct device_node *ctrlr, in irq_domain_xlate_twocell() argument
695 int irq_domain_xlate_onetwocell(struct irq_domain *d, in irq_domain_xlate_onetwocell() argument
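
The xlate helpers decode device-tree interrupt specifiers: with one cell, the value is the hardware IRQ number; with two cells, the second value carries the trigger type. A hedged sketch of the two-cell decode, with the sense-mask constant and error value as simplified stand-ins for the kernel's:

#include <stdio.h>

#define DEMO_TYPE_SENSE_MASK 0xf  /* stand-in for IRQ_TYPE_SENSE_MASK */

/* Cell 0 = hardware IRQ number, cell 1 = trigger type bits, as in
 * irq_domain_xlate_twocell() above. */
static int demo_xlate_twocell(const unsigned int *intspec, unsigned int intsize,
                              unsigned long *out_hwirq, unsigned int *out_type)
{
        if (intsize < 2)
                return -1;  /* the kernel returns -EINVAL here */
        *out_hwirq = intspec[0];
        *out_type = intspec[1] & DEMO_TYPE_SENSE_MASK;
        return 0;
}

int main(void)
{
        unsigned int spec[2] = { 29, 4 };  /* hwirq 29, made-up type bits */
        unsigned long hwirq;
        unsigned int type;

        if (!demo_xlate_twocell(spec, 2, &hwirq, &type))
                printf("hwirq=%lu type=0x%x\n", hwirq, type);
        return 0;
}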
/kernel/
delayacct.c
83 int __delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk) in __delayacct_add_tsk() argument
91 tmp = (s64)d->cpu_run_real_total; in __delayacct_add_tsk()
93 d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp; in __delayacct_add_tsk()
96 tmp = (s64)d->cpu_scaled_run_real_total; in __delayacct_add_tsk()
98 d->cpu_scaled_run_real_total = in __delayacct_add_tsk()
99 (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp; in __delayacct_add_tsk()
109 d->cpu_count += t1; in __delayacct_add_tsk()
111 tmp = (s64)d->cpu_delay_total + t2; in __delayacct_add_tsk()
112 d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp; in __delayacct_add_tsk()
114 tmp = (s64)d->cpu_run_virtual_total + t3; in __delayacct_add_tsk()
[all …]
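
Every accumulation in __delayacct_add_tsk() uses the same overflow guard: add into a temporary and restart the counter at zero if the sum wrapped past the old value. A stand-alone restatement (the kernel builds with -fno-strict-overflow, so its signed form is safe there; the demo below sticks to well-defined unsigned wrap-around):

#include <stdint.h>
#include <stdio.h>

/* Mirrors: tmp = (s64)d->cpu_delay_total + t2;
 *          d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp; */
static uint64_t add_or_reset(uint64_t total, uint64_t delta)
{
        uint64_t tmp = total + delta;  /* unsigned add: wrap-around is defined */

        return ((int64_t)tmp < (int64_t)total) ? 0 : tmp;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)add_or_reset(100, 5));       /* 105 */
        printf("%llu\n", (unsigned long long)add_or_reset(INT64_MAX, 5)); /* 0: wrapped */
        return 0;
}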
audit_watch.c
361 struct dentry *d = kern_path_locked(watch->path, parent); in audit_get_nd() local
362 if (IS_ERR(d)) in audit_get_nd()
363 return PTR_ERR(d); in audit_get_nd()
365 if (d->d_inode) { in audit_get_nd()
367 watch->dev = d->d_inode->i_sb->s_dev; in audit_get_nd()
368 watch->ino = d->d_inode->i_ino; in audit_get_nd()
370 dput(d); in audit_get_nd()
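
audit_get_nd() leans on the kernel's ERR_PTR convention: kern_path_locked() returns either a valid dentry or a small negative errno encoded in the pointer itself, so one return value carries both cases. A simplified reimplementation of the idiom, for illustration only (the real helpers live in <linux/err.h>):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

static void *err_ptr(intptr_t error)      { return (void *)error; }
static int is_err(const void *ptr)        { return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO; }
static intptr_t ptr_err(const void *ptr)  { return (intptr_t)ptr; }

/* A lookup that, like kern_path_locked(), reports failure in-band. */
static void *demo_lookup(int fail)
{
        static int object;

        return fail ? err_ptr(-ENOENT) : (void *)&object;
}

int main(void)
{
        void *d = demo_lookup(1);

        if (is_err(d)) {  /* same shape as: if (IS_ERR(d)) return PTR_ERR(d); */
                printf("lookup failed: %ld\n", (long)ptr_err(d));  /* -2, ENOENT */
                return 1;
        }
        return 0;
}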
auditsc.c
111 struct audit_aux_data d; member
122 struct audit_aux_data d; member
1637 const struct dentry *d, *parent; in handle_path() local
1647 d = dentry; in handle_path()
1651 struct inode *inode = d->d_inode; in handle_path()
1662 parent = d->d_parent; in handle_path()
1663 if (parent == d) in handle_path()
1665 d = parent; in handle_path()
2365 axp->d.type = AUDIT_OBJ_PID; in __audit_signal_info()
2366 axp->d.next = ctx->aux_pids; in __audit_signal_info()
[all …]
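
handle_path() above walks dentries upward until a dentry is its own parent, which is how the root is recognized (the root's d_parent points back at itself). The same loop with a toy node type standing in for struct dentry:

#include <stdio.h>

struct node {
        const char *name;
        const struct node *parent;  /* the root points at itself, like d_parent */
};

static void walk_to_root(const struct node *d)
{
        for (;;) {
                const struct node *parent = d->parent;

                printf("%s\n", d->name);
                if (parent == d)  /* same termination test as handle_path() */
                        break;
                d = parent;
        }
}

int main(void)
{
        struct node root = { "/", &root };
        struct node etc  = { "etc", &root };
        struct node conf = { "audit.conf", &etc };

        walk_to_root(&conf);
        return 0;
}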
audit.h
289 #define audit_watch_compare(w, i, d) 0 argument
/kernel/power/
swap.c
501 struct crc_data *d = data; in crc32_threadfn() local
505 wait_event(d->go, atomic_read(&d->ready) || in crc32_threadfn()
508 d->thr = NULL; in crc32_threadfn()
509 atomic_set(&d->stop, 1); in crc32_threadfn()
510 wake_up(&d->done); in crc32_threadfn()
513 atomic_set(&d->ready, 0); in crc32_threadfn()
515 for (i = 0; i < d->run_threads; i++) in crc32_threadfn()
516 *d->crc32 = crc32_le(*d->crc32, in crc32_threadfn()
517 d->unc[i], *d->unc_len[i]); in crc32_threadfn()
518 atomic_set(&d->stop, 1); in crc32_threadfn()
[all …]
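
crc32_threadfn() is the worker half of a ready/stop handshake: it sleeps until the coordinator raises ready, clears the flag, runs the checksum pass over the buffers, raises stop, and wakes the waiter. The sketch below approximates wait_event()/wake_up() with a pthread mutex and condition variable; all names are invented for the demo:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct crc_demo {
        pthread_mutex_t lock;
        pthread_cond_t cond;
        bool ready, stop, exiting;
};

static void *crc_worker(void *arg)
{
        struct crc_demo *d = arg;

        pthread_mutex_lock(&d->lock);
        while (!d->exiting) {
                while (!d->ready && !d->exiting)    /* wait_event(d->go, ...) */
                        pthread_cond_wait(&d->cond, &d->lock);
                if (d->exiting)
                        break;
                d->ready = false;                   /* atomic_set(&d->ready, 0) */
                /* ... the crc32_le() loop over the buffers runs here ... */
                d->stop = true;                     /* atomic_set(&d->stop, 1) */
                pthread_cond_broadcast(&d->cond);   /* wake_up(&d->done) */
        }
        pthread_mutex_unlock(&d->lock);
        return NULL;
}

int main(void)
{
        struct crc_demo d = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER,
                              false, false, false };
        pthread_t thr;

        pthread_create(&thr, NULL, crc_worker, &d);

        pthread_mutex_lock(&d.lock);
        d.ready = true;                             /* kick one round of work */
        pthread_cond_broadcast(&d.cond);
        while (!d.stop)                             /* wait for the worker */
                pthread_cond_wait(&d.cond, &d.lock);
        d.exiting = true;                           /* ask the worker to leave */
        pthread_cond_broadcast(&d.cond);
        pthread_mutex_unlock(&d.lock);

        pthread_join(thr, NULL);
        printf("one checksum round completed\n");
        return 0;
}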
wakeup_reason.c
246 struct dentry *d; in suspend_time_debug_init() local
248 d = debugfs_create_file("suspend_time", 0755, NULL, NULL, in suspend_time_debug_init()
250 if (!d) { in suspend_time_debug_init()
/kernel/time/
timeconst.bc
14 define fmul(b,n,d) {
15 return (2^b*n+d-1)/d;
20 define fadj(b,n,d) {
22 d = d/gcd(n,d);
23 v = 2^b*(d-1)/d;
31 define fmuls(b,n,d) {
34 m = fmul(s,n,d);
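
timeconst.bc generates fixed-point conversion constants: fmul(b, n, d) is ceil(2^b * n / d), the shifted multiplier that approximates the ratio n/d to b bits. Restated in C and worked for jiffies-to-milliseconds at an assumed HZ of 250:

#include <stdint.h>
#include <stdio.h>

/* C restatement of the bc function: fmul(b, n, d) = ceil(2^b * n / d). */
static uint64_t fmul(unsigned int b, uint64_t n, uint64_t d)
{
        return ((1ULL << b) * n + d - 1) / d;
}

int main(void)
{
        /* 250 divides 1000 evenly, so the 32-bit multiplier is exactly 4 << 32 */
        printf("jiffies->msec multiplier: %llu\n",
               (unsigned long long)fmul(32, 1000, 250));
        return 0;
}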
timekeeping_debug.c
59 struct dentry *d; in tk_debug_sleep_time_init() local
61 d = debugfs_create_file("sleep_time", 0444, NULL, NULL, in tk_debug_sleep_time_init()
63 if (!d) { in tk_debug_sleep_time_init()
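
Both debugfs registrations above (suspend_time and sleep_time) follow the same old-style pattern: create the file and treat a NULL dentry as failure. A hedged, module-style sketch of that registration with an invented file name and minimal seq_file ops; note that current kernels no longer check the return value of debugfs_create_file():

#include <linux/debugfs.h>
#include <linux/module.h>
#include <linux/seq_file.h>

static struct dentry *demo_file;

static int demo_show(struct seq_file *s, void *unused)
{
        seq_puts(s, "hello from debugfs\n");
        return 0;
}

static int demo_open(struct inode *inode, struct file *file)
{
        return single_open(file, demo_show, NULL);
}

static const struct file_operations demo_fops = {
        .open    = demo_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = single_release,
};

static int __init demo_init(void)
{
        demo_file = debugfs_create_file("demo_sleep_time", 0444, NULL, NULL,
                                        &demo_fops);
        if (!demo_file)  /* same failure check as the two snippets above */
                return -ENOMEM;
        return 0;
}

static void __exit demo_exit(void)
{
        debugfs_remove(demo_file);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");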
tick-internal.h
140 static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { } in tick_do_periodic_broadcast() argument
/kernel/trace/
trace_events_filter_test.h
11 TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h),
13 TP_ARGS(a, b, c, d, e, f, g, h),
19 __field(int, d)
30 __entry->d = d;
38 __entry->a, __entry->b, __entry->c, __entry->d,
trace_events_filter.c
501 struct filter_match_preds_data *d = data; in filter_match_preds_cb() local
512 d->match = process_ops(d->preds, pred, d->rec); in filter_match_preds_cb()
515 d->match = pred->fn(pred, d->rec); in filter_match_preds_cb()
528 if (!!d->match == (pred->op == OP_OR)) in filter_match_preds_cb()
1421 struct check_pred_data *d = data; in check_pred_tree_cb() local
1423 if (WARN_ON(d->count++ > d->max)) { in check_pred_tree_cb()
1483 struct fold_pred_data *d = data; in fold_pred_cb() local
1484 struct filter_pred *root = d->root; in fold_pred_cb()
1491 if (WARN_ON(d->count == d->children)) { in fold_pred_cb()
1497 root->ops[d->count++] = pred->index; in fold_pred_cb()
[all …]
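
Each of the three callbacks above recovers a typed context from the opaque void *data cursor that the predicate-tree walker threads through every visit. The walker/callback contract in miniature, with all names invented:

#include <stdio.h>

struct count_data {
        int count;
};

/* Each visit receives the node plus an opaque caller-owned cursor. */
typedef int (*visit_fn)(int node, void *data);

static int count_cb(int node, void *data)
{
        struct count_data *d = data;  /* same cast-from-void* as the callbacks above */

        (void)node;
        d->count++;
        return 0;
}

static void walk(const int *nodes, int n, visit_fn cb, void *data)
{
        for (int i = 0; i < n; i++)
                if (cb(nodes[i], data))
                        break;  /* a nonzero return stops the walk early */
}

int main(void)
{
        int nodes[] = { 1, 2, 3, 4 };
        struct count_data d = { 0 };

        walk(nodes, 4, count_cb, &d);
        printf("visited %d nodes\n", d.count);
        return 0;
}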
/kernel/sched/
core.c
6465 static void __free_domain_allocs(struct s_data *d, enum s_alloc what, in __free_domain_allocs() argument
6470 if (!atomic_read(&d->rd->refcount)) in __free_domain_allocs()
6471 free_rootdomain(&d->rd->rcu); /* fall through */ in __free_domain_allocs()
6473 free_percpu(d->sd); /* fall through */ in __free_domain_allocs()
6481 static enum s_alloc __visit_domain_allocation_hell(struct s_data *d, in __visit_domain_allocation_hell() argument
6484 memset(d, 0, sizeof(*d)); in __visit_domain_allocation_hell()
6488 d->sd = alloc_percpu(struct sched_domain *); in __visit_domain_allocation_hell()
6489 if (!d->sd) in __visit_domain_allocation_hell()
6491 d->rd = alloc_rootdomain(); in __visit_domain_allocation_hell()
6492 if (!d->rd) in __visit_domain_allocation_hell()
[all …]
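
__free_domain_allocs() uses deliberate switch fall-through so cleanup starts at whichever allocation stage failed and unwinds everything acquired before it, and __visit_domain_allocation_hell() returns the matching enum at each failure point. The staged-unwind idiom in a compact stand-alone form (stage names invented):

#include <stdlib.h>

enum demo_stage { st_second, st_first, st_none };

struct demo_data {
        void *first, *second;
};

/* Free from the failed stage downward; each case falls through on purpose. */
static void demo_free(struct demo_data *d, enum demo_stage what)
{
        switch (what) {
        case st_second:
                free(d->second);  /* fall through */
        case st_first:
                free(d->first);   /* fall through */
        case st_none:
                break;
        }
}

static enum demo_stage demo_alloc(struct demo_data *d)
{
        d->first = malloc(64);
        if (!d->first)
                return st_none;   /* nothing allocated, nothing to unwind */
        d->second = malloc(64);
        if (!d->second)
                return st_first;  /* unwind only the first stage */
        return st_second;         /* fully allocated */
}

int main(void)
{
        struct demo_data d = { 0, 0 };

        demo_free(&d, demo_alloc(&d));  /* frees exactly what was allocated */
        return 0;
}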
fair.c
2813 s64 d = se->vruntime - cfs_rq->min_vruntime; in check_spread() local
2815 if (d < 0) in check_spread()
2816 d = -d; in check_spread()
2818 if (d > 3*sysctl_sched_latency) in check_spread()
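
check_spread() takes the absolute distance of a task's vruntime from the queue minimum and flags it when it exceeds three scheduling-latency periods. The same arithmetic with made-up numbers (6 ms is a common default for sysctl_sched_latency, but it is tunable):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        int64_t sysctl_sched_latency = 6000000;  /* 6 ms in ns, assumed default */
        int64_t min_vruntime = 1000000000;
        int64_t vruntime = 1025000000;
        int64_t d = vruntime - min_vruntime;     /* may be negative */

        if (d < 0)
                d = -d;                          /* manual abs, as in check_spread() */
        printf("spread %lld ns: %s\n", (long long)d,
               d > 3 * sysctl_sched_latency ? "too wide" : "ok");
        return 0;
}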
/kernel/locking/
rtmutex.h
23 #define debug_rt_mutex_deadlock(d, a ,l) do { } while (0) argument
/kernel/rcu/
tree.c
3679 ulong d; in rcu_init_geometry() local
3692 d = RCU_JIFFIES_TILL_FORCE_QS + nr_cpu_ids / RCU_JIFFIES_FQS_DIV; in rcu_init_geometry()
3694 jiffies_till_first_fqs = d; in rcu_init_geometry()
3696 jiffies_till_next_fqs = d; in rcu_init_geometry()
tree_plugin.h
2223 bool d; in rcu_nocb_wait_gp() local
2243 (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c))); in rcu_nocb_wait_gp()
2244 if (likely(d)) in rcu_nocb_wait_gp()