/kernel/bpf/ |
D | dispatcher.c |
    25  struct bpf_dispatcher *d, struct bpf_prog *prog)    in bpf_dispatcher_find_prog() argument
    30  if (prog == d->progs[i].prog)    in bpf_dispatcher_find_prog()
    31  return &d->progs[i];    in bpf_dispatcher_find_prog()
    37  struct bpf_dispatcher *d)    in bpf_dispatcher_find_free() argument
    39  return bpf_dispatcher_find_prog(d, NULL);    in bpf_dispatcher_find_free()
    42  static bool bpf_dispatcher_add_prog(struct bpf_dispatcher *d,    in bpf_dispatcher_add_prog() argument
    50  entry = bpf_dispatcher_find_prog(d, prog);    in bpf_dispatcher_add_prog()
    56  entry = bpf_dispatcher_find_free(d);    in bpf_dispatcher_add_prog()
    63  d->num_progs++;    in bpf_dispatcher_add_prog()
    67  static bool bpf_dispatcher_remove_prog(struct bpf_dispatcher *d,    in bpf_dispatcher_remove_prog() argument
    [all …]
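The dispatcher keeps its BPF programs in a fixed-size array: bpf_dispatcher_find_prog() scans for a matching program, and bpf_dispatcher_find_free() reuses the same scan with prog == NULL as the empty-slot sentinel. A minimal userspace sketch of that sentinel-scan pattern (slot/find_slot are hypothetical stand-ins, not the kernel's types):

    #include <stddef.h>
    #include <stdio.h>

    #define MAX_PROGS 16                       /* fixed slot count; the kernel uses BPF_DISPATCHER_MAX */

    struct slot { void *prog; };

    static struct slot slots[MAX_PROGS];

    /* Scan for a slot holding 'prog'; passing NULL finds a free slot. */
    static struct slot *find_slot(void *prog)
    {
            for (size_t i = 0; i < MAX_PROGS; i++)
                    if (slots[i].prog == prog)
                            return &slots[i];
            return NULL;
    }

    int main(void)
    {
            int dummy;
            struct slot *s = find_slot(NULL);  /* free slot, same trick as find_free() */
            if (s)
                    s->prog = &dummy;
            printf("stored: %d\n", find_slot(&dummy) != NULL);
            return 0;
    }

One scan routine serving both lookups keeps add/remove logic small, at the cost of forbidding NULL as a valid entry.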
|
/kernel/irq/ |
D | generic-chip.c |
    25  void irq_gc_noop(struct irq_data *d)    in irq_gc_noop() argument
    36  void irq_gc_mask_disable_reg(struct irq_data *d)    in irq_gc_mask_disable_reg() argument
    38  struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);    in irq_gc_mask_disable_reg()
    39  struct irq_chip_type *ct = irq_data_get_chip_type(d);    in irq_gc_mask_disable_reg()
    40  u32 mask = d->mask;    in irq_gc_mask_disable_reg()
    55  void irq_gc_mask_set_bit(struct irq_data *d)    in irq_gc_mask_set_bit() argument
    57  struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);    in irq_gc_mask_set_bit()
    58  struct irq_chip_type *ct = irq_data_get_chip_type(d);    in irq_gc_mask_set_bit()
    59  u32 mask = d->mask;    in irq_gc_mask_set_bit()
    75  void irq_gc_mask_clr_bit(struct irq_data *d)    in irq_gc_mask_clr_bit() argument
    [all …]
|
D | cpuhotplug.c |
    20  static inline bool irq_needs_fixup(struct irq_data *d)    in irq_needs_fixup() argument
    22  const struct cpumask *m = irq_data_get_effective_affinity_mask(d);    in irq_needs_fixup()
    32  m = irq_data_get_affinity_mask(d);    in irq_needs_fixup()
    46  cpumask_pr_args(m), d->irq, cpu);    in irq_needs_fixup()
    55  struct irq_data *d = irq_desc_get_irq_data(desc);    in migrate_one_irq() local
    56  struct irq_chip *chip = irq_data_get_irq_chip(d);    in migrate_one_irq()
    57  bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);    in migrate_one_irq()
    68  pr_debug("IRQ %u: Unable to migrate away\n", d->irq);    in migrate_one_irq()
    81  if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {    in migrate_one_irq()
    107 affinity = irq_data_get_affinity_mask(d);    in migrate_one_irq()
    [all …]
|
D | internals.h |
    197 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)    argument
    199 static inline unsigned int irqd_get(struct irq_data *d)    in irqd_get() argument
    201 return __irqd_to_state(d);    in irqd_get()
    207 static inline void irqd_set_move_pending(struct irq_data *d)    in irqd_set_move_pending() argument
    209 __irqd_to_state(d) |= IRQD_SETAFFINITY_PENDING;    in irqd_set_move_pending()
    212 static inline void irqd_clr_move_pending(struct irq_data *d)    in irqd_clr_move_pending() argument
    214 __irqd_to_state(d) &= ~IRQD_SETAFFINITY_PENDING;    in irqd_clr_move_pending()
    217 static inline void irqd_set_managed_shutdown(struct irq_data *d)    in irqd_set_managed_shutdown() argument
    219 __irqd_to_state(d) |= IRQD_MANAGED_SHUTDOWN;    in irqd_set_managed_shutdown()
    222 static inline void irqd_clr_managed_shutdown(struct irq_data *d)    in irqd_clr_managed_shutdown() argument
    [all …]
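Every irqd state helper funnels through the single __irqd_to_state() accessor, so the raw state word is only touched in one place. A hedged userspace sketch of that set/clear-bit accessor shape (flag names and the irq_state type are stand-ins, and ACCESS_PRIVATE is omitted):

    #include <stdio.h>

    #define SETAFFINITY_PENDING (1U << 0)      /* stand-ins for the IRQD_* bits */
    #define MANAGED_SHUTDOWN    (1U << 1)

    struct irq_state { unsigned int state; };

    #define to_state(d) ((d)->state)           /* single accessor, like __irqd_to_state() */

    static inline void set_move_pending(struct irq_state *d) { to_state(d) |= SETAFFINITY_PENDING; }
    static inline void clr_move_pending(struct irq_state *d) { to_state(d) &= ~SETAFFINITY_PENDING; }

    int main(void)
    {
            struct irq_state d = { 0 };
            set_move_pending(&d);
            printf("pending=%u\n", !!(to_state(&d) & SETAFFINITY_PENDING));
            clr_move_pending(&d);
            printf("pending=%u\n", !!(to_state(&d) & SETAFFINITY_PENDING));
            return 0;
    }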
|
D | irqdomain.c |
    41  static void debugfs_add_domain_dir(struct irq_domain *d);
    42  static void debugfs_remove_domain_dir(struct irq_domain *d);
    44  static inline void debugfs_add_domain_dir(struct irq_domain *d) { }    in debugfs_add_domain_dir() argument
    45  static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }    in debugfs_remove_domain_dir() argument
    779 static int irq_domain_translate(struct irq_domain *d,    in irq_domain_translate() argument
    784 if (d->ops->translate)    in irq_domain_translate()
    785 return d->ops->translate(d, fwspec, hwirq, type);    in irq_domain_translate()
    787 if (d->ops->xlate)    in irq_domain_translate()
    788 return d->ops->xlate(d, to_of_node(fwspec->fwnode),    in irq_domain_translate()
    1001 int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,    in irq_domain_xlate_onecell() argument
    [all …]
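irq_domain_translate() prefers the modern ->translate callback and only falls back to the legacy ->xlate one. A generic userspace sketch of that prefer-new-fall-back-to-legacy dispatch (ops/do_translate and the spec format are hypothetical):

    #include <stdio.h>

    struct ops {
            int (*translate)(const char *spec, unsigned long *out);  /* preferred */
            int (*xlate)(const char *spec, unsigned long *out);      /* legacy */
    };

    static int do_translate(const struct ops *ops, const char *spec, unsigned long *out)
    {
            if (ops->translate)
                    return ops->translate(spec, out);
            if (ops->xlate)
                    return ops->xlate(spec, out);
            return -1;                         /* no callback wired up */
    }

    static int old_xlate(const char *spec, unsigned long *out)
    {
            *out = (unsigned long)(spec[0] - '0');
            return 0;
    }

    int main(void)
    {
            struct ops legacy_only = { .xlate = old_xlate };
            unsigned long hwirq;

            if (!do_translate(&legacy_only, "7", &hwirq))
                    printf("hwirq=%lu\n", hwirq);
            return 0;
    }

Keeping the fallback in one wrapper lets old drivers implement only the legacy hook while new callers never see the difference.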
|
D | chip.c |
    197 struct irq_data *d = irq_desc_get_irq_data(desc);    in __irq_startup_managed() local
    199 if (!irqd_affinity_is_managed(d))    in __irq_startup_managed()
    202 irqd_clr_managed_shutdown(d);    in __irq_startup_managed()
    225 if (WARN_ON(irq_domain_activate_irq(d, false)))    in __irq_startup_managed()
    239 struct irq_data *d = irq_desc_get_irq_data(desc);    in __irq_startup() local
    243 WARN_ON_ONCE(!irqd_is_activated(d));    in __irq_startup()
    245 if (d->chip->irq_startup) {    in __irq_startup()
    246 ret = d->chip->irq_startup(d);    in __irq_startup()
    258 struct irq_data *d = irq_desc_get_irq_data(desc);    in irq_startup() local
    259 struct cpumask *aff = irq_data_get_affinity_mask(d);    in irq_startup()
    [all …]
|
D | manage.c |
    1406 struct irq_data *d = &desc->irq_data;    in irq_request_resources() local
    1407 struct irq_chip *c = d->chip;    in irq_request_resources()
    1409 return c->irq_request_resources ? c->irq_request_resources(d) : 0;    in irq_request_resources()
    1414 struct irq_data *d = &desc->irq_data;    in irq_release_resources() local
    1415 struct irq_chip *c = d->chip;    in irq_release_resources()
    1418 c->irq_release_resources(d);    in irq_release_resources()
    1423 struct irq_data *d = irq_desc_get_irq_data(desc);    in irq_supports_nmi() local
    1427 if (d->parent_data)    in irq_supports_nmi()
    1431 if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)    in irq_supports_nmi()
    1434 return d->chip->flags & IRQCHIP_SUPPORTS_NMI;    in irq_supports_nmi()
    [all …]
|
/kernel/ |
D | delayacct.c |
    120 int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)    in delayacct_add_tsk() argument
    128 tmp = (s64)d->cpu_run_real_total;    in delayacct_add_tsk()
    130 d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;    in delayacct_add_tsk()
    133 tmp = (s64)d->cpu_scaled_run_real_total;    in delayacct_add_tsk()
    135 d->cpu_scaled_run_real_total =    in delayacct_add_tsk()
    136 (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;    in delayacct_add_tsk()
    146 d->cpu_count += t1;    in delayacct_add_tsk()
    148 tmp = (s64)d->cpu_delay_total + t2;    in delayacct_add_tsk()
    149 d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;    in delayacct_add_tsk()
    151 tmp = (s64)d->cpu_run_virtual_total + t3;    in delayacct_add_tsk()
    [all …]
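Each accumulation in delayacct_add_tsk() uses the same guard: compute the sum, and if it compares below the old value the addition overflowed, so clamp to zero rather than store a wrapped total. A standalone illustration of that clamp (this sketch uses unsigned arithmetic, where wraparound is well-defined; the kernel does it with s64 casts):

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    /* Accumulate 'delta' into 'total', clamping to 0 if the sum wrapped,
     * mirroring delayacct's "(tmp < old) ? 0 : tmp" overflow check. */
    static uint64_t add_clamped(uint64_t total, uint64_t delta)
    {
            uint64_t tmp = total + delta;      /* unsigned wrap is well-defined */
            return (tmp < total) ? 0 : tmp;
    }

    int main(void)
    {
            printf("%" PRIu64 "\n", add_clamped(100, 20));            /* 120 */
            printf("%" PRIu64 "\n", add_clamped(UINT64_MAX - 1, 5));  /* wrapped -> 0 */
            return 0;
    }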
|
D | audit_watch.c |
    349 struct dentry *d = kern_path_locked(watch->path, parent);    in audit_get_nd() local
    350 if (IS_ERR(d))    in audit_get_nd()
    351 return PTR_ERR(d);    in audit_get_nd()
    352 if (d_is_positive(d)) {    in audit_get_nd()
    354 watch->dev = d->d_sb->s_dev;    in audit_get_nd()
    355 watch->ino = d_backing_inode(d)->i_ino;    in audit_get_nd()
    358 dput(d);    in audit_get_nd()
|
D | gen_kheaders.sh |
    28  for d in $dir_list; do
    29  all_dirs="$all_dirs $srctree/$d"
|
D | auditsc.c |
    109 struct audit_aux_data d;    member
    120 struct audit_aux_data d;    member
    1858 const struct dentry *d, *parent;    in handle_path() local
    1868 d = dentry;    in handle_path()
    1872 struct inode *inode = d_backing_inode(d);    in handle_path()
    1885 parent = d->d_parent;    in handle_path()
    1886 if (parent == d)    in handle_path()
    1888 d = parent;    in handle_path()
    2510 axp->d.type = AUDIT_OBJ_PID;    in audit_signal_info_syscall()
    2511 axp->d.next = ctx->aux_pids;    in audit_signal_info_syscall()
    [all …]
|
D | audit.h |
    308 #define audit_watch_compare(w, i, d) 0    argument
    314 #define audit_mark_compare(m, i, d) 0    argument
|
D | smp.c |
    190 #define CFD_SEQ(s, d, t, c) \    argument
    191 (union cfd_seq_cnt){ .u.src = s, .u.dst = d, .u.type = t, .u.cnt = c }
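CFD_SEQ() expands to a C99 compound literal, producing a fully initialized union value in a single expression that can be assigned or passed directly. A self-contained sketch of the same macro shape (seq_cnt and its field widths are invented stand-ins for cfd_seq_cnt):

    #include <stdio.h>

    union seq_cnt {
            struct {
                    unsigned int src  : 8;
                    unsigned int dst  : 8;
                    unsigned int type : 4;
                    unsigned int cnt  : 12;
            } u;
            unsigned int val;
    };

    /* Same shape as CFD_SEQ(): a macro expanding to a compound literal. */
    #define SEQ(s, d, t, c) \
            (union seq_cnt){ .u.src = (s), .u.dst = (d), .u.type = (t), .u.cnt = (c) }

    int main(void)
    {
            union seq_cnt x = SEQ(1, 2, 3, 4);
            printf("src=%u dst=%u\n", x.u.src, x.u.dst);
            return 0;
    }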
|
/kernel/power/ |
D | swap.c |
    610 struct crc_data *d = data;    in crc32_threadfn() local
    614 wait_event(d->go, atomic_read_acquire(&d->ready) ||    in crc32_threadfn()
    617 d->thr = NULL;    in crc32_threadfn()
    618 atomic_set_release(&d->stop, 1);    in crc32_threadfn()
    619 wake_up(&d->done);    in crc32_threadfn()
    622 atomic_set(&d->ready, 0);    in crc32_threadfn()
    624 for (i = 0; i < d->run_threads; i++)    in crc32_threadfn()
    625 *d->crc32 = crc32_le(*d->crc32,    in crc32_threadfn()
    626 d->unc[i], *d->unc_len[i]);    in crc32_threadfn()
    627 atomic_set_release(&d->stop, 1);    in crc32_threadfn()
    [all …]
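crc32_threadfn() is a parked worker: it sleeps until ->ready is set, consumes one batch of buffers, then raises ->stop with release semantics to signal completion. A rough userspace analogue with C11 atomics and pthreads, spinning where the kernel would sleep in wait_event() (all names invented; compile with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    struct crc_data {
            atomic_int ready, stop, exiting;
            unsigned int sum;                  /* stands in for the crc32 result */
    };

    static void *worker(void *arg)
    {
            struct crc_data *d = arg;

            for (;;) {
                    while (!atomic_load_explicit(&d->ready, memory_order_acquire))
                            if (atomic_load(&d->exiting))
                                    return NULL;   /* kthread_should_stop() analogue */
                    atomic_store(&d->ready, 0);
                    d->sum += 42;                  /* the "crc32_le()" batch */
                    atomic_store_explicit(&d->stop, 1, memory_order_release);
            }
    }

    int main(void)
    {
            struct crc_data d = { 0 };
            pthread_t thr;

            pthread_create(&thr, NULL, worker, &d);
            atomic_store_explicit(&d.ready, 1, memory_order_release); /* submit work */
            while (!atomic_load_explicit(&d.stop, memory_order_acquire))
                    ;                                                  /* wait for completion */
            atomic_store(&d.exiting, 1);
            pthread_join(thr, NULL);
            printf("sum=%u\n", d.sum);
            return 0;
    }

The acquire/release pairing on ready and stop is what lets the flags double as publication barriers for the data the worker reads and writes.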
|
D | energy_model.c |
    36  struct dentry *d;    in em_debug_create_ps() local
    42  d = debugfs_create_dir(name, pd);    in em_debug_create_ps()
    43  debugfs_create_ulong("frequency", 0444, d, &ps->frequency);    in em_debug_create_ps()
    44  debugfs_create_ulong("power", 0444, d, &ps->power);    in em_debug_create_ps()
    45  debugfs_create_ulong("cost", 0444, d, &ps->cost);    in em_debug_create_ps()
    69  struct dentry *d;    in em_debug_create_pd() local
    73  d = debugfs_create_dir(dev_name(dev), rootdir);    in em_debug_create_pd()
    76  debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus,    in em_debug_create_pd()
    79  debugfs_create_file("units", 0444, d, dev->em_pd, &em_debug_units_fops);    in em_debug_create_pd()
    83  em_debug_create_ps(&dev->em_pd->table[i], d);    in em_debug_create_pd()
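em_debug_create_pd() builds one debugfs directory per device and hangs simple attribute files off it; debugfs_create_ulong() binds a read-only file directly to an unsigned long in memory. A minimal kernel-module sketch of that layout, assuming hypothetical names (em_sketch and its variables), not the energy-model code itself:

    #include <linux/module.h>
    #include <linux/debugfs.h>

    static struct dentry *rootdir;
    static unsigned long frequency = 1000000;
    static unsigned long power = 42;

    static int __init em_sketch_init(void)
    {
            struct dentry *d;

            rootdir = debugfs_create_dir("em_sketch", NULL);
            d = debugfs_create_dir("ps0", rootdir);          /* one dir per state */
            debugfs_create_ulong("frequency", 0444, d, &frequency);
            debugfs_create_ulong("power", 0444, d, &power);
            return 0;
    }

    static void __exit em_sketch_exit(void)
    {
            debugfs_remove_recursive(rootdir);               /* tear down the whole tree */
    }

    module_init(em_sketch_init);
    module_exit(em_sketch_exit);
    MODULE_LICENSE("GPL");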
|
/kernel/time/ |
D | timeconst.bc |
    16  define fmul(b,n,d) {
    17  return (2^b*n+d-1)/d;
    22  define fadj(b,n,d) {
    24  d = d/gcd(n,d);
    25  v = 2^b*(d-1)/d;
    33  define fmuls(b,n,d) {
    36  m = fmul(s,n,d);
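fmul(b,n,d) evaluates (2^b*n + d - 1)/d, i.e. ceil(2^b * n / d): the fixed-point multiplier that turns a division by d into a multiply plus a b-bit shift. For example, at HZ=250 the jiffies-to-msecs multiplier is fmul(32, 1000, 250) = 4 * 2^32, so (j * mul) >> 32 equals j * 4. A quick C check of that identity (illustrative constants, not the generated timeconst.h):

    #include <stdint.h>
    #include <inttypes.h>
    #include <stdio.h>

    int main(void)
    {
            const uint64_t hz = 250, msec_per_sec = 1000, b = 32;
            /* fmul(b,n,d) = (2^b*n + d - 1) / d, i.e. ceil(2^b * n / d) */
            uint64_t mul = (((uint64_t)1 << b) * msec_per_sec + hz - 1) / hz;

            for (uint64_t j = 1; j <= 3; j++)
                    printf("%" PRIu64 " jiffies -> %" PRIu64 " ms\n",
                           j, (j * mul) >> b);  /* 4, 8, 12 ms at HZ=250 */
            return 0;
    }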
|
D | tick-internal.h |
    79  static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }    in tick_do_periodic_broadcast() argument
|
/kernel/trace/ |
D | trace_events_filter_test.h |
    12  TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h),
    14  TP_ARGS(a, b, c, d, e, f, g, h),
    20  __field(int, d)
    31  __entry->d = d;
    39  __entry->a, __entry->b, __entry->c, __entry->d,
|
D | trace_events_filter.c |
    2179 .rec = { .a = va, .b = vb, .c = vc, .d = vd, \
    2293 struct test_filter_data_t *d = &test_filter_data[i];    in ftrace_test_event_filter() local
    2297 d->filter, false, &filter);    in ftrace_test_event_filter()
    2301 d->filter, err);    in ftrace_test_event_filter()
    2313 if (*d->not_visited)    in ftrace_test_event_filter()
    2314 update_pred_fn(filter, d->not_visited);    in ftrace_test_event_filter()
    2317 err = filter_match_preds(filter, &d->rec);    in ftrace_test_event_filter()
    2327 d->filter);    in ftrace_test_event_filter()
    2331 if (err != d->match) {    in ftrace_test_event_filter()
    2334 d->filter, d->match);    in ftrace_test_event_filter()
|
D | trace.h |
    1032 static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }    in ftrace_init_tracefs() argument
    1033 static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }    in ftrace_init_tracefs_toplevel() argument
|
/kernel/printk/ |
D | printk_ringbuffer.c |
    1265 struct prb_desc *d;    in desc_reopen_last() local
    1279 d = to_desc(desc_ring, id);    in desc_reopen_last()
    1299 if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,    in desc_reopen_last()
    1305 return d;    in desc_reopen_last()
    1356 struct prb_desc *d;    in prb_reserve_in_last() local
    1362 d = desc_reopen_last(desc_ring, caller_id, &id);    in prb_reserve_in_last()
    1363 if (!d) {    in prb_reserve_in_last()
    1387 if (BLK_DATALESS(&d->text_blk_lpos)) {    in prb_reserve_in_last()
    1401 &d->text_blk_lpos, id);    in prb_reserve_in_last()
    1403 if (!get_data(&rb->text_data_ring, &d->text_blk_lpos, &data_size))    in prb_reserve_in_last()
    [all …]
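desc_reopen_last() moves a descriptor from committed back to reserved with a single atomic_long_try_cmpxchg(), so the reopen only succeeds if nobody changed the state in between. A userspace sketch of that compare-and-swap state transition using C11 atomics (the state values are invented; the real state_var also encodes the descriptor id):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { STATE_RESERVED = 1, STATE_COMMITTED = 2 };

    struct desc { atomic_long state_var; };

    /* Reopen a committed descriptor; fails if its state changed under us. */
    static struct desc *desc_reopen(struct desc *d)
    {
            long prev = STATE_COMMITTED;

            if (!atomic_compare_exchange_strong(&d->state_var, &prev,
                                                (long)STATE_RESERVED))
                    return NULL;               /* someone else got there first */
            return d;
    }

    int main(void)
    {
            struct desc d;

            atomic_init(&d.state_var, STATE_COMMITTED);
            printf("reopen: %s\n", desc_reopen(&d) ? "ok" : "busy");
            printf("reopen again: %s\n", desc_reopen(&d) ? "ok" : "busy");
            return 0;
    }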
|
/kernel/sched/ |
D | topology.c |
    1410 static void __free_domain_allocs(struct s_data *d, enum s_alloc what,    in __free_domain_allocs() argument
    1415 if (!atomic_read(&d->rd->refcount))    in __free_domain_allocs()
    1416 free_rootdomain(&d->rd->rcu);    in __free_domain_allocs()
    1419 free_percpu(d->sd);    in __free_domain_allocs()
    1430 __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)    in __visit_domain_allocation_hell() argument
    1432 memset(d, 0, sizeof(*d));    in __visit_domain_allocation_hell()
    1436 d->sd = alloc_percpu(struct sched_domain *);    in __visit_domain_allocation_hell()
    1437 if (!d->sd)    in __visit_domain_allocation_hell()
    1439 d->rd = alloc_rootdomain();    in __visit_domain_allocation_hell()
    1440 if (!d->rd)    in __visit_domain_allocation_hell()
    [all …]
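__visit_domain_allocation_hell() allocates in stages and returns an enum s_alloc recording how far it got; __free_domain_allocs() switches on that enum and falls through so every completed stage gets undone. A compact sketch of the staged-allocation rollback (two stages, hypothetical names):

    #include <stdlib.h>
    #include <stdio.h>

    enum s_alloc { sa_none, sa_sd, sa_rootdomain };  /* how far allocation got */

    struct s_data { int *sd; int *rd; };

    static void free_allocs(struct s_data *d, enum s_alloc what)
    {
            switch (what) {
            case sa_rootdomain: free(d->rd);   /* fall through */
            case sa_sd:         free(d->sd);   /* fall through */
            case sa_none:       break;
            }
    }

    static enum s_alloc alloc_all(struct s_data *d)
    {
            d->sd = malloc(sizeof(*d->sd));
            if (!d->sd)
                    return sa_none;
            d->rd = malloc(sizeof(*d->rd));
            if (!d->rd)
                    return sa_sd;              /* caller frees what exists */
            return sa_rootdomain;
    }

    int main(void)
    {
            struct s_data d = { 0 };
            enum s_alloc got = alloc_all(&d);

            printf("stage reached: %d\n", got);
            free_allocs(&d, got);              /* rollback matches the stage */
            return 0;
    }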
|
D | rt.c |
    2704 struct rt_schedulable_data *d = data;    in tg_rt_schedulable() local
    2712 if (tg == d->tg) {    in tg_rt_schedulable()
    2713 period = d->rt_period;    in tg_rt_schedulable()
    2714 runtime = d->rt_runtime;    in tg_rt_schedulable()
    2745 if (child == d->tg) {    in tg_rt_schedulable()
    2746 period = d->rt_period;    in tg_rt_schedulable()
    2747 runtime = d->rt_runtime;    in tg_rt_schedulable()
|
/kernel/events/ |
D | uprobes.c |
    368 __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)    in __update_ref_ctr() argument
    376 if (!vaddr || !d)    in __update_ref_ctr()
    392 if (unlikely(*ptr + d < 0)) {    in __update_ref_ctr()
    394 "curr val: %d, delta: %d\n", vaddr, *ptr, d);    in __update_ref_ctr()
    399 *ptr += d;    in __update_ref_ctr()
    408 struct mm_struct *mm, short d)    in update_ref_ctr_warn() argument
    412 d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,    in update_ref_ctr_warn()
    418 short d)    in update_ref_ctr() argument
    428 ret = __update_ref_ctr(mm, rc_vaddr, d);    in update_ref_ctr()
    430 update_ref_ctr_warn(uprobe, mm, d);    in update_ref_ctr()
    [all …]
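__update_ref_ctr() applies a signed short delta to a reference counter, but refuses any update that would drive the counter negative and logs instead. A standalone version of that bounded update (a plain short here, in place of the mapped user page the kernel writes through):

    #include <stdio.h>

    /* Apply delta 'd' to *ctr, rejecting updates that would go negative,
     * like __update_ref_ctr()'s "*ptr + d < 0" check. */
    static int update_ref_ctr(short *ctr, short d)
    {
            if (!d)
                    return -1;                 /* the kernel also rejects d == 0 */
            if (*ctr + d < 0) {
                    fprintf(stderr, "ref_ctr going negative: curr %d, delta %d\n",
                            *ctr, d);
                    return -1;
            }
            *ctr += d;
            return 0;
    }

    int main(void)
    {
            short ctr = 1;

            update_ref_ctr(&ctr, 1);           /* 2 */
            update_ref_ctr(&ctr, -2);          /* 0 */
            printf("ctr=%d rejected=%d\n", ctr, update_ref_ctr(&ctr, -1));
            return 0;
    }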
|
/kernel/rcu/ |
D | tree_stall.h |
    63  unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;    in rcu_gp_might_be_stalled() local
    66  if (d < RCU_STALL_MIGHT_MIN)    in rcu_gp_might_be_stalled()
    67  d = RCU_STALL_MIGHT_MIN;    in rcu_gp_might_be_stalled()
    75  return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);    in rcu_gp_might_be_stalled()
|