/kernel/bpf/

D  dispatcher.c
    26  struct bpf_dispatcher *d, struct bpf_prog *prog)  in bpf_dispatcher_find_prog() [argument]
    31  if (prog == d->progs[i].prog)  in bpf_dispatcher_find_prog()
    32  return &d->progs[i];  in bpf_dispatcher_find_prog()
    38  struct bpf_dispatcher *d)  in bpf_dispatcher_find_free() [argument]
    40  return bpf_dispatcher_find_prog(d, NULL);  in bpf_dispatcher_find_free()
    43  static bool bpf_dispatcher_add_prog(struct bpf_dispatcher *d,  in bpf_dispatcher_add_prog() [argument]
    51  entry = bpf_dispatcher_find_prog(d, prog);  in bpf_dispatcher_add_prog()
    57  entry = bpf_dispatcher_find_free(d);  in bpf_dispatcher_add_prog()
    64  d->num_progs++;  in bpf_dispatcher_add_prog()
    68  static bool bpf_dispatcher_remove_prog(struct bpf_dispatcher *d,  in bpf_dispatcher_remove_prog() [argument]
    [all …]
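The dispatcher snippet keeps programs in a fixed slot array and reuses one lookup helper for both jobs: passing a real prog finds its slot, passing NULL finds a free one. A minimal userspace sketch of that idiom (MAX_PROGS and all names are illustrative, not the kernel's):

    #include <stddef.h>
    #include <stdbool.h>

    #define MAX_PROGS 48                      /* assumption for the sketch */

    struct prog;                              /* opaque here */
    struct slot { struct prog *prog; };
    struct dispatcher { struct slot progs[MAX_PROGS]; int num_progs; };

    static struct slot *find_slot(struct dispatcher *d, struct prog *prog)
    {
        for (int i = 0; i < MAX_PROGS; i++)
            if (prog == d->progs[i].prog)
                return &d->progs[i];
        return NULL;
    }

    static bool add_prog(struct dispatcher *d, struct prog *prog)
    {
        if (find_slot(d, prog))               /* already installed */
            return false;
        struct slot *s = find_slot(d, NULL);  /* NULL lookup == free slot */
        if (!s)
            return false;
        s->prog = prog;
        d->num_progs++;
        return true;
    }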
/kernel/irq/

D  generic-chip.c
    25  void irq_gc_noop(struct irq_data *d)  in irq_gc_noop() [argument]
    37  void irq_gc_mask_disable_reg(struct irq_data *d)  in irq_gc_mask_disable_reg() [argument]
    39  struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);  in irq_gc_mask_disable_reg()
    40  struct irq_chip_type *ct = irq_data_get_chip_type(d);  in irq_gc_mask_disable_reg()
    41  u32 mask = d->mask;  in irq_gc_mask_disable_reg()
    57  void irq_gc_mask_set_bit(struct irq_data *d)  in irq_gc_mask_set_bit() [argument]
    59  struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);  in irq_gc_mask_set_bit()
    60  struct irq_chip_type *ct = irq_data_get_chip_type(d);  in irq_gc_mask_set_bit()
    61  u32 mask = d->mask;  in irq_gc_mask_set_bit()
    77  void irq_gc_mask_clr_bit(struct irq_data *d)  in irq_gc_mask_clr_bit() [argument]
    [all …]
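irq_gc_mask_set_bit() and irq_gc_mask_clr_bit() follow the same shape: update a cached copy of the hardware mask register, then write the cache out. A reduced sketch of the pattern, with the register I/O as a bare volatile store (the kernel holds gc->lock and goes through irq_reg_writel()):

    #include <stdint.h>

    struct gc {
        uint32_t mask_cache;                  /* last value written */
        volatile uint32_t *mask_reg;          /* stand-in for the MMIO register */
    };

    static void gc_mask_set_bit(struct gc *gc, uint32_t mask)
    {
        gc->mask_cache |= mask;               /* irq_gc_mask_set_bit() shape */
        *gc->mask_reg = gc->mask_cache;
    }

    static void gc_mask_clr_bit(struct gc *gc, uint32_t mask)
    {
        gc->mask_cache &= ~mask;              /* irq_gc_mask_clr_bit() shape */
        *gc->mask_reg = gc->mask_cache;
    }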
D  cpuhotplug.c
    20  static inline bool irq_needs_fixup(struct irq_data *d)  in irq_needs_fixup() [argument]
    22  const struct cpumask *m = irq_data_get_effective_affinity_mask(d);  in irq_needs_fixup()
    32  m = irq_data_get_affinity_mask(d);  in irq_needs_fixup()
    46  cpumask_pr_args(m), d->irq, cpu);  in irq_needs_fixup()
    55  struct irq_data *d = irq_desc_get_irq_data(desc);  in migrate_one_irq() [local]
    56  struct irq_chip *chip = irq_data_get_irq_chip(d);  in migrate_one_irq()
    57  bool maskchip = !irq_can_move_pcntxt(d) && !irqd_irq_masked(d);  in migrate_one_irq()
    68  pr_debug("IRQ %u: Unable to migrate away\n", d->irq);  in migrate_one_irq()
    81  if (irqd_is_per_cpu(d) || !irqd_is_started(d) || !irq_needs_fixup(d)) {  in migrate_one_irq()
   107  affinity = irq_data_get_affinity_mask(d);  in migrate_one_irq()
    [all …]
D  internals.h
   197  #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)  [argument]
   199  static inline unsigned int irqd_get(struct irq_data *d)  in irqd_get() [argument]
   201  return __irqd_to_state(d);  in irqd_get()
   207  static inline void irqd_set_move_pending(struct irq_data *d)  in irqd_set_move_pending() [argument]
   209  __irqd_to_state(d) |= IRQD_SETAFFINITY_PENDING;  in irqd_set_move_pending()
   212  static inline void irqd_clr_move_pending(struct irq_data *d)  in irqd_clr_move_pending() [argument]
   214  __irqd_to_state(d) &= ~IRQD_SETAFFINITY_PENDING;  in irqd_clr_move_pending()
   217  static inline void irqd_set_managed_shutdown(struct irq_data *d)  in irqd_set_managed_shutdown() [argument]
   219  __irqd_to_state(d) |= IRQD_MANAGED_SHUTDOWN;  in irqd_set_managed_shutdown()
   222  static inline void irqd_clr_managed_shutdown(struct irq_data *d)  in irqd_clr_managed_shutdown() [argument]
    [all …]
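Every one of these helpers funnels through the single __irqd_to_state() accessor, so each state update is an OR or AND-NOT on one word. The same pattern in self-contained C (bit positions are invented for the sketch; the kernel's IRQD_* constants live in include/linux/irq.h):

    #include <stdint.h>

    struct irq_common { uint32_t state; };
    struct irq_data   { struct irq_common *common; };

    #define TO_STATE(d)        ((d)->common->state)
    #define MOVE_PENDING       (1u << 0)      /* illustrative bit */
    #define MANAGED_SHUTDOWN   (1u << 1)      /* illustrative bit */

    static inline void set_move_pending(struct irq_data *d)
    {
        TO_STATE(d) |= MOVE_PENDING;
    }

    static inline void clr_move_pending(struct irq_data *d)
    {
        TO_STATE(d) &= ~MOVE_PENDING;
    }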
D  irqdomain.c
    41  static void debugfs_add_domain_dir(struct irq_domain *d);
    42  static void debugfs_remove_domain_dir(struct irq_domain *d);
    44  static inline void debugfs_add_domain_dir(struct irq_domain *d) { }  in debugfs_add_domain_dir() [argument]
    45  static inline void debugfs_remove_domain_dir(struct irq_domain *d) { }  in debugfs_remove_domain_dir() [argument]
   779  static int irq_domain_translate(struct irq_domain *d,  in irq_domain_translate() [argument]
   784  if (d->ops->translate)  in irq_domain_translate()
   785  return d->ops->translate(d, fwspec, hwirq, type);  in irq_domain_translate()
   787  if (d->ops->xlate)  in irq_domain_translate()
   788  return d->ops->xlate(d, to_of_node(fwspec->fwnode),  in irq_domain_translate()
  1001  int irq_domain_xlate_onecell(struct irq_domain *d, struct device_node *ctrlr,  in irq_domain_xlate_onecell() [argument]
    [all …]
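irq_domain_translate() shows a common ops-table fallback: prefer the newer ->translate() callback and fall back to the legacy ->xlate() if it is absent. A stripped-down sketch of that dispatch (in the kernel, xlate takes a device_node extracted from the fwspec; both callbacks take the fwspec directly here to keep the sketch short):

    struct fwspec;                            /* opaque */
    struct domain;

    struct domain_ops {
        int (*translate)(struct domain *d, struct fwspec *spec,
                         unsigned long *hwirq, unsigned int *type);
        int (*xlate)(struct domain *d, struct fwspec *spec,
                     unsigned long *hwirq, unsigned int *type);
    };

    struct domain { const struct domain_ops *ops; };

    static int domain_translate(struct domain *d, struct fwspec *spec,
                                unsigned long *hwirq, unsigned int *type)
    {
        if (d->ops->translate)
            return d->ops->translate(d, spec, hwirq, type);
        if (d->ops->xlate)
            return d->ops->xlate(d, spec, hwirq, type);
        return -1;                            /* -EINVAL in the kernel */
    }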
D  chip.c
   195  struct irq_data *d = irq_desc_get_irq_data(desc);  in __irq_startup_managed() [local]
   197  if (!irqd_affinity_is_managed(d))  in __irq_startup_managed()
   200  irqd_clr_managed_shutdown(d);  in __irq_startup_managed()
   223  if (WARN_ON(irq_domain_activate_irq(d, false)))  in __irq_startup_managed()
   238  struct irq_data *d = irq_desc_get_irq_data(desc);  in __irq_startup() [local]
   242  WARN_ON_ONCE(!irqd_is_activated(d));  in __irq_startup()
   244  if (d->chip->irq_startup) {  in __irq_startup()
   245  ret = d->chip->irq_startup(d);  in __irq_startup()
   257  struct irq_data *d = irq_desc_get_irq_data(desc);  in irq_startup() [local]
   258  const struct cpumask *aff = irq_data_get_affinity_mask(d);  in irq_startup()
    [all …]
D  manage.c
  1400  struct irq_data *d = &desc->irq_data;  in irq_request_resources() [local]
  1401  struct irq_chip *c = d->chip;  in irq_request_resources()
  1403  return c->irq_request_resources ? c->irq_request_resources(d) : 0;  in irq_request_resources()
  1408  struct irq_data *d = &desc->irq_data;  in irq_release_resources() [local]
  1409  struct irq_chip *c = d->chip;  in irq_release_resources()
  1412  c->irq_release_resources(d);  in irq_release_resources()
  1417  struct irq_data *d = irq_desc_get_irq_data(desc);  in irq_supports_nmi() [local]
  1421  if (d->parent_data)  in irq_supports_nmi()
  1425  if (d->chip->irq_bus_lock || d->chip->irq_bus_sync_unlock)  in irq_supports_nmi()
  1428  return d->chip->flags & IRQCHIP_SUPPORTS_NMI;  in irq_supports_nmi()
    [all …]
/kernel/

D  delayacct.c
   131  int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)  in delayacct_add_tsk() [argument]
   139  tmp = (s64)d->cpu_run_real_total;  in delayacct_add_tsk()
   141  d->cpu_run_real_total = (tmp < (s64)d->cpu_run_real_total) ? 0 : tmp;  in delayacct_add_tsk()
   144  tmp = (s64)d->cpu_scaled_run_real_total;  in delayacct_add_tsk()
   146  d->cpu_scaled_run_real_total =  in delayacct_add_tsk()
   147  (tmp < (s64)d->cpu_scaled_run_real_total) ? 0 : tmp;  in delayacct_add_tsk()
   157  d->cpu_count += t1;  in delayacct_add_tsk()
   159  tmp = (s64)d->cpu_delay_total + t2;  in delayacct_add_tsk()
   160  d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;  in delayacct_add_tsk()
   162  tmp = (s64)d->cpu_run_virtual_total + t3;  in delayacct_add_tsk()
    [all …]
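Every accumulation in delayacct_add_tsk() uses the same clamp: add the delta through a signed 64-bit temporary, and reset the total to 0 if the sum wrapped below the previous value. In isolation (the unsigned addition wraps; the signed compare detects it):

    #include <stdint.h>

    static void add_clamped(uint64_t *total, uint64_t delta)
    {
        int64_t tmp = (int64_t)(*total + delta);

        *total = (tmp < (int64_t)*total) ? 0 : (uint64_t)tmp;
    }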
D  audit_watch.c
   350  struct dentry *d = kern_path_locked(watch->path, parent);  in audit_get_nd() [local]
   351  if (IS_ERR(d))  in audit_get_nd()
   352  return PTR_ERR(d);  in audit_get_nd()
   353  if (d_is_positive(d)) {  in audit_get_nd()
   355  watch->dev = d->d_sb->s_dev;  in audit_get_nd()
   356  watch->ino = d_backing_inode(d)->i_ino;  in audit_get_nd()
   359  dput(d);  in audit_get_nd()
D  gen_kheaders.sh
    28  for d in $dir_list; do
    29  all_dirs="$all_dirs $srctree/$d"
D  audit.h
   313  #define audit_watch_compare(w, i, d) 0  [argument]
   319  #define audit_mark_compare(m, i, d) 0  [argument]
D  auditsc.c
    97  struct audit_aux_data d;  [member]
   108  struct audit_aux_data d;  [member]
  2114  const struct dentry *d, *parent;  in handle_path() [local]
  2124  d = dentry;  in handle_path()
  2128  struct inode *inode = d_backing_inode(d);  in handle_path()
  2141  parent = d->d_parent;  in handle_path()
  2142  if (parent == d)  in handle_path()
  2144  d = parent;  in handle_path()
  2766  axp->d.type = AUDIT_OBJ_PID;  in audit_signal_info_syscall()
  2767  axp->d.next = ctx->aux_pids;  in audit_signal_info_syscall()
    [all …]
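The handle_path() lines show the standard dcache root test: keep following ->d_parent, and stop when a dentry is its own parent, which is how the dcache marks a root. The loop shape with a generic, hypothetical parent-linked node:

    struct node { struct node *parent; };

    static void walk_to_root(const struct node *n)
    {
        for (;;) {
            /* ... inspect n (the kernel checks d_backing_inode(d)) ... */
            const struct node *parent = n->parent;

            if (parent == n)
                break;                        /* reached the root */
            n = parent;
        }
    }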
D  smp.c
   194  #define CFD_SEQ(s, d, t, c) \  [argument]
   195  (union cfd_seq_cnt){ .u.src = s, .u.dst = d, .u.type = t, .u.cnt = c }
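CFD_SEQ() packs its four arguments into a union via a C99 compound literal with designated initializers, so the macro yields a value and callers need no temporary. A self-contained equivalent (field widths are guesses for the sketch, not the kernel's layout):

    #include <stdint.h>

    union seq_cnt {
        uint64_t val;
        struct { uint8_t src, dst, type; uint32_t cnt; } u;
    };

    #define SEQ(s, d, t, c) \
        (union seq_cnt){ .u.src = (s), .u.dst = (d), .u.type = (t), .u.cnt = (c) }

    /* usage: union seq_cnt snapshot = SEQ(0, 1, 2, 42); */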
/kernel/power/

D  swap.c
   612  struct crc_data *d = data;  in crc32_threadfn() [local]
   616  wait_event(d->go, atomic_read_acquire(&d->ready) ||  in crc32_threadfn()
   619  d->thr = NULL;  in crc32_threadfn()
   620  atomic_set_release(&d->stop, 1);  in crc32_threadfn()
   621  wake_up(&d->done);  in crc32_threadfn()
   624  atomic_set(&d->ready, 0);  in crc32_threadfn()
   626  for (i = 0; i < d->run_threads; i++)  in crc32_threadfn()
   627  *d->crc32 = crc32_le(*d->crc32,  in crc32_threadfn()
   628  d->unc[i], *d->unc_len[i]);  in crc32_threadfn()
   629  atomic_set_release(&d->stop, 1);  in crc32_threadfn()
    [all …]
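crc32_threadfn() is one half of a ready/stop handshake: the parent sets ->ready and wakes the worker; the worker checksums the queued buffers, publishes ->stop with release semantics, and wakes the parent back. The kernel uses wait queues and wake_up(); the sketch below spins on C11 atomics purely to show the acquire/release pairing:

    #include <stdatomic.h>

    struct crc_data {
        atomic_int ready;                     /* parent -> worker */
        atomic_int stop;                      /* worker -> parent */
    };

    static void crc_worker_iteration(struct crc_data *d)
    {
        while (!atomic_load_explicit(&d->ready, memory_order_acquire))
            ;                                 /* wait_event() in the kernel */
        atomic_store(&d->ready, 0);

        /* ... *d->crc32 = crc32_le(...) over each queued buffer ... */

        atomic_store_explicit(&d->stop, 1, memory_order_release);
        /* wake_up(&d->done) in the kernel */
    }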
D  energy_model.c
    36  struct dentry *d;  in em_debug_create_ps() [local]
    42  d = debugfs_create_dir(name, pd);  in em_debug_create_ps()
    43  debugfs_create_ulong("frequency", 0444, d, &ps->frequency);  in em_debug_create_ps()
    44  debugfs_create_ulong("power", 0444, d, &ps->power);  in em_debug_create_ps()
    45  debugfs_create_ulong("cost", 0444, d, &ps->cost);  in em_debug_create_ps()
    46  debugfs_create_ulong("inefficient", 0444, d, &ps->flags);  in em_debug_create_ps()
    69  struct dentry *d;  in em_debug_create_pd() [local]
    73  d = debugfs_create_dir(dev_name(dev), rootdir);  in em_debug_create_pd()
    76  debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus,  in em_debug_create_pd()
    79  debugfs_create_file("flags", 0444, d, dev->em_pd,  in em_debug_create_pd()
    [all …]
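em_debug_create_ps() is a compact example of debugfs usage: one directory per object, one read-only file per field. A kernel-module-style sketch under the same assumptions (debugfs_create_dir() and debugfs_create_ulong() are the real APIs; struct perf_state and the directory name are invented, and errors are ignored because the debugfs helpers tolerate ERR_PTR parents):

    #include <linux/debugfs.h>

    struct perf_state { unsigned long frequency, power, cost; };

    static void expose_ps(struct perf_state *ps, struct dentry *parent)
    {
        struct dentry *d = debugfs_create_dir("ps0", parent);

        debugfs_create_ulong("frequency", 0444, d, &ps->frequency);
        debugfs_create_ulong("power", 0444, d, &ps->power);
        debugfs_create_ulong("cost", 0444, d, &ps->cost);
    }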
/kernel/time/

D  timeconst.bc
    16  define fmul(b,n,d) {
    17  return (2^b*n+d-1)/d;
    22  define fadj(b,n,d) {
    24  d = d/gcd(n,d);
    25  v = 2^b*(d-1)/d;
    33  define fmuls(b,n,d) {
    36  m = fmul(s,n,d);
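fmul(b,n,d) computes the rounded-up fixed-point multiplier ceil(n * 2^b / d), which turns a runtime division by d into a multiply and a right shift. The same computation in C, with a worked check that msecs-to-jiffies at HZ=250 (n/d = 250/1000, i.e. divide by 4) comes out right:

    #include <stdint.h>
    #include <stdio.h>

    static uint64_t fmul(unsigned int b, uint64_t n, uint64_t d)
    {
        return ((UINT64_C(1) << b) * n + d - 1) / d;
    }

    int main(void)
    {
        uint64_t mult = fmul(32, 250, 1000);  /* == 2^32 / 4 */

        /* 1000 ms * 250/1000 -> prints 250 jiffies */
        printf("%llu\n", (unsigned long long)((1000 * mult) >> 32));
        return 0;
    }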
D  tick-internal.h
    79  static inline void tick_do_periodic_broadcast(struct clock_event_device *d) { }  in tick_do_periodic_broadcast() [argument]
/kernel/trace/

D  trace_events_filter_test.h
    12  TP_PROTO(int a, int b, int c, int d, int e, int f, int g, int h),
    14  TP_ARGS(a, b, c, d, e, f, g, h),
    20  __field(int, d)
    31  __entry->d = d;
    39  __entry->a, __entry->b, __entry->c, __entry->d,
D  trace_events_filter.c
  2304  .rec = { .a = va, .b = vb, .c = vc, .d = vd, \
  2418  struct test_filter_data_t *d = &test_filter_data[i];  in ftrace_test_event_filter() [local]
  2422  d->filter, false, &filter);  in ftrace_test_event_filter()
  2426  d->filter, err);  in ftrace_test_event_filter()
  2438  if (*d->not_visited)  in ftrace_test_event_filter()
  2439  update_pred_fn(filter, d->not_visited);  in ftrace_test_event_filter()
  2442  err = filter_match_preds(filter, &d->rec);  in ftrace_test_event_filter()
  2452  d->filter);  in ftrace_test_event_filter()
  2456  if (err != d->match) {  in ftrace_test_event_filter()
  2459  d->filter, d->match);  in ftrace_test_event_filter()
D  trace.h
  1034  static inline void ftrace_init_tracefs(struct trace_array *tr, struct dentry *d) { }  in ftrace_init_tracefs() [argument]
  1035  static inline void ftrace_init_tracefs_toplevel(struct trace_array *tr, struct dentry *d) { }  in ftrace_init_tracefs_toplevel() [argument]
/kernel/printk/

D  printk_ringbuffer.c
  1265  struct prb_desc *d;  in desc_reopen_last() [local]
  1279  d = to_desc(desc_ring, id);  in desc_reopen_last()
  1299  if (!atomic_long_try_cmpxchg(&d->state_var, &prev_state_val,  in desc_reopen_last()
  1305  return d;  in desc_reopen_last()
  1356  struct prb_desc *d;  in prb_reserve_in_last() [local]
  1362  d = desc_reopen_last(desc_ring, caller_id, &id);  in prb_reserve_in_last()
  1363  if (!d) {  in prb_reserve_in_last()
  1387  if (BLK_DATALESS(&d->text_blk_lpos)) {  in prb_reserve_in_last()
  1401  &d->text_blk_lpos, id);  in prb_reserve_in_last()
  1403  if (!get_data(&rb->text_data_ring, &d->text_blk_lpos, &data_size))  in prb_reserve_in_last()
    [all …]
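desc_reopen_last() hinges on the atomic_long_try_cmpxchg() at line 1299: a single compare-and-exchange flips the descriptor's state back from committed to reserved, and a failure means another CPU changed the state first, so the reopen is abandoned. The shape of that transition in C11 atomics (state encodings invented for the sketch):

    #include <stdatomic.h>
    #include <stdbool.h>

    #define DESC_COMMITTED 1L                 /* illustrative encodings */
    #define DESC_RESERVED  2L

    static bool desc_try_reopen(atomic_long *state_var)
    {
        long expected = DESC_COMMITTED;

        /* on failure, leaves the descriptor untouched */
        return atomic_compare_exchange_strong(state_var, &expected,
                                              DESC_RESERVED);
    }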
/kernel/sched/

D  topology.c
  1441  static void __free_domain_allocs(struct s_data *d, enum s_alloc what,  in __free_domain_allocs() [argument]
  1446  if (!atomic_read(&d->rd->refcount))  in __free_domain_allocs()
  1447  free_rootdomain(&d->rd->rcu);  in __free_domain_allocs()
  1450  free_percpu(d->sd);  in __free_domain_allocs()
  1461  __visit_domain_allocation_hell(struct s_data *d, const struct cpumask *cpu_map)  in __visit_domain_allocation_hell() [argument]
  1463  memset(d, 0, sizeof(*d));  in __visit_domain_allocation_hell()
  1467  d->sd = alloc_percpu(struct sched_domain *);  in __visit_domain_allocation_hell()
  1468  if (!d->sd)  in __visit_domain_allocation_hell()
  1470  d->rd = alloc_rootdomain();  in __visit_domain_allocation_hell()
  1471  if (!d->rd)  in __visit_domain_allocation_hell()
    [all …]
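__free_domain_allocs() takes an enum s_alloc recording how far allocation progressed, and its switch falls through so each stage also releases everything allocated before it; __visit_domain_allocation_hell() is the matching staged builder. A reduced sketch of the idiom, with plain malloc/free standing in for alloc_percpu() and alloc_rootdomain():

    #include <stdlib.h>

    enum s_alloc { sa_rootdomain, sa_sd, sa_none };

    struct s_data { void *sd; void *rd; };

    static void free_domain_allocs(struct s_data *d, enum s_alloc what)
    {
        switch (what) {
        case sa_rootdomain:
            free(d->rd);                      /* highest stage first */
            /* fall through */
        case sa_sd:
            free(d->sd);
            /* fall through */
        case sa_none:
            break;
        }
    }

    /* On failure, the caller passes the returned stage straight to
     * free_domain_allocs() to unwind exactly what was allocated. */
    static enum s_alloc alloc_domain_allocs(struct s_data *d)
    {
        d->sd = malloc(64);
        if (!d->sd)
            return sa_none;                   /* nothing to undo yet */
        d->rd = malloc(64);
        if (!d->rd)
            return sa_sd;                     /* unwinds d->sd only */
        return sa_rootdomain;
    }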
D  rt.c
  2867  struct rt_schedulable_data *d = data;  in tg_rt_schedulable() [local]
  2875  if (tg == d->tg) {  in tg_rt_schedulable()
  2876  period = d->rt_period;  in tg_rt_schedulable()
  2877  runtime = d->rt_runtime;  in tg_rt_schedulable()
  2908  if (child == d->tg) {  in tg_rt_schedulable()
  2909  period = d->rt_period;  in tg_rt_schedulable()
  2910  runtime = d->rt_runtime;  in tg_rt_schedulable()
/kernel/events/

D  uprobes.c
   365  __update_ref_ctr(struct mm_struct *mm, unsigned long vaddr, short d)  in __update_ref_ctr() [argument]
   373  if (!vaddr || !d)  in __update_ref_ctr()
   389  if (unlikely(*ptr + d < 0)) {  in __update_ref_ctr()
   391  "curr val: %d, delta: %d\n", vaddr, *ptr, d);  in __update_ref_ctr()
   396  *ptr += d;  in __update_ref_ctr()
   405  struct mm_struct *mm, short d)  in update_ref_ctr_warn() [argument]
   409  d > 0 ? "increment" : "decrement", uprobe->inode->i_ino,  in update_ref_ctr_warn()
   415  short d)  in update_ref_ctr() [argument]
   425  ret = __update_ref_ctr(mm, rc_vaddr, d);  in update_ref_ctr()
   427  update_ref_ctr_warn(uprobe, mm, d);  in update_ref_ctr()
    [all …]
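__update_ref_ctr() applies a signed short delta to a reference counter but refuses any update that would drive it negative; note that *ptr + d is computed after integer promotion to int, so the check itself cannot wrap. Reduced to its core:

    #include <stdbool.h>

    static bool ref_ctr_update(short *ptr, short d)
    {
        if (*ptr + d < 0)
            return false;                     /* the kernel logs the bogus update and fails */
        *ptr += d;
        return true;
    }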
/kernel/rcu/

D  tree_stall.h
    91  unsigned long d = rcu_jiffies_till_stall_check() / RCU_STALL_MIGHT_DIV;  in rcu_gp_might_be_stalled() [local]
    94  if (d < RCU_STALL_MIGHT_MIN)  in rcu_gp_might_be_stalled()
    95  d = RCU_STALL_MIGHT_MIN;  in rcu_gp_might_be_stalled()
   103  return !time_before(j, READ_ONCE(rcu_state.gp_start) + d);  in rcu_gp_might_be_stalled()