Searched refs:desc (Results 1 – 25 of 27) sorted by relevance


/kernel/irq/
chip.c
44 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_chip() local
46 if (!desc) in irq_set_chip()
52 desc->irq_data.chip = chip; in irq_set_chip()
53 irq_put_desc_unlock(desc, flags); in irq_set_chip()
71 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags, IRQ_GET_DESC_CHECK_GLOBAL); in irq_set_irq_type() local
74 if (!desc) in irq_set_irq_type()
77 ret = __irq_set_trigger(desc, type); in irq_set_irq_type()
78 irq_put_desc_busunlock(desc, flags); in irq_set_irq_type()
93 struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0); in irq_set_handler_data() local
95 if (!desc) in irq_set_handler_data()
[all …]
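
The chip.c hits above all follow one locking idiom: look up and lock the descriptor, bail out if the irq number has no descriptor, mutate under the lock, unlock. A minimal sketch of that idiom, assuming the kernel-internal helpers from kernel/irq/internals.h that appear in the hits; irq_set_chip_sketch() is a hypothetical name, not a real kernel API:

    /* Sketch only: the lookup/lock, mutate, unlock idiom from chip.c. */
    static int irq_set_chip_sketch(unsigned int irq, struct irq_chip *chip)
    {
            unsigned long flags;
            struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);

            if (!desc)
                    return -EINVAL;          /* no descriptor for this irq */

            desc->irq_data.chip = chip;      /* mutate only under desc->lock */
            irq_put_desc_unlock(desc, flags);
            return 0;
    }
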
manage.c
38 static void __synchronize_hardirq(struct irq_desc *desc, bool sync_chip) in __synchronize_hardirq() argument
40 struct irq_data *irqd = irq_desc_get_irq_data(desc); in __synchronize_hardirq()
50 while (irqd_irq_inprogress(&desc->irq_data)) in __synchronize_hardirq()
54 raw_spin_lock_irqsave(&desc->lock, flags); in __synchronize_hardirq()
55 inprogress = irqd_irq_inprogress(&desc->irq_data); in __synchronize_hardirq()
70 raw_spin_unlock_irqrestore(&desc->lock, flags); in __synchronize_hardirq()
100 struct irq_desc *desc = irq_to_desc(irq); in synchronize_hardirq() local
102 if (desc) { in synchronize_hardirq()
103 __synchronize_hardirq(desc, false); in synchronize_hardirq()
104 return !atomic_read(&desc->threads_active); in synchronize_hardirq()
[all …]
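
The __synchronize_hardirq() hits show the double-check pattern used to wait out a running hard-irq handler: spin lock-free while IRQD_IRQ_INPROGRESS is set, then re-read the bit under desc->lock, and loop if it was set again. A simplified sketch of just that loop (the sync_chip handling from manage.c is omitted):

    static void sync_hardirq_sketch(struct irq_desc *desc)
    {
            unsigned long flags;
            bool inprogress;

            do {
                    /* cheap lock-free poll while the handler runs */
                    while (irqd_irq_inprogress(&desc->irq_data))
                            cpu_relax();

                    /* authoritative re-check under the descriptor lock */
                    raw_spin_lock_irqsave(&desc->lock, flags);
                    inprogress = irqd_irq_inprogress(&desc->irq_data);
                    raw_spin_unlock_irqrestore(&desc->lock, flags);
            } while (inprogress);
    }
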
pm.c
16 bool irq_pm_check_wakeup(struct irq_desc *desc) in irq_pm_check_wakeup() argument
18 if (irqd_is_wakeup_armed(&desc->irq_data)) { in irq_pm_check_wakeup()
19 irqd_clear(&desc->irq_data, IRQD_WAKEUP_ARMED); in irq_pm_check_wakeup()
20 desc->istate |= IRQS_SUSPENDED | IRQS_PENDING; in irq_pm_check_wakeup()
21 desc->depth++; in irq_pm_check_wakeup()
22 irq_disable(desc); in irq_pm_check_wakeup()
23 pm_system_irq_wakeup(irq_desc_get_irq(desc)); in irq_pm_check_wakeup()
33 void irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) in irq_pm_install_action() argument
35 desc->nr_actions++; in irq_pm_install_action()
38 desc->force_resume_depth++; in irq_pm_install_action()
[all …]
irqdesc.c
55 static int alloc_masks(struct irq_desc *desc, int node) in alloc_masks() argument
57 if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity, in alloc_masks()
62 if (!zalloc_cpumask_var_node(&desc->irq_common_data.effective_affinity, in alloc_masks()
64 free_cpumask_var(desc->irq_common_data.affinity); in alloc_masks()
70 if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) { in alloc_masks()
72 free_cpumask_var(desc->irq_common_data.effective_affinity); in alloc_masks()
74 free_cpumask_var(desc->irq_common_data.affinity); in alloc_masks()
81 static void desc_smp_init(struct irq_desc *desc, int node, in desc_smp_init() argument
86 cpumask_copy(desc->irq_common_data.affinity, affinity); in desc_smp_init()
89 cpumask_clear(desc->pending_mask); in desc_smp_init()
[all …]
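
alloc_masks() above is a standard allocate-then-unwind sequence: each failed zalloc_cpumask_var_node() frees whatever was allocated before it. A sketch reduced to two masks, assuming CONFIG_GENERIC_PENDING_IRQ so that desc->pending_mask exists (the original also handles the effective-affinity mask):

    static int alloc_masks_sketch(struct irq_desc *desc, int node)
    {
            if (!zalloc_cpumask_var_node(&desc->irq_common_data.affinity,
                                         GFP_KERNEL, node))
                    return -ENOMEM;

            if (!zalloc_cpumask_var_node(&desc->pending_mask, GFP_KERNEL, node)) {
                    /* unwind the earlier allocation on failure */
                    free_cpumask_var(desc->irq_common_data.affinity);
                    return -ENOMEM;
            }
            return 0;
    }
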
settings.h
38 irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set) in irq_settings_clr_and_set() argument
40 desc->status_use_accessors &= ~(clr & _IRQF_MODIFY_MASK); in irq_settings_clr_and_set()
41 desc->status_use_accessors |= (set & _IRQF_MODIFY_MASK); in irq_settings_clr_and_set()
44 static inline bool irq_settings_is_per_cpu(struct irq_desc *desc) in irq_settings_is_per_cpu() argument
46 return desc->status_use_accessors & _IRQ_PER_CPU; in irq_settings_is_per_cpu()
49 static inline bool irq_settings_is_per_cpu_devid(struct irq_desc *desc) in irq_settings_is_per_cpu_devid() argument
51 return desc->status_use_accessors & _IRQ_PER_CPU_DEVID; in irq_settings_is_per_cpu_devid()
54 static inline void irq_settings_set_per_cpu(struct irq_desc *desc) in irq_settings_set_per_cpu() argument
56 desc->status_use_accessors |= _IRQ_PER_CPU; in irq_settings_set_per_cpu()
59 static inline void irq_settings_set_no_balancing(struct irq_desc *desc) in irq_settings_set_no_balancing() argument
[all …]
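
settings.h keeps all flag manipulation behind inline accessors on desc->status_use_accessors; irq_settings_clr_and_set() masks both arguments with _IRQF_MODIFY_MASK, so callers can only touch the modifiable bits. A hypothetical call site as illustration:

    static void retag_irq_sketch(struct irq_desc *desc)
    {
            /* clear the level-trigger flag, mark the irq per-CPU;
             * bits outside _IRQF_MODIFY_MASK are silently ignored */
            irq_settings_clr_and_set(desc, _IRQ_LEVEL, _IRQ_PER_CPU);

            if (irq_settings_is_per_cpu(desc))
                    irq_settings_set_no_balancing(desc);
    }
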
internals.h
70 extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags);
71 extern void __disable_irq(struct irq_desc *desc);
72 extern void __enable_irq(struct irq_desc *desc);
80 extern int irq_activate(struct irq_desc *desc);
81 extern int irq_activate_and_startup(struct irq_desc *desc, bool resend);
82 extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
84 extern void irq_shutdown(struct irq_desc *desc);
85 extern void irq_shutdown_and_deactivate(struct irq_desc *desc);
86 extern void irq_enable(struct irq_desc *desc);
87 extern void irq_disable(struct irq_desc *desc);
[all …]
spurious.c
36 bool irq_wait_for_poll(struct irq_desc *desc) in irq_wait_for_poll() argument
40 smp_processor_id(), desc->irq_data.irq)) in irq_wait_for_poll()
45 raw_spin_unlock(&desc->lock); in irq_wait_for_poll()
46 while (irqd_irq_inprogress(&desc->irq_data)) in irq_wait_for_poll()
48 raw_spin_lock(&desc->lock); in irq_wait_for_poll()
49 } while (irqd_irq_inprogress(&desc->irq_data)); in irq_wait_for_poll()
51 return !irqd_irq_disabled(&desc->irq_data) && desc->action; in irq_wait_for_poll()
61 static int try_one_irq(struct irq_desc *desc, bool force) in try_one_irq() argument
66 raw_spin_lock(&desc->lock); in try_one_irq()
72 if (irq_settings_is_per_cpu(desc) || in try_one_irq()
[all …]
autoprobe.c
32 struct irq_desc *desc; in probe_irq_on() local
45 for_each_irq_desc_reverse(i, desc) { in probe_irq_on()
46 raw_spin_lock_irq(&desc->lock); in probe_irq_on()
47 if (!desc->action && irq_settings_can_probe(desc)) { in probe_irq_on()
52 if (desc->irq_data.chip->irq_set_type) in probe_irq_on()
53 desc->irq_data.chip->irq_set_type(&desc->irq_data, in probe_irq_on()
55 irq_activate_and_startup(desc, IRQ_NORESEND); in probe_irq_on()
57 raw_spin_unlock_irq(&desc->lock); in probe_irq_on()
68 for_each_irq_desc_reverse(i, desc) { in probe_irq_on()
69 raw_spin_lock_irq(&desc->lock); in probe_irq_on()
[all …]
proc.c
48 struct irq_desc *desc = irq_to_desc((long)m->private); in show_irq_affinity() local
54 mask = desc->irq_common_data.affinity; in show_irq_affinity()
56 if (irqd_is_setaffinity_pending(&desc->irq_data)) in show_irq_affinity()
57 mask = desc->pending_mask; in show_irq_affinity()
63 mask = irq_data_get_effective_affinity_mask(&desc->irq_data); in show_irq_affinity()
85 struct irq_desc *desc = irq_to_desc((long)m->private); in irq_affinity_hint_proc_show() local
92 raw_spin_lock_irqsave(&desc->lock, flags); in irq_affinity_hint_proc_show()
93 if (desc->affinity_hint) in irq_affinity_hint_proc_show()
94 cpumask_copy(mask, desc->affinity_hint); in irq_affinity_hint_proc_show()
95 raw_spin_unlock_irqrestore(&desc->lock, flags); in irq_affinity_hint_proc_show()
[all …]
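
irq_affinity_hint_proc_show() above demonstrates the snapshot-then-print pattern: copy the hint into a local cpumask under desc->lock, drop the lock, and only then format it. A sketch under the same assumptions (the "%*pb"/cpumask_pr_args() formatting is what proc.c uses just after the hit):

    static int affinity_hint_sketch(struct seq_file *m, struct irq_desc *desc)
    {
            unsigned long flags;
            cpumask_var_t mask;

            if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
                    return -ENOMEM;

            raw_spin_lock_irqsave(&desc->lock, flags);
            if (desc->affinity_hint)
                    cpumask_copy(mask, desc->affinity_hint);  /* snapshot under lock */
            raw_spin_unlock_irqrestore(&desc->lock, flags);

            seq_printf(m, "%*pb\n", cpumask_pr_args(mask));   /* format lock-free */
            free_cpumask_var(mask);
            return 0;
    }
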
msi.c
32 struct msi_desc *desc; in alloc_msi_entry() local
34 desc = kzalloc(sizeof(*desc), GFP_KERNEL); in alloc_msi_entry()
35 if (!desc) in alloc_msi_entry()
38 INIT_LIST_HEAD(&desc->list); in alloc_msi_entry()
39 desc->dev = dev; in alloc_msi_entry()
40 desc->nvec_used = nvec; in alloc_msi_entry()
42 desc->affinity = kmemdup(affinity, in alloc_msi_entry()
43 nvec * sizeof(*desc->affinity), GFP_KERNEL); in alloc_msi_entry()
44 if (!desc->affinity) { in alloc_msi_entry()
45 kfree(desc); in alloc_msi_entry()
[all …]
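
alloc_msi_entry() is a textbook kzalloc()-plus-kmemdup() constructor: if the second allocation fails, the first is freed before returning NULL. A sketch of the same shape, mirroring the fields visible in the hits:

    static struct msi_desc *alloc_msi_entry_sketch(struct device *dev, int nvec,
                                                   const struct irq_affinity_desc *affinity)
    {
            struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL);

            if (!desc)
                    return NULL;

            INIT_LIST_HEAD(&desc->list);
            desc->dev = dev;
            desc->nvec_used = nvec;

            if (affinity) {
                    desc->affinity = kmemdup(affinity,
                                             nvec * sizeof(*desc->affinity),
                                             GFP_KERNEL);
                    if (!desc->affinity) {
                            kfree(desc);      /* undo the first allocation */
                            return NULL;
                    }
            }
            return desc;
    }
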
resend.c
32 struct irq_desc *desc; in resend_irqs() local
38 desc = irq_to_desc(irq); in resend_irqs()
39 if (!desc) in resend_irqs()
42 desc->handle_irq(desc); in resend_irqs()
57 void check_irq_resend(struct irq_desc *desc) in check_irq_resend() argument
65 if (irq_settings_is_level(desc)) { in check_irq_resend()
66 desc->istate &= ~IRQS_PENDING; in check_irq_resend()
69 if (desc->istate & IRQS_REPLAY) in check_irq_resend()
71 if (desc->istate & IRQS_PENDING) { in check_irq_resend()
72 desc->istate &= ~IRQS_PENDING; in check_irq_resend()
[all …]
handle.c
31 void handle_bad_irq(struct irq_desc *desc) in handle_bad_irq() argument
33 unsigned int irq = irq_desc_get_irq(desc); in handle_bad_irq()
35 print_irq_desc(irq, desc); in handle_bad_irq()
36 kstat_incr_irqs_this_cpu(desc); in handle_bad_irq()
59 void __irq_wake_thread(struct irq_desc *desc, struct irqaction *action) in __irq_wake_thread() argument
121 desc->threads_oneshot |= action->thread_mask; in __irq_wake_thread()
132 atomic_inc(&desc->threads_active); in __irq_wake_thread()
137 irqreturn_t __handle_irq_event_percpu(struct irq_desc *desc, unsigned int *flags) in __handle_irq_event_percpu() argument
140 unsigned int irq = desc->irq_data.irq; in __handle_irq_event_percpu()
143 record_irq_time(desc); in __handle_irq_event_percpu()
[all …]
debugfs.c
30 static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc) in irq_debug_show_masks() argument
32 struct irq_data *data = irq_desc_get_irq_data(desc); in irq_debug_show_masks()
42 msk = desc->pending_mask; in irq_debug_show_masks()
47 static void irq_debug_show_masks(struct seq_file *m, struct irq_desc *desc) { } in irq_debug_show_masks() argument
150 struct irq_desc *desc = m->private; in irq_debug_show() local
153 raw_spin_lock_irq(&desc->lock); in irq_debug_show()
154 data = irq_desc_get_irq_data(desc); in irq_debug_show()
155 seq_printf(m, "handler: %ps\n", desc->handle_irq); in irq_debug_show()
156 seq_printf(m, "device: %s\n", desc->dev_name); in irq_debug_show()
157 seq_printf(m, "status: 0x%08x\n", desc->status_use_accessors); in irq_debug_show()
[all …]
cpuhotplug.c
52 static bool migrate_one_irq(struct irq_desc *desc) in migrate_one_irq() argument
54 struct irq_data *d = irq_desc_get_irq_data(desc); in migrate_one_irq()
85 irq_fixup_move_pending(desc, false); in migrate_one_irq()
95 irq_force_complete_move(desc); in migrate_one_irq()
103 if (irq_fixup_move_pending(desc, true)) in migrate_one_irq()
104 affinity = irq_desc_get_pending_mask(desc); in migrate_one_irq()
119 irq_shutdown_and_deactivate(desc); in migrate_one_irq()
156 struct irq_desc *desc; in irq_migrate_all_off_this_cpu() local
162 desc = irq_to_desc(irq); in irq_migrate_all_off_this_cpu()
163 raw_spin_lock(&desc->lock); in irq_migrate_all_off_this_cpu()
[all …]
debug.h
6 #define ___P(f) if (desc->status_use_accessors & f) printk("%14s set\n", #f)
7 #define ___PS(f) if (desc->istate & f) printk("%14s set\n", #f)
11 static inline void print_irq_desc(unsigned int irq, struct irq_desc *desc) in print_irq_desc() argument
19 irq, desc, desc->depth, desc->irq_count, desc->irqs_unhandled); in print_irq_desc()
21 desc->handle_irq, desc->handle_irq); in print_irq_desc()
23 desc->irq_data.chip, desc->irq_data.chip); in print_irq_desc()
24 printk("->action(): %p\n", desc->action); in print_irq_desc()
25 if (desc->action) { in print_irq_desc()
27 desc->action->handler, desc->action->handler); in print_irq_desc()
migration.c
18 bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear) in irq_fixup_move_pending() argument
20 struct irq_data *data = irq_desc_get_irq_data(desc); in irq_fixup_move_pending()
29 if (cpumask_any_and(desc->pending_mask, cpu_online_mask) >= nr_cpu_ids) { in irq_fixup_move_pending()
40 struct irq_desc *desc = irq_data_to_desc(idata); in irq_move_masked_irq() local
41 struct irq_data *data = &desc->irq_data; in irq_move_masked_irq()
57 if (unlikely(cpumask_empty(desc->pending_mask))) in irq_move_masked_irq()
63 assert_raw_spin_locked(&desc->lock); in irq_move_masked_irq()
77 if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids) { in irq_move_masked_irq()
80 ret = irq_do_set_affinity(data, desc->pending_mask, false); in irq_move_masked_irq()
91 cpumask_clear(desc->pending_mask); in irq_move_masked_irq()
ipi.c
221 int __ipi_send_single(struct irq_desc *desc, unsigned int cpu) in __ipi_send_single() argument
223 struct irq_data *data = irq_desc_get_irq_data(desc); in __ipi_send_single()
263 int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest) in __ipi_send_mask() argument
265 struct irq_data *data = irq_desc_get_irq_data(desc); in __ipi_send_mask()
309 struct irq_desc *desc = irq_to_desc(virq); in ipi_send_single() local
310 struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL; in ipi_send_single()
316 return __ipi_send_single(desc, cpu); in ipi_send_single()
330 struct irq_desc *desc = irq_to_desc(virq); in ipi_send_mask() local
331 struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL; in ipi_send_mask()
337 return __ipi_send_mask(desc, dest); in ipi_send_mask()
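
ipi.c pairs each exported entry point with a __-prefixed worker: the wrapper validates virq (note the desc ? ... : NULL guard in the hits) and the worker assumes a valid descriptor. A sketch of the wrapper side; ipi_send_sketch() is a hypothetical name:

    int ipi_send_sketch(unsigned int virq, unsigned int cpu)
    {
            struct irq_desc *desc = irq_to_desc(virq);
            struct irq_data *data = desc ? irq_desc_get_irq_data(desc) : NULL;

            if (!data)
                    return -EINVAL;           /* virq has no descriptor */

            /* the __ worker trusts desc; all validation happens here */
            return __ipi_send_single(desc, cpu);
    }
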
dummychip.c
20 struct irq_desc *desc = irq_data_to_desc(data); in ack_bad() local
22 print_irq_desc(data->irq, desc); in ack_bad()
irqdomain.c
1406 struct irq_desc *desc; in irq_domain_push_irq() local
1418 desc = irq_to_desc(virq); in irq_domain_push_irq()
1419 if (!desc) in irq_domain_push_irq()
1421 if (WARN_ON(desc->action)) in irq_domain_push_irq()
1488 struct irq_desc *desc; in irq_domain_pop_irq() local
1499 desc = irq_to_desc(virq); in irq_domain_pop_irq()
1500 if (!desc) in irq_domain_pop_irq()
1502 if (WARN_ON(desc->action)) in irq_domain_pop_irq()
/kernel/power/
wakeup_reason.c
48 struct irq_desc *desc; in last_resume_reason_show() local
55 desc = irq_to_desc(irq_list[irq_no]); in last_resume_reason_show()
56 if (desc && desc->action && desc->action->name) in last_resume_reason_show()
58 irq_list[irq_no], desc->action->name); in last_resume_reason_show()
115 struct irq_desc *desc; in log_wakeup_reason() local
117 desc = irq_to_desc(irq); in log_wakeup_reason()
118 if (desc && desc->action && desc->action->name) in log_wakeup_reason()
120 desc->action->name); in log_wakeup_reason()
/kernel/
resource.c
342 unsigned long flags, unsigned long desc, in find_next_iomem_res() argument
376 if ((desc != IORES_DESC_NONE) && (desc != p->desc)) in find_next_iomem_res()
388 res->desc = p->desc; in find_next_iomem_res()
396 unsigned long flags, unsigned long desc, in __walk_iomem_res_desc() argument
404 !find_next_iomem_res(start, end, flags, desc, first_lvl, &res)) { in __walk_iomem_res_desc()
431 int walk_iomem_res_desc(unsigned long desc, unsigned long flags, u64 start, in walk_iomem_res_desc() argument
434 return __walk_iomem_res_desc(start, end, flags, desc, false, arg, func); in walk_iomem_res_desc()
537 unsigned long desc) in region_intersects() argument
549 ((desc == IORES_DESC_NONE) || in region_intersects()
550 (desc == p->desc))); in region_intersects()
[all …]
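
walk_iomem_res_desc(), the exported wrapper at the end of the excerpt, iterates iomem resources matching a flags/desc pair and invokes a callback per match; per the find_next_iomem_res() check above, IORES_DESC_NONE acts as a wildcard. A hedged usage sketch; count_res() and count_sysram_res() are hypothetical:

    #include <linux/ioport.h>

    static int count_res(struct resource *res, void *arg)
    {
            (*(unsigned int *)arg)++;
            return 0;                 /* non-zero would abort the walk */
    }

    static unsigned int count_sysram_res(void)
    {
            unsigned int n = 0;

            walk_iomem_res_desc(IORES_DESC_NONE,      /* wildcard desc */
                                IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY,
                                0, (u64)-1, &n, count_res);
            return n;
    }
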
kexec_file.c
619 return walk_iomem_res_desc(crashk_res.desc, in kexec_walk_resources()
709 struct shash_desc *desc; in kexec_calculate_store_digests() local
729 desc_size = crypto_shash_descsize(tfm) + sizeof(*desc); in kexec_calculate_store_digests()
730 desc = kzalloc(desc_size, GFP_KERNEL); in kexec_calculate_store_digests()
731 if (!desc) { in kexec_calculate_store_digests()
741 desc->tfm = tfm; in kexec_calculate_store_digests()
743 ret = crypto_shash_init(desc); in kexec_calculate_store_digests()
764 ret = crypto_shash_update(desc, ksegment->kbuf, in kexec_calculate_store_digests()
779 ret = crypto_shash_update(desc, zero_buf, bytes); in kexec_calculate_store_digests()
794 ret = crypto_shash_final(desc, digest); in kexec_calculate_store_digests()
[all …]
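
kexec_calculate_store_digests() shows the general crypto_shash recipe: the shash_desc is sized at runtime via crypto_shash_descsize() because the descriptor tail depends on the algorithm, then init/update/final run the digest. A self-contained sketch of the same recipe for a single buffer; sha256_buf() is hypothetical and the error unwinding is compressed:

    #include <crypto/hash.h>
    #include <linux/slab.h>

    static int sha256_buf(const void *buf, unsigned int len, u8 *digest)
    {
            struct crypto_shash *tfm;
            struct shash_desc *desc;
            int ret;

            tfm = crypto_alloc_shash("sha256", 0, 0);
            if (IS_ERR(tfm))
                    return PTR_ERR(tfm);

            /* descriptor size depends on the chosen algorithm */
            desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm),
                           GFP_KERNEL);
            if (!desc) {
                    crypto_free_shash(tfm);
                    return -ENOMEM;
            }
            desc->tfm = tfm;

            ret = crypto_shash_init(desc);
            if (!ret)
                    ret = crypto_shash_update(desc, buf, len);
            if (!ret)
                    ret = crypto_shash_final(desc, digest);

            kfree(desc);
            crypto_free_shash(tfm);
            return ret;
    }
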
workqueue_internal.h
53 char desc[WORKER_DESC_LEN]; member
/kernel/cgroup/
freezer.c
16 int desc = 1; in cgroup_propagate_frozen() local
26 cgrp->freezer.nr_frozen_descendants += desc; in cgroup_propagate_frozen()
34 desc++; in cgroup_propagate_frozen()
37 cgrp->freezer.nr_frozen_descendants -= desc; in cgroup_propagate_frozen()
42 desc++; in cgroup_propagate_frozen()
/kernel/bpf/
cgroup.c
244 struct cgroup *desc = container_of(css, struct cgroup, self); in update_effective_progs() local
246 if (percpu_ref_is_zero(&desc->bpf.refcnt)) in update_effective_progs()
249 err = compute_effective_progs(desc, type, &desc->bpf.inactive); in update_effective_progs()
256 struct cgroup *desc = container_of(css, struct cgroup, self); in update_effective_progs() local
258 if (percpu_ref_is_zero(&desc->bpf.refcnt)) { in update_effective_progs()
259 if (unlikely(desc->bpf.inactive)) { in update_effective_progs()
260 bpf_prog_array_free(desc->bpf.inactive); in update_effective_progs()
261 desc->bpf.inactive = NULL; in update_effective_progs()
266 activate_effective_progs(desc, type, desc->bpf.inactive); in update_effective_progs()
267 desc->bpf.inactive = NULL; in update_effective_progs()
[all …]
