
Searched refs:dev (Results 1 – 25 of 51) sorted by relevance


/kernel/time/
clockevents.c
91 static int __clockevents_switch_state(struct clock_event_device *dev, in __clockevents_switch_state() argument
94 if (dev->features & CLOCK_EVT_FEAT_DUMMY) in __clockevents_switch_state()
103 if (dev->set_state_shutdown) in __clockevents_switch_state()
104 return dev->set_state_shutdown(dev); in __clockevents_switch_state()
109 if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC)) in __clockevents_switch_state()
111 if (dev->set_state_periodic) in __clockevents_switch_state()
112 return dev->set_state_periodic(dev); in __clockevents_switch_state()
117 if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) in __clockevents_switch_state()
119 if (dev->set_state_oneshot) in __clockevents_switch_state()
120 return dev->set_state_oneshot(dev); in __clockevents_switch_state()
[all …]
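
For context, the set_state_*() hooks consulted above are supplied by the timer driver when it registers its clock_event_device. A minimal sketch of such a registration (hypothetical "foo" driver; assumes the standard clockevents API, with the frequency and delta limits made up for illustration):

    /* Sketch: a timer driver's clock_event_device (hypothetical names). */
    static int foo_set_state_shutdown(struct clock_event_device *ce)
    {
            /* mask the hardware timer interrupt here */
            return 0;
    }

    static int foo_set_next_event(unsigned long delta,
                                  struct clock_event_device *ce)
    {
            /* program the comparator 'delta' ticks into the future */
            return 0;
    }

    static struct clock_event_device foo_clockevent = {
            .name                   = "foo-timer",
            .features               = CLOCK_EVT_FEAT_ONESHOT,
            .rating                 = 300,
            .set_state_shutdown     = foo_set_state_shutdown,
            .set_next_event         = foo_set_next_event,
    };

    /* in probe, after setting foo_clockevent.cpumask: */
    clockevents_config_and_register(&foo_clockevent, 1000000, 1, 0x7fffffff);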
tick-internal.h
20 extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
21 extern void tick_handle_periodic(struct clock_event_device *dev);
22 extern void tick_check_new_device(struct clock_event_device *dev);
28 extern void tick_install_replacement(struct clock_event_device *dev);
32 extern int clockevents_tick_resume(struct clock_event_device *dev);
34 static inline int tick_device_is_functional(struct clock_event_device *dev) in tick_device_is_functional() argument
36 return !(dev->features & CLOCK_EVT_FEAT_DUMMY); in tick_device_is_functional()
39 static inline enum clock_event_state clockevent_get_state(struct clock_event_device *dev) in clockevent_get_state() argument
41 return dev->state_use_accessors; in clockevent_get_state()
44 static inline void clockevent_set_state(struct clock_event_device *dev, in clockevent_set_state() argument
[all …]
tick-oneshot.c
25 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); in tick_program_event() local
31 clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED); in tick_program_event()
32 dev->next_event = KTIME_MAX; in tick_program_event()
36 if (unlikely(clockevent_state_oneshot_stopped(dev))) { in tick_program_event()
41 clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); in tick_program_event()
44 return clockevents_program_event(dev, expires, force); in tick_program_event()
52 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); in tick_resume_oneshot() local
54 clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT); in tick_resume_oneshot()
55 clockevents_program_event(dev, ktime_get(), true); in tick_resume_oneshot()
76 struct clock_event_device *dev = td->evtdev; in tick_switch_to_oneshot() local
[all …]
tick-broadcast.c
163 void tick_install_broadcast_device(struct clock_event_device *dev, int cpu) in tick_install_broadcast_device() argument
167 if (tick_set_oneshot_wakeup_device(dev, cpu)) in tick_install_broadcast_device()
170 if (!tick_check_broadcast_device(cur, dev)) in tick_install_broadcast_device()
173 if (!try_module_get(dev->owner)) in tick_install_broadcast_device()
176 clockevents_exchange_device(cur, dev); in tick_install_broadcast_device()
179 tick_broadcast_device.evtdev = dev; in tick_install_broadcast_device()
181 tick_broadcast_start_periodic(dev); in tick_install_broadcast_device()
183 if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT)) in tick_install_broadcast_device()
209 int tick_is_broadcast_device(struct clock_event_device *dev) in tick_is_broadcast_device() argument
211 return (dev && tick_broadcast_device.evtdev == dev); in tick_is_broadcast_device()
[all …]
tick-common.c
74 struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev); in tick_is_oneshot_available() local
76 if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT)) in tick_is_oneshot_available()
78 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) in tick_is_oneshot_available()
109 void tick_handle_periodic(struct clock_event_device *dev) in tick_handle_periodic() argument
112 ktime_t next = dev->next_event; in tick_handle_periodic()
122 if (dev->event_handler != tick_handle_periodic) in tick_handle_periodic()
126 if (!clockevent_state_oneshot(dev)) in tick_handle_periodic()
135 if (!clockevents_program_event(dev, next, false)) in tick_handle_periodic()
154 void tick_setup_periodic(struct clock_event_device *dev, int broadcast) in tick_setup_periodic() argument
156 tick_set_periodic_handler(dev, broadcast); in tick_setup_periodic()
[all …]
timer_list.c
180 struct clock_event_device *dev = td->evtdev; in print_tickdevice() local
191 if (!dev) { in print_tickdevice()
195 SEQ_printf(m, "%s\n", dev->name); in print_tickdevice()
197 (unsigned long long) dev->max_delta_ns); in print_tickdevice()
199 (unsigned long long) dev->min_delta_ns); in print_tickdevice()
200 SEQ_printf(m, " mult: %u\n", dev->mult); in print_tickdevice()
201 SEQ_printf(m, " shift: %u\n", dev->shift); in print_tickdevice()
202 SEQ_printf(m, " mode: %d\n", clockevent_get_state(dev)); in print_tickdevice()
204 (unsigned long long) ktime_to_ns(dev->next_event)); in print_tickdevice()
206 SEQ_printf(m, " set_next_event: %ps\n", dev->set_next_event); in print_tickdevice()
[all …]
/kernel/dma/
mapping.c
31 static void dmam_release(struct device *dev, void *res) in dmam_release() argument
35 dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle, in dmam_release()
39 static int dmam_match(struct device *dev, void *res, void *match_data) in dmam_match() argument
60 void dmam_free_coherent(struct device *dev, size_t size, void *vaddr, in dmam_free_coherent() argument
65 dma_free_coherent(dev, size, vaddr, dma_handle); in dmam_free_coherent()
66 WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data)); in dmam_free_coherent()
84 void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle, in dmam_alloc_attrs() argument
94 vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs); in dmam_alloc_attrs()
105 devres_add(dev, dr); in dmam_alloc_attrs()
111 static bool dma_go_direct(struct device *dev, dma_addr_t mask, in dma_go_direct() argument
[all …]
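
dmam_alloc_attrs() above is the devres-managed counterpart of dma_alloc_attrs(): the devres_add() ties the buffer to the device, so dmam_release() frees it automatically on driver detach. A minimal sketch of how a driver might use it (hypothetical probe function; standard DMA API):

    /* Sketch: managed coherent DMA allocation in probe (hypothetical). */
    static int foo_probe(struct platform_device *pdev)
    {
            dma_addr_t dma;
            void *buf;

            buf = dmam_alloc_attrs(&pdev->dev, SZ_4K, &dma, GFP_KERNEL, 0);
            if (!buf)
                    return -ENOMEM;
            /* no explicit free: dmam_release() runs when the driver detaches */
            return 0;
    }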
direct.c
25 static inline dma_addr_t phys_to_dma_direct(struct device *dev, in phys_to_dma_direct() argument
28 if (force_dma_unencrypted(dev)) in phys_to_dma_direct()
29 return phys_to_dma_unencrypted(dev, phys); in phys_to_dma_direct()
30 return phys_to_dma(dev, phys); in phys_to_dma_direct()
33 static inline struct page *dma_direct_to_page(struct device *dev, in dma_direct_to_page() argument
36 return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr))); in dma_direct_to_page()
39 u64 dma_direct_get_required_mask(struct device *dev) in dma_direct_get_required_mask() argument
42 u64 max_dma = phys_to_dma_direct(dev, phys); in dma_direct_get_required_mask()
47 static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 dma_mask, in dma_direct_optimal_gfp_mask() argument
50 u64 dma_limit = min_not_zero(dma_mask, dev->bus_dma_limit); in dma_direct_optimal_gfp_mask()
[all …]
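
dma-direct honors the addressing masks a driver declares (note dma_direct_optimal_gfp_mask() clamping against dev->bus_dma_limit above). The driver-facing half is typically a single call in probe, e.g. (a sketch, assuming a 32-bit-capable device):

    /* Sketch: declare 32-bit DMA addressing so the core can pick
     * suitable memory (standard API; error handling abbreviated). */
    if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
            return -EIO;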
direct.h
12 int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
15 bool dma_direct_can_mmap(struct device *dev);
16 int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
19 bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
20 int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
22 size_t dma_direct_max_mapping_size(struct device *dev);
26 void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
29 static inline void dma_direct_sync_sg_for_device(struct device *dev, in dma_direct_sync_sg_for_device() argument
38 void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
40 void dma_direct_sync_sg_for_cpu(struct device *dev,
[all …]
debug.c
68 struct device *dev; member
176 static bool driver_filter(struct device *dev) in driver_filter() argument
187 if (current_driver && dev && dev->driver == current_driver) in driver_filter()
191 if (!dev) in driver_filter()
198 drv = dev->driver; in driver_filter()
217 #define err_printk(dev, entry, format, arg...) do { \ argument
219 if (driver_filter(dev) && \
222 dev ? dev_driver_string(dev) : "NULL", \
223 dev ? dev_name(dev) : "NULL", ## arg); \
273 (a->dev == b->dev)) ? true : false; in exact_match()
[all …]
debug.h
12 extern void debug_dma_map_page(struct device *dev, struct page *page,
17 extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
20 extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
24 extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
27 extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
31 extern void debug_dma_free_coherent(struct device *dev, size_t size,
34 extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
39 extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
42 extern void debug_dma_sync_single_for_cpu(struct device *dev,
46 extern void debug_dma_sync_single_for_device(struct device *dev,
[all …]
swiotlb.c
358 static unsigned int swiotlb_align_offset(struct device *dev, u64 addr) in swiotlb_align_offset() argument
360 return addr & dma_get_min_align_mask(dev) & (IO_TLB_SIZE - 1); in swiotlb_align_offset()
366 static void swiotlb_bounce(struct device *dev, phys_addr_t tlb_addr, size_t size, in swiotlb_bounce() argument
369 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_bounce()
381 orig_addr_offset = swiotlb_align_offset(dev, orig_addr); in swiotlb_bounce()
383 dev_WARN_ONCE(dev, 1, in swiotlb_bounce()
391 dev_WARN_ONCE(dev, 1, in swiotlb_bounce()
401 dev_WARN_ONCE(dev, 1, in swiotlb_bounce()
464 static int swiotlb_find_slots(struct device *dev, phys_addr_t orig_addr, in swiotlb_find_slots() argument
467 struct io_tlb_mem *mem = dev->dma_io_tlb_mem; in swiotlb_find_slots()
[all …]
coherent.c
23 static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev) in dev_get_coherent_memory() argument
25 if (dev && dev->dma_mem) in dev_get_coherent_memory()
26 return dev->dma_mem; in dev_get_coherent_memory()
30 static inline dma_addr_t dma_get_device_base(struct device *dev, in dma_get_device_base() argument
34 return phys_to_dma(dev, PFN_PHYS(mem->pfn_base)); in dma_get_device_base()
88 static int dma_assign_coherent_memory(struct device *dev, in dma_assign_coherent_memory() argument
91 if (!dev) in dma_assign_coherent_memory()
94 if (dev->dma_mem) in dma_assign_coherent_memory()
97 dev->dma_mem = mem; in dma_assign_coherent_memory()
118 int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr, in dma_declare_coherent_memory() argument
[all …]
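
dma_declare_coherent_memory() installs a dedicated per-device pool on dev->dma_mem, from which subsequent coherent allocations for that device are carved. A hedged sketch of typical use (addresses and size are hypothetical):

    /* Sketch: reserve a device-local coherent pool (hypothetical addresses). */
    ret = dma_declare_coherent_memory(&pdev->dev,
                                      0x10000000,   /* CPU physical base */
                                      0x10000000,   /* device-visible base */
                                      SZ_1M);
    if (ret)
            return ret;
    /* dma_alloc_coherent(&pdev->dev, ...) is now served from this pool */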
ops_helpers.c
18 int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, in dma_common_get_sgtable() argument
34 int dma_common_mmap(struct device *dev, struct vm_area_struct *vma, in dma_common_mmap() argument
45 vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs); in dma_common_mmap()
47 if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret)) in dma_common_mmap()
61 struct page *dma_common_alloc_pages(struct device *dev, size_t size, in dma_common_alloc_pages() argument
64 const struct dma_map_ops *ops = get_dma_ops(dev); in dma_common_alloc_pages()
67 page = dma_alloc_contiguous(dev, size, gfp); in dma_common_alloc_pages()
69 page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size)); in dma_common_alloc_pages()
73 *dma_handle = ops->map_page(dev, page, 0, size, dir, in dma_common_alloc_pages()
76 dma_free_contiguous(dev, page, size); in dma_common_alloc_pages()
[all …]
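
dma_common_mmap() is what ultimately backs the driver-facing dma_mmap_coherent() helper; a driver exporting a coherent buffer to userspace would call that from its .mmap file operation, roughly (hypothetical foo_dev fields):

    /* Sketch: expose a coherent buffer via mmap (hypothetical names). */
    static int foo_mmap(struct file *file, struct vm_area_struct *vma)
    {
            struct foo_dev *fd = file->private_data;

            return dma_mmap_coherent(fd->dev, vma, fd->buf, fd->dma, fd->size);
    }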
map_benchmark.c
47 struct device *dev; member
85 dma_addr = dma_map_single(map->dev, buf, size, map->dir); in map_benchmark_thread()
86 if (unlikely(dma_mapping_error(map->dev, dma_addr))) { in map_benchmark_thread()
88 dev_name(map->dev)); in map_benchmark_thread()
99 dma_unmap_single(map->dev, dma_addr, size, map->dir); in map_benchmark_thread()
136 get_device(map->dev); in do_map_benchmark()
198 put_device(map->dev); in do_map_benchmark()
259 old_dma_mask = dma_get_mask(map->dev); in map_benchmark_ioctl()
261 ret = dma_set_mask(map->dev, in map_benchmark_ioctl()
265 dev_name(map->dev)); in map_benchmark_ioctl()
[all …]
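
The benchmark thread above times the canonical streaming-DMA pattern; stripped of the measurement, it is simply (a sketch; 'buf' and 'size' assumed set up by the caller):

    /* Sketch: the map/unmap cycle being benchmarked. */
    dma_addr_t dma = dma_map_single(dev, buf, size, DMA_TO_DEVICE);

    if (dma_mapping_error(dev, dma))
            return -ENOMEM;
    /* ... hand 'dma' to the hardware and wait for completion ... */
    dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);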
contiguous.c
258 struct page *dma_alloc_from_contiguous(struct device *dev, size_t count, in dma_alloc_from_contiguous() argument
264 return cma_alloc(dev_get_cma_area(dev), count, align, GFP_KERNEL | in dma_alloc_from_contiguous()
278 bool dma_release_from_contiguous(struct device *dev, struct page *pages, in dma_release_from_contiguous() argument
281 return cma_release(dev_get_cma_area(dev), pages, count); in dma_release_from_contiguous()
307 struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp) in dma_alloc_contiguous() argument
310 int nid = dev_to_node(dev); in dma_alloc_contiguous()
316 if (dev->cma_area) in dma_alloc_contiguous()
317 return cma_alloc_aligned(dev->cma_area, size, gfp); in dma_alloc_contiguous()
350 void dma_free_contiguous(struct device *dev, struct page *page, size_t size) in dma_free_contiguous() argument
355 if (dev->cma_area) { in dma_free_contiguous()
[all …]
/kernel/power/
energy_model.c
26 static bool _is_cpu_device(struct device *dev) in _is_cpu_device() argument
28 return (dev->bus == &cpu_subsys); in _is_cpu_device()
67 static void em_debug_create_pd(struct device *dev) in em_debug_create_pd() argument
73 d = debugfs_create_dir(dev_name(dev), rootdir); in em_debug_create_pd()
75 if (_is_cpu_device(dev)) in em_debug_create_pd()
76 debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus, in em_debug_create_pd()
79 debugfs_create_file("units", 0444, d, dev->em_pd, &em_debug_units_fops); in em_debug_create_pd()
82 for (i = 0; i < dev->em_pd->nr_perf_states; i++) in em_debug_create_pd()
83 em_debug_create_ps(&dev->em_pd->table[i], d); in em_debug_create_pd()
87 static void em_debug_remove_pd(struct device *dev) in em_debug_remove_pd() argument
[all …]
console.c
24 struct device *dev; member
46 void pm_vt_switch_required(struct device *dev, bool required) in pm_vt_switch_required() argument
52 if (tmp->dev == dev) { in pm_vt_switch_required()
64 entry->dev = dev; in pm_vt_switch_required()
78 void pm_vt_switch_unregister(struct device *dev) in pm_vt_switch_unregister() argument
84 if (tmp->dev == dev) { in pm_vt_switch_unregister()
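
pm_vt_switch_required() lets a driver declare whether a console switch is needed around suspend/resume; a graphics driver that can restore its own hardware state would opt out, e.g. (a sketch; standard API):

    /* Sketch: opt out of the suspend-time VT switch (in probe). */
    pm_vt_switch_required(&pdev->dev, false);

    /* and drop the registration again in remove: */
    pm_vt_switch_unregister(&pdev->dev);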
suspend_test.c
81 printk(err_readtime, dev_name(&rtc->dev), status); in test_wakealarm()
92 printk(err_wakealarm, dev_name(&rtc->dev), status); in test_wakealarm()
128 static int __init has_wakealarm(struct device *dev, const void *data) in has_wakealarm() argument
130 struct rtc_device *candidate = to_rtc_device(dev); in has_wakealarm()
134 if (!device_may_wakeup(candidate->dev.parent)) in has_wakealarm()
185 struct device *dev; in test_suspend() local
204 dev = class_find_device(rtc_class, NULL, NULL, has_wakealarm); in test_suspend()
205 if (dev) { in test_suspend()
206 rtc = rtc_class_open(dev_name(dev)); in test_suspend()
207 put_device(dev); in test_suspend()
/kernel/bpf/
devmap.c
58 struct net_device *dev; member
65 struct net_device *dev; /* must be first member, due to tracepoint */ member
209 struct bpf_dtab_netdev *dev; in dev_map_free() local
215 hlist_for_each_entry_safe(dev, next, head, index_hlist) { in dev_map_free()
216 hlist_del_rcu(&dev->index_hlist); in dev_map_free()
217 if (dev->xdp_prog) in dev_map_free()
218 bpf_prog_put(dev->xdp_prog); in dev_map_free()
219 dev_put(dev->dev); in dev_map_free()
220 kfree(dev); in dev_map_free()
227 struct bpf_dtab_netdev *dev; in dev_map_free() local
[all …]
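
These bpf_dtab_netdev entries are the targets XDP programs redirect into. On the BPF-program side that looks roughly like the following (a sketch using libbpf's BTF map syntax; map name and key choice are made up):

    /* Sketch: XDP redirect through a DEVMAP (hypothetical map/key). */
    struct {
            __uint(type, BPF_MAP_TYPE_DEVMAP);
            __uint(max_entries, 64);
            __type(key, __u32);
            __type(value, __u32);   /* target ifindex */
    } tx_ports SEC(".maps");

    SEC("xdp")
    int xdp_redirect_prog(struct xdp_md *ctx)
    {
            __u32 key = 0;          /* hypothetical: always slot 0 */

            return bpf_redirect_map(&tx_ports, key, 0);
    }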
/kernel/irq/
devres.c
18 static void devm_irq_release(struct device *dev, void *res) in devm_irq_release() argument
25 static int devm_irq_match(struct device *dev, void *res, void *data) in devm_irq_match() argument
51 int devm_request_threaded_irq(struct device *dev, unsigned int irq, in devm_request_threaded_irq() argument
65 devname = dev_name(dev); in devm_request_threaded_irq()
76 devres_add(dev, dr); in devm_request_threaded_irq()
99 int devm_request_any_context_irq(struct device *dev, unsigned int irq, in devm_request_any_context_irq() argument
112 devname = dev_name(dev); in devm_request_any_context_irq()
122 devres_add(dev, dr); in devm_request_any_context_irq()
139 void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id) in devm_free_irq() argument
143 WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match, in devm_free_irq()
[all …]
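
devm_request_threaded_irq() ties the IRQ to the device's lifetime, so the error and remove paths need no matching free_irq(). Typical use (a sketch; handler names are hypothetical):

    /* Sketch: managed threaded IRQ request in probe. */
    ret = devm_request_threaded_irq(&pdev->dev, irq,
                                    foo_hardirq,    /* fast top half */
                                    foo_thread_fn,  /* threaded bottom half */
                                    IRQF_ONESHOT, dev_name(&pdev->dev), fd);
    if (ret)
            return ret;
    /* the IRQ is released automatically when the driver detaches */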
msi.c
32 struct msi_desc *alloc_msi_entry(struct device *dev, int nvec, in alloc_msi_entry() argument
42 desc->dev = dev; in alloc_msi_entry()
75 static ssize_t msi_mode_show(struct device *dev, struct device_attribute *attr, in msi_mode_show() argument
91 if (dev_is_pci(dev)) in msi_mode_show()
105 const struct attribute_group **msi_populate_sysfs(struct device *dev) in msi_populate_sysfs() argument
118 for_each_msi_entry(entry, dev) in msi_populate_sysfs()
128 for_each_msi_entry(entry, dev) { in msi_populate_sysfs()
157 ret = sysfs_create_groups(&dev->kobj, msi_irq_groups); in msi_populate_sysfs()
186 void msi_destroy_sysfs(struct device *dev, const struct attribute_group **msi_irq_groups) in msi_destroy_sysfs() argument
193 sysfs_remove_groups(&dev->kobj, msi_irq_groups); in msi_destroy_sysfs()
[all …]
/kernel/sched/
idle.c
132 struct cpuidle_device *dev) in call_cpuidle_s2idle() argument
137 return cpuidle_enter_s2idle(drv, dev); in call_cpuidle_s2idle()
140 static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev, in call_cpuidle() argument
148 dev->last_residency_ns = 0; in call_cpuidle()
158 return cpuidle_enter(drv, dev, next_state); in call_cpuidle()
172 struct cpuidle_device *dev = cpuidle_get_device(); in cpuidle_idle_call() local
173 struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev); in cpuidle_idle_call()
191 if (cpuidle_not_available(drv, dev)) { in cpuidle_idle_call()
208 if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) { in cpuidle_idle_call()
213 entered_state = call_cpuidle_s2idle(drv, dev); in cpuidle_idle_call()
[all …]
/kernel/trace/
trace_mmiotrace.c
20 struct pci_dev *dev; member
61 static void mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev) in mmio_print_pcidev() argument
65 const struct pci_driver *drv = pci_dev_driver(dev); in mmio_print_pcidev()
68 dev->bus->number, dev->devfn, in mmio_print_pcidev()
69 dev->vendor, dev->device, dev->irq); in mmio_print_pcidev()
71 start = dev->resource[i].start; in mmio_print_pcidev()
74 (dev->resource[i].flags & PCI_REGION_FLAG_MASK))); in mmio_print_pcidev()
77 start = dev->resource[i].start; in mmio_print_pcidev()
78 end = dev->resource[i].end; in mmio_print_pcidev()
80 dev->resource[i].start < dev->resource[i].end ? in mmio_print_pcidev()
[all …]
/kernel/
iomem.c
129 static void devm_memremap_release(struct device *dev, void *res) in devm_memremap_release() argument
134 static int devm_memremap_match(struct device *dev, void *res, void *match_data) in devm_memremap_match() argument
139 void *devm_memremap(struct device *dev, resource_size_t offset, in devm_memremap() argument
145 dev_to_node(dev)); in devm_memremap()
152 devres_add(dev, ptr); in devm_memremap()
162 void devm_memunmap(struct device *dev, void *addr) in devm_memunmap() argument
164 WARN_ON(devres_release(dev, devm_memremap_release, in devm_memunmap()
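
devm_memremap() is the managed form of memremap(): devres_add() records the mapping so it is torn down on driver detach. A sketch of typical use (hypothetical resource 'res'; standard API):

    /* Sketch: managed remap of a memory window described by 'res'. */
    void *base = devm_memremap(&pdev->dev, res->start,
                               resource_size(res), MEMREMAP_WB);

    if (IS_ERR(base))
            return PTR_ERR(base);
    /* unmapped automatically when the driver detaches */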
