| /kernel/time/ |
| D | clockevents.c |

```c
static int __clockevents_switch_state(struct clock_event_device *dev,
				      enum clock_event_state state)
{
	if (dev->features & CLOCK_EVT_FEAT_DUMMY)
		...
	if (dev->set_state_shutdown)
		return dev->set_state_shutdown(dev);
	...
	if (!(dev->features & CLOCK_EVT_FEAT_PERIODIC))
		...
	if (dev->set_state_periodic)
		return dev->set_state_periodic(dev);
	...
	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		...
	if (dev->set_state_oneshot)
		return dev->set_state_oneshot(dev);
	...
```
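The excerpt above shows the clockevents core dispatching a requested state change to per-device `set_state_*` callbacks, guarded by the device's feature flags. A minimal sketch of what a timer driver supplies on the other side of that dispatch; `mytimer` and its register accesses are hypothetical:

```c
#include <linux/clockchips.h>

static int mytimer_set_state_shutdown(struct clock_event_device *dev)
{
	/* Hypothetical: mask the timer interrupt, stop the counter. */
	return 0;
}

static int mytimer_set_state_oneshot(struct clock_event_device *dev)
{
	/* Hypothetical: switch the hardware into one-shot mode. */
	return 0;
}

static int mytimer_set_next_event(unsigned long delta,
				  struct clock_event_device *dev)
{
	/* Hypothetical: write 'delta' ticks into a compare register. */
	return 0;
}

static struct clock_event_device mytimer_clockevent = {
	.name			= "mytimer",
	.features		= CLOCK_EVT_FEAT_ONESHOT,
	.rating			= 300,
	/* .cpumask must be set before registration */
	.set_state_shutdown	= mytimer_set_state_shutdown,
	.set_state_oneshot	= mytimer_set_state_oneshot,
	.set_next_event		= mytimer_set_next_event,
};
```

Registration would typically go through `clockevents_config_and_register()`. Because only `CLOCK_EVT_FEAT_ONESHOT` is set here, `__clockevents_switch_state()` would reject a periodic request via the feature check shown above.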
|
| D | tick-internal.h |

```c
extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
extern void tick_handle_periodic(struct clock_event_device *dev);
extern void tick_check_new_device(struct clock_event_device *dev);
...
extern void tick_install_replacement(struct clock_event_device *dev);
...
extern int clockevents_tick_resume(struct clock_event_device *dev);
...
static inline int tick_device_is_functional(struct clock_event_device *dev)
{
	return !(dev->features & CLOCK_EVT_FEAT_DUMMY);
}

static inline enum clock_event_state clockevent_get_state(struct clock_event_device *dev)
{
	return dev->state_use_accessors;
}

static inline void clockevent_set_state(struct clock_event_device *dev,
					...
```
|
| D | tick-broadcast.c |

```c
void tick_install_broadcast_device(struct clock_event_device *dev, int cpu)
{
	...
	if (tick_set_oneshot_wakeup_device(dev, cpu))
		...
	if (!tick_check_broadcast_device(cur, dev))
		...
	if (!try_module_get(dev->owner))
		...
	clockevents_exchange_device(cur, dev);
	...
	tick_broadcast_device.evtdev = dev;
	...
	tick_broadcast_start_periodic(dev);
	...
	if (!(dev->features & CLOCK_EVT_FEAT_ONESHOT))
		...
}

int tick_is_broadcast_device(struct clock_event_device *dev)
{
	return (dev && tick_broadcast_device.evtdev == dev);
}
...
```
|
| D | tick-oneshot.c |

```c
/* in tick_program_event() */
struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
...
	clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED);
	dev->next_event = KTIME_MAX;
...
if (unlikely(clockevent_state_oneshot_stopped(dev))) {
	...
	clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
...
return clockevents_program_event(dev, expires, force);

/* in tick_resume_oneshot() */
struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
clockevents_program_event(dev, ktime_get(), true);

/* in tick_switch_to_oneshot() */
struct clock_event_device *dev = td->evtdev;
...
```
|
| D | tick-common.c |

```c
/* in tick_is_oneshot_available() */
struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);

if (!dev || !(dev->features & CLOCK_EVT_FEAT_ONESHOT))
	...
if (!(dev->features & CLOCK_EVT_FEAT_C3STOP))
	...

void tick_handle_periodic(struct clock_event_device *dev)
{
	ktime_t next = dev->next_event;
	...
	if (IS_ENABLED(CONFIG_TICK_ONESHOT) && dev->event_handler != tick_handle_periodic)
		...
	if (!clockevent_state_oneshot(dev))
		...
	if (!clockevents_program_event(dev, next, false))
		...
}

void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
{
	tick_set_periodic_handler(dev, broadcast);
	...
```
|
| D | timer_list.c |

```c
/* in print_tickdevice() */
struct clock_event_device *dev = td->evtdev;
...
if (!dev) {
	...
SEQ_printf(m, "%s\n", dev->name);
SEQ_printf(m, ...
	   (unsigned long long) dev->max_delta_ns);
SEQ_printf(m, ...
	   (unsigned long long) dev->min_delta_ns);
SEQ_printf(m, " mult: %u\n", dev->mult);
SEQ_printf(m, " shift: %u\n", dev->shift);
SEQ_printf(m, " mode: %d\n", clockevent_get_state(dev));
SEQ_printf(m, ...
	   (unsigned long long) ktime_to_ns(dev->next_event));
SEQ_printf(m, " set_next_event: %ps\n", dev->set_next_event);
...
```
|
| /kernel/dma/ |
| D | mapping.c |

```c
static void dmam_release(struct device *dev, void *res)
{
	...
	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
		       ...);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
...

void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	...
	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
	dma_free_coherent(dev, size, vaddr, dma_handle);
}

void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		       gfp_t gfp, unsigned long attrs)
{
	...
	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	...
	devres_add(dev, dr);
	...
}

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
			  ...
```
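`dmam_alloc_attrs()` pairs a DMA allocation with a devres record, so the buffer is released automatically through `dmam_release()` when the driver is unbound. A minimal sketch of the usual caller; `mydrv_probe` is hypothetical:

```c
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/sizes.h>

/* Hypothetical probe: no matching free is needed on the error or
 * remove paths, devres tears the allocation down with the device. */
static int mydrv_probe(struct device *dev)
{
	dma_addr_t dma_handle;
	void *vaddr;

	/* dmam_alloc_coherent() wraps dmam_alloc_attrs(..., attrs = 0) */
	vaddr = dmam_alloc_coherent(dev, SZ_4K, &dma_handle, GFP_KERNEL);
	if (!vaddr)
		return -ENOMEM;

	/* ... program dma_handle into the device, use vaddr from the CPU ... */
	return 0;
}
```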
|
| D | direct.c |

```c
static inline dma_addr_t phys_to_dma_direct(struct device *dev,
					    phys_addr_t phys)
{
	if (force_dma_unencrypted(dev))
		return phys_to_dma_unencrypted(dev, phys);
	return phys_to_dma(dev, phys);
}

static inline struct page *dma_direct_to_page(struct device *dev,
					      dma_addr_t dma_addr)
{
	return pfn_to_page(PHYS_PFN(dma_to_phys(dev, dma_addr)));
}

u64 dma_direct_get_required_mask(struct device *dev)
{
	...
	u64 max_dma = phys_to_dma_direct(dev, phys);
	...
}

static gfp_t dma_direct_optimal_gfp_mask(struct device *dev, u64 *phys_limit)
{
	...
		dev->coherent_dma_mask,
	...
```
|
| D | direct.h |

```c
int dma_direct_get_sgtable(struct device *dev, struct sg_table *sgt,
			   ...);
bool dma_direct_can_mmap(struct device *dev);
int dma_direct_mmap(struct device *dev, struct vm_area_struct *vma,
		    ...);
bool dma_direct_need_sync(struct device *dev, dma_addr_t dma_addr);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		      ...);
bool dma_direct_all_ram_mapped(struct device *dev);
size_t dma_direct_max_mapping_size(struct device *dev);
...
void dma_direct_sync_sg_for_device(struct device *dev, struct scatterlist *sgl,
				   ...);

static inline void dma_direct_sync_sg_for_device(struct device *dev,
						 ...
...
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
			 ...
```
|
| D | debug.c |

```c
/* member of the debug entry struct */
struct device *dev;
...
static bool driver_filter(struct device *dev)
{
	...
	if (current_driver && dev && dev->driver == current_driver)
		...
	if (!dev)
		...
	drv = dev->driver;
	...
}

#define err_printk(dev, entry, format, arg...) do {			\
		...							\
		if (driver_filter(dev) &&				\
		    ...							\
			dev ? dev_driver_string(dev) : "NULL",		\
			dev ? dev_name(dev) : "NULL", ## arg);		\
		...

/* in exact_match() */
... (a->dev == b->dev)) ? true : false;
```
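The `err_printk()` machinery is what reports DMA-API misuse when `CONFIG_DMA_API_DEBUG=y`. A sketch of the kind of driver bug these checks flag; the helper and its deliberate size mismatch are hypothetical:

```c
#include <linux/dma-mapping.h>

/* Hypothetical buggy helper: the unmap size differs from the size
 * used at map time, which the dma-debug entry comparison catches
 * and reports through err_printk() with driver and device names. */
static void buggy_unmap(struct device *dev, void *buf)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, buf, 4096, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))
		return;

	dma_unmap_single(dev, addr, 2048, DMA_TO_DEVICE); /* size mismatch */
}
```

The `driver_filter()` path shown above lets dma-debug restrict such reports to one driver at a time.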
|
| D | debug.h |

```c
extern void debug_dma_map_page(struct device *dev, struct page *page,
			       ...);
extern void debug_dma_unmap_page(struct device *dev, dma_addr_t addr,
				 ...);
extern void debug_dma_map_sg(struct device *dev, struct scatterlist *sg,
			     ...);
extern void debug_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
			       ...);
extern void debug_dma_alloc_coherent(struct device *dev, size_t size,
				     ...);
extern void debug_dma_free_coherent(struct device *dev, size_t size,
				    ...);
extern void debug_dma_map_resource(struct device *dev, phys_addr_t addr,
				   ...);
extern void debug_dma_unmap_resource(struct device *dev, dma_addr_t dma_addr,
				     ...);
extern void debug_dma_sync_single_for_cpu(struct device *dev,
					  ...);
extern void debug_dma_sync_single_for_device(struct device *dev,
					     ...);
...
```
|
| D | swiotlb.c |

```c
static struct page *swiotlb_alloc_tlb(struct device *dev, size_t bytes,
				      ...)
{
	...
	if (!gfpflags_allow_blocking(gfp) && dev && force_dma_unencrypted(dev)) {
		...
		return dma_alloc_from_pool(dev, bytes, &vaddr, gfp,
					   ...
}

static struct io_tlb_pool *swiotlb_alloc_pool(struct device *dev,
					      ...)
{
	...
	while (!(tlb = swiotlb_alloc_tlb(dev, tlb_size, phys_limit, gfp))) {
	...
}

struct io_tlb_pool *__swiotlb_find_pool(struct device *dev, phys_addr_t paddr)
{
	struct io_tlb_mem *mem = dev->dma_io_tlb_mem;
	...
	list_for_each_entry_rcu(pool, &dev->dma_io_tlb_pools, node) {
	...
}

static void swiotlb_del_pool(struct device *dev, struct io_tlb_pool *pool)
{
	...
	spin_lock_irqsave(&dev->dma_io_tlb_lock, flags);
	...
```
|
| D | coherent.c |

```c
static inline struct dma_coherent_mem *dev_get_coherent_memory(struct device *dev)
{
	if (dev && dev->dma_mem)
		return dev->dma_mem;
	...
}

static inline dma_addr_t dma_get_device_base(struct device *dev,
					     ...)
{
	return phys_to_dma(dev, PFN_PHYS(mem->pfn_base));
}

static int dma_assign_coherent_memory(struct device *dev,
				      ...)
{
	if (!dev)
		...
	if (dev->dma_mem)
		...
	dev->dma_mem = mem;
	...
}

int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				...
```
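`dma_declare_coherent_memory()` assigns a device its own coherent pool (`dev->dma_mem` above); later `dma_alloc_coherent()` calls on that device are satisfied from it instead of the system allocator. A minimal sketch, assuming a device-local SRAM region; the addresses, size, and function name are made up for illustration:

```c
#include <linux/dma-map-ops.h>
#include <linux/sizes.h>

/* Hypothetical: carve a 1 MiB device-local region into a per-device
 * coherent pool. CPU physical and device-visible addresses happen to
 * match here; they need not in general. */
static int mydrv_setup_pool(struct device *dev)
{
	int ret;

	ret = dma_declare_coherent_memory(dev, 0x90000000 /* CPU phys */,
					  0x90000000 /* device addr */,
					  SZ_1M);
	if (ret)
		return ret;

	/* dma_alloc_coherent(dev, ...) now draws from this pool. */
	return 0;
}
```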
|
| D | ops_helpers.c |

```c
int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
			   ...)
{
	...
}

int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		    ...)
{
	...
	vma->vm_page_prot = dma_pgprot(dev, vma->vm_page_prot, attrs);
	...
	if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
		...
}

struct page *dma_common_alloc_pages(struct device *dev, size_t size,
				    ...)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	...
	page = dma_alloc_contiguous(dev, size, gfp);
	...
	page = alloc_pages_node(dev_to_node(dev), gfp, get_order(size));
	...
	if (use_dma_iommu(dev))
		*dma_handle = iommu_dma_map_page(dev, page, 0, size, dir,
						 ...
```
|
| D | map_benchmark.c |

```c
/* struct member */
struct device *dev;

/* in map_benchmark_thread() */
dma_addr = dma_map_single(map->dev, buf, size, map->dir);
if (unlikely(dma_mapping_error(map->dev, dma_addr))) {
	...
		dev_name(map->dev));
...
dma_unmap_single(map->dev, dma_addr, size, map->dir);

/* in do_map_benchmark() */
get_device(map->dev);
...
put_device(map->dev);

/* in map_benchmark_ioctl() */
old_dma_mask = dma_get_mask(map->dev);
...
ret = dma_set_mask(map->dev,
		   ...
	dev_name(map->dev));
...
```
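The benchmark thread times the canonical streaming-DMA pattern: map, check for a mapping error, let the device do its transfer, unmap. The same pattern in isolation, as a sketch with a hypothetical caller:

```c
#include <linux/dma-mapping.h>

/* Hypothetical helper showing the sequence map_benchmark_thread()
 * measures; 'dir' is DMA_TO_DEVICE, DMA_FROM_DEVICE, or
 * DMA_BIDIRECTIONAL depending on who writes the buffer. */
static int stream_one_buffer(struct device *dev, void *buf, size_t size,
			     enum dma_data_direction dir)
{
	dma_addr_t dma_addr;

	dma_addr = dma_map_single(dev, buf, size, dir);
	if (dma_mapping_error(dev, dma_addr))
		return -ENOMEM;

	/* ... device performs DMA to/from dma_addr here ... */

	dma_unmap_single(dev, dma_addr, size, dir);
	return 0;
}
```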
|
| D | contiguous.c |

```c
struct page *dma_alloc_from_contiguous(struct device *dev, size_t count,
				       ...)
{
	...
	return cma_alloc(dev_get_cma_area(dev), count, align, no_warn);
}

bool dma_release_from_contiguous(struct device *dev, struct page *pages,
				 ...)
{
	return cma_release(dev_get_cma_area(dev), pages, count);
}

struct page *dma_alloc_contiguous(struct device *dev, size_t size, gfp_t gfp)
{
	...
	int nid = dev_to_node(dev);
	...
	if (dev->cma_area)
		return cma_alloc_aligned(dev->cma_area, size, gfp);
	...
}

void dma_free_contiguous(struct device *dev, struct page *page, size_t size)
{
	...
	if (dev->cma_area) {
	...
```
|
| /kernel/irq/ |
| D | msi.c |

```c
static void msi_domain_free_locked(struct device *dev, struct msi_ctrl *ctrl);
static unsigned int msi_domain_get_hwsize(struct device *dev, unsigned int domid);
static inline int msi_sysfs_create_group(struct device *dev);
...
static struct msi_desc *msi_alloc_desc(struct device *dev, int nvec,
				       ...)
{
	...
	desc->dev = dev;
	...
}

static int msi_insert_desc(struct device *dev, struct msi_desc *desc,
			   ...)
{
	struct msi_device_data *md = dev->msi.data;
	...
	hwsize = msi_domain_get_hwsize(dev, domid);
	...
}

int msi_domain_insert_msi_desc(struct device *dev, unsigned int domid,
			       ...)
{
	...
	lockdep_assert_held(&dev->msi.data->mutex);
	...
```
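Drivers rarely call these descriptor helpers directly; a PCI driver typically reaches them through `pci_alloc_irq_vectors()`, which allocates MSI or MSI-X descriptors via this code. A short sketch with a hypothetical driver:

```c
#include <linux/pci.h>

/* Hypothetical PCI probe fragment: request between 1 and 8 vectors,
 * preferring MSI-X over MSI. The allocation path ends up in the
 * msi.c descriptor management shown above. */
static int mypci_setup_irqs(struct pci_dev *pdev)
{
	int nvec;

	nvec = pci_alloc_irq_vectors(pdev, 1, 8,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI);
	if (nvec < 0)
		return nvec;

	/* pci_irq_vector(pdev, i) then maps vector i to a Linux IRQ. */
	return 0;
}
```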
|
| D | devres.c |

```c
static void devm_irq_release(struct device *dev, void *res)
{
	...
}

static int devm_irq_match(struct device *dev, void *res, void *data)
...

int devm_request_threaded_irq(struct device *dev, unsigned int irq,
			      ...)
{
	...
	devname = dev_name(dev);
	...
	devres_add(dev, dr);
	...
}

int devm_request_any_context_irq(struct device *dev, unsigned int irq,
				 ...)
{
	...
	devname = dev_name(dev);
	...
	devres_add(dev, dr);
	...
}

void devm_free_irq(struct device *dev, unsigned int irq, void *dev_id)
{
	...
	WARN_ON(devres_destroy(dev, devm_irq_release, devm_irq_match,
			       ...
```
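`devm_request_threaded_irq()` ties an IRQ request to the device's lifetime via `devres_add()`, so no explicit `free_irq()` is needed on unbind. A minimal sketch; the `mydrv_*` handlers are hypothetical:

```c
#include <linux/device.h>
#include <linux/interrupt.h>

/* Hypothetical: the hard handler just acknowledges the hardware and
 * wakes the thread; the threaded handler may sleep. */
static irqreturn_t mydrv_hardirq(int irq, void *data)
{
	return IRQ_WAKE_THREAD;
}

static irqreturn_t mydrv_thread(int irq, void *data)
{
	/* sleepable work here */
	return IRQ_HANDLED;
}

static int mydrv_probe_irq(struct device *dev, unsigned int irq, void *priv)
{
	/* devname NULL: the core falls back to dev_name(dev), as the
	 * excerpt above shows. */
	return devm_request_threaded_irq(dev, irq, mydrv_hardirq,
					 mydrv_thread, IRQF_ONESHOT,
					 NULL, priv);
}
```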
|
| /kernel/power/ |
| D | energy_model.c |

```c
static void em_cpufreq_update_efficiencies(struct device *dev,
					   ...);

static bool _is_cpu_device(struct device *dev)
{
	return (dev->bus == &cpu_subsys);
}

static void em_debug_create_pd(struct device *dev)
{
	...
	d = debugfs_create_dir(dev_name(dev), rootdir);

	if (_is_cpu_device(dev))
		debugfs_create_file("cpus", 0444, d, dev->em_pd->cpus,
				    ...);
	debugfs_create_file("flags", 0444, d, dev->em_pd,
			    ...);
	em_dbg = devm_kcalloc(dev, dev->em_pd->nr_perf_states,
			      ...);
	...
	for (i = 0; i < dev->em_pd->nr_perf_states; i++)
		...
```
|
| D | console.c |

```c
/* struct member */
struct device *dev;
...
void pm_vt_switch_required(struct device *dev, bool required)
{
	...
		if (tmp->dev == dev) {
	...
	entry->dev = dev;
	...
}

void pm_vt_switch_unregister(struct device *dev)
{
	...
		if (tmp->dev == dev) {
```
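A graphics driver uses this registry to tell the PM core whether a VT switch to the text console is needed around suspend/resume. A sketch, assuming a hypothetical GPU driver that can restore its own display state:

```c
#include <linux/device.h>
#include <linux/pm.h>

/* Hypothetical: 'required = false' records that this device restores
 * itself, so the console layer may skip the suspend-time VT switch. */
static void mygpu_register_vt_switch(struct device *dev, bool can_restore)
{
	pm_vt_switch_required(dev, !can_restore);
}

static void mygpu_unregister_vt_switch(struct device *dev)
{
	pm_vt_switch_unregister(dev);
}
```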
|
| D | suspend_test.c |

```c
/* in test_wakealarm() */
printk(err_readtime, dev_name(&rtc->dev), status);
...
printk(err_wakealarm, dev_name(&rtc->dev), status);

static int __init has_wakealarm(struct device *dev, const void *data)
{
	struct rtc_device *candidate = to_rtc_device(dev);
	...
	if (!device_may_wakeup(candidate->dev.parent))
		...
}

/* in test_suspend() */
struct device *dev;
...
dev = class_find_device(&rtc_class, NULL, NULL, has_wakealarm);
if (dev) {
	rtc = rtc_class_open(dev_name(dev));
	put_device(dev);
```
|
| /kernel/bpf/ |
| D | devmap.c |

```c
/* struct members */
struct net_device *dev;
...
struct net_device *dev;	/* must be first member, due to tracepoint */

/* in dev_map_free() */
struct bpf_dtab_netdev *dev;
...
hlist_for_each_entry_safe(dev, next, head, index_hlist) {
	hlist_del_rcu(&dev->index_hlist);
	if (dev->xdp_prog)
		bpf_prog_put(dev->xdp_prog);
	dev_put(dev->dev);
	kfree(dev);
...
struct bpf_dtab_netdev *dev;
...
```
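Each `bpf_dtab_netdev` freed above holds a reference to a real `net_device`; entries get into the map from user space, and XDP programs redirect packets through them. A sketch of the BPF-program side, assuming a libbpf build; the map name and slot index are hypothetical:

```c
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_DEVMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);	/* target ifindex */
} tx_ports SEC(".maps");

SEC("xdp")
int xdp_redirect(struct xdp_md *ctx)
{
	__u32 port = 0;	/* hypothetical egress slot */

	/* Lower flag bits give the fallback action on lookup failure. */
	return bpf_redirect_map(&tx_ports, port, XDP_PASS);
}
```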
|
| D | tcx.c |

```c
/* in tcx_prog_attach() */
struct net_device *dev;
...
dev = __dev_get_by_index(net, attr->target_ifindex);
if (!dev) {
	...
entry = tcx_entry_fetch_or_create(dev, ingress, &created);
...
tcx_entry_update(dev, entry_new, ingress);

/* in tcx_prog_detach() */
struct net_device *dev;
...
dev = __dev_get_by_index(net, attr->target_ifindex);
if (!dev) {
	...
entry = tcx_entry_fetch(dev, ingress);
...
tcx_entry_update(dev, entry_new, ingress);
...
```
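User space reaches `tcx_prog_attach()` through the `bpf()` syscall with the `BPF_TCX_INGRESS`/`BPF_TCX_EGRESS` attach types, resolving the interface via `attr->target_ifindex` as shown above. A sketch using libbpf, assuming a libbpf version with tcx support; the helper name is hypothetical:

```c
#include <bpf/bpf.h>
#include <bpf/libbpf.h>

/* Hypothetical user-space fragment: attach an already-loaded program
 * (prog_fd) to an interface's tcx ingress hook. */
static int attach_tcx_ingress(int prog_fd, int ifindex)
{
	LIBBPF_OPTS(bpf_prog_attach_opts, opts);

	return bpf_prog_attach_opts(prog_fd, ifindex,
				    BPF_TCX_INGRESS, &opts);
}
```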
|
| /kernel/trace/ |
| D | trace_mmiotrace.c |

```c
/* struct member */
struct pci_dev *dev;

static void mmio_print_pcidev(struct trace_seq *s, const struct pci_dev *dev)
{
	...
	const struct pci_driver *drv = pci_dev_driver(dev);
	...
		   dev->bus->number, dev->devfn,
		   dev->vendor, dev->device, dev->irq);
	...
	start = dev->resource[i].start;
	...
		   (dev->resource[i].flags & PCI_REGION_FLAG_MASK)));
	...
	start = dev->resource[i].start;
	end = dev->resource[i].end;
	...
	dev->resource[i].start < dev->resource[i].end ?
	...
```
|
| /kernel/sched/ |
| D | idle.c |

```c
static int call_cpuidle_s2idle(struct cpuidle_driver *drv,
			       struct cpuidle_device *dev)
{
	...
	return cpuidle_enter_s2idle(drv, dev);
}

static int call_cpuidle(struct cpuidle_driver *drv, struct cpuidle_device *dev,
			...)
{
	...
	dev->last_residency_ns = 0;
	...
	return cpuidle_enter(drv, dev, next_state);
}

/* in cpuidle_idle_call() */
struct cpuidle_device *dev = cpuidle_get_device();
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
...
if (cpuidle_not_available(drv, dev)) {
	...
if (idle_should_enter_s2idle() || dev->forced_idle_latency_limit_ns) {
	...
	entered_state = call_cpuidle_s2idle(drv, dev);
	...
```
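The `drv`/`dev` pair the idle loop passes to `cpuidle_enter()` comes from a registered cpuidle driver, and `next_state` indexes the driver's state table. A minimal sketch of what such a table looks like; the driver, its single state, and its latency numbers are hypothetical:

```c
#include <linux/cpuidle.h>

/* Hypothetical idle driver: the governor picks an index into
 * .states[], and the core invokes that state's .enter callback. */
static int myidle_enter(struct cpuidle_device *dev,
			struct cpuidle_driver *drv, int index)
{
	/* Hypothetical: execute the platform's low-power instruction. */
	return index;	/* state actually entered */
}

static struct cpuidle_driver myidle_driver = {
	.name		= "myidle",
	.states = {
		{
			.name			= "C1",
			.desc			= "shallow idle",
			.exit_latency		= 2,	/* us */
			.target_residency	= 10,	/* us */
			.enter			= myidle_enter,
		},
	},
	.state_count	= 1,
};
```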
|