Lines Matching +full:chan +full:- +full:name
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
7 * This code implements the DMA subsystem. It provides a HW-neutral interface
29 * See Documentation/driver-api/dmaengine for more details
35 #include <linux/dma-mapping.h>
63 /* --- debugfs implementation --- */
71 dma_dev->dbg_dev_root = debugfs_create_dir(dev_name(dma_dev->dev), in dmaengine_debug_register()
73 if (IS_ERR(dma_dev->dbg_dev_root)) in dmaengine_debug_register()
74 dma_dev->dbg_dev_root = NULL; in dmaengine_debug_register()
79 debugfs_remove_recursive(dma_dev->dbg_dev_root); in dmaengine_debug_unregister()
80 dma_dev->dbg_dev_root = NULL; in dmaengine_debug_unregister()
86 struct dma_chan *chan; in dmaengine_dbg_summary_show() local
88 list_for_each_entry(chan, &dma_dev->channels, device_node) { in dmaengine_dbg_summary_show()
89 if (chan->client_count) { in dmaengine_dbg_summary_show()
90 seq_printf(s, " %-13s| %s", dma_chan_name(chan), in dmaengine_dbg_summary_show()
91 chan->dbg_client_name ?: "in-use"); in dmaengine_dbg_summary_show()
93 if (chan->router) in dmaengine_dbg_summary_show()
95 dev_name(chan->router->dev)); in dmaengine_dbg_summary_show()
109 dma_dev->dev_id, dev_name(dma_dev->dev), in dmaengine_summary_show()
110 dma_dev->chancnt); in dmaengine_summary_show()
112 if (dma_dev->dbg_summary_show) in dmaengine_summary_show()
113 dma_dev->dbg_summary_show(s, dma_dev); in dmaengine_summary_show()
117 if (!list_is_last(&dma_dev->global_node, &dma_device_list)) in dmaengine_summary_show()
144 /* --- sysfs implementation --- */
149 * dev_to_dma_chan - convert a device pointer to its sysfs container object
159 return chan_dev->chan; in dev_to_dma_chan()
165 struct dma_chan *chan; in memcpy_count_show() local
171 chan = dev_to_dma_chan(dev); in memcpy_count_show()
172 if (chan) { in memcpy_count_show()
174 count += per_cpu_ptr(chan->local, i)->memcpy_count; in memcpy_count_show()
177 err = -ENODEV; in memcpy_count_show()
187 struct dma_chan *chan; in bytes_transferred_show() local
193 chan = dev_to_dma_chan(dev); in bytes_transferred_show()
194 if (chan) { in bytes_transferred_show()
196 count += per_cpu_ptr(chan->local, i)->bytes_transferred; in bytes_transferred_show()
199 err = -ENODEV; in bytes_transferred_show()
209 struct dma_chan *chan; in in_use_show() local
213 chan = dev_to_dma_chan(dev); in in_use_show()
214 if (chan) in in_use_show()
215 err = sprintf(buf, "%d\n", chan->client_count); in in_use_show()
217 err = -ENODEV; in in_use_show()
241 .name = "dma",
246 /* --- client and device registration --- */
252 * struct dma_chan_tbl_ent - tracks channel allocations per core/operation
253 * @chan: associated channel for this entry
256 struct dma_chan *chan; member
259 /* percpu lookup table for memory-to-memory offload providers */
280 err = -ENOMEM; in dma_channel_table_init()
296 * dma_chan_is_local - checks if the channel is in the same NUMA-node as the CPU
297 * @chan: DMA channel to test
300 * Returns true if the channel is in the same NUMA-node as the CPU.
302 static bool dma_chan_is_local(struct dma_chan *chan, int cpu) in dma_chan_is_local() argument
304 int node = dev_to_node(chan->device->dev); in dma_chan_is_local()
310 * min_chan - finds the channel with min count and in the same NUMA-node as the CPU
323 struct dma_chan *chan; in min_chan() local
328 if (!dma_has_cap(cap, device->cap_mask) || in min_chan()
329 dma_has_cap(DMA_PRIVATE, device->cap_mask)) in min_chan()
331 list_for_each_entry(chan, &device->channels, device_node) { in min_chan()
332 if (!chan->client_count) in min_chan()
334 if (!min || chan->table_count < min->table_count) in min_chan()
335 min = chan; in min_chan()
337 if (dma_chan_is_local(chan, cpu)) in min_chan()
339 chan->table_count < localmin->table_count) in min_chan()
340 localmin = chan; in min_chan()
344 chan = localmin ? localmin : min; in min_chan()
346 if (chan) in min_chan()
347 chan->table_count++; in min_chan()
349 return chan; in min_chan()
353 * dma_channel_rebalance - redistribute the available channels
357 * multi-tasking channels) in the non-SMP case.
363 struct dma_chan *chan; in dma_channel_rebalance() local
371 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL; in dma_channel_rebalance()
374 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) in dma_channel_rebalance()
376 list_for_each_entry(chan, &device->channels, device_node) in dma_channel_rebalance()
377 chan->table_count = 0; in dma_channel_rebalance()
387 chan = min_chan(cap, cpu); in dma_channel_rebalance()
388 per_cpu_ptr(channel_table[cap], cpu)->chan = chan; in dma_channel_rebalance()
397 bitmap_and(has.bits, want->bits, device->cap_mask.bits, in dma_device_satisfies_mask()
399 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END); in dma_device_satisfies_mask()
402 static struct module *dma_chan_to_owner(struct dma_chan *chan) in dma_chan_to_owner() argument
404 return chan->device->owner; in dma_chan_to_owner()
408 * balance_ref_count - catch up the channel reference count
409 * @chan: channel to balance ->client_count versus dmaengine_ref_count
413 static void balance_ref_count(struct dma_chan *chan) in balance_ref_count() argument
415 struct module *owner = dma_chan_to_owner(chan); in balance_ref_count()
417 while (chan->client_count < dmaengine_ref_count) { in balance_ref_count()
419 chan->client_count++; in balance_ref_count()
427 list_del_rcu(&device->global_node); in dma_device_release()
430 if (device->device_release) in dma_device_release()
431 device->device_release(device); in dma_device_release()
437 kref_put(&device->ref, dma_device_release); in dma_device_put()
441 * dma_chan_get - try to grab a DMA channel's parent driver module
442 * @chan: channel to grab
446 static int dma_chan_get(struct dma_chan *chan) in dma_chan_get() argument
448 struct module *owner = dma_chan_to_owner(chan); in dma_chan_get()
452 if (chan->client_count) { in dma_chan_get()
458 return -ENODEV; in dma_chan_get()
460 ret = kref_get_unless_zero(&chan->device->ref); in dma_chan_get()
462 ret = -ENODEV; in dma_chan_get()
467 if (chan->device->device_alloc_chan_resources) { in dma_chan_get()
468 ret = chan->device->device_alloc_chan_resources(chan); in dma_chan_get()
473 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask)) in dma_chan_get()
474 balance_ref_count(chan); in dma_chan_get()
477 chan->client_count++; in dma_chan_get()
481 dma_device_put(chan->device); in dma_chan_get()
488 * dma_chan_put - drop a reference to a DMA channel's parent driver module
489 * @chan: channel to release
493 static void dma_chan_put(struct dma_chan *chan) in dma_chan_put() argument
496 if (!chan->client_count) in dma_chan_put()
499 chan->client_count--; in dma_chan_put()
502 if (!chan->client_count && chan->device->device_free_chan_resources) { in dma_chan_put()
504 dmaengine_synchronize(chan); in dma_chan_put()
505 chan->device->device_free_chan_resources(chan); in dma_chan_put()
509 if (chan->router && chan->router->route_free) { in dma_chan_put()
510 chan->router->route_free(chan->router->dev, chan->route_data); in dma_chan_put()
511 chan->router = NULL; in dma_chan_put()
512 chan->route_data = NULL; in dma_chan_put()
515 dma_device_put(chan->device); in dma_chan_put()
516 module_put(dma_chan_to_owner(chan)); in dma_chan_put()
519 enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie) in dma_sync_wait() argument
524 dma_async_issue_pending(chan); in dma_sync_wait()
526 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); in dma_sync_wait()
528 dev_err(chan->device->dev, "%s: timeout!\n", __func__); in dma_sync_wait()
541 * dma_find_channel - find a channel to carry out the operation
546 return this_cpu_read(channel_table[tx_type]->chan); in dma_find_channel()
551 * dma_issue_pending_all - flush all pending operations across all channels
556 struct dma_chan *chan; in dma_issue_pending_all() local
560 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) in dma_issue_pending_all()
562 list_for_each_entry(chan, &device->channels, device_node) in dma_issue_pending_all()
563 if (chan->client_count) in dma_issue_pending_all()
564 device->device_issue_pending(chan); in dma_issue_pending_all()
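The two helpers above form the opportunistic mem-to-mem offload path: dma_find_channel() hands back the per-CPU channel for a capability (only meaningful while a dmaengine_get() reference is held), and dma_issue_pending_all() flushes queued work on every public channel. A minimal sketch of that flow, assuming the source and destination are already DMA-mapped; the function and variable names are illustrative, not from this file:

#include <linux/dmaengine.h>

/* Illustrative only: offload one copy if a DMA_MEMCPY channel is available. */
static void example_offload_copy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cookie_t cookie;

	dmaengine_get();			/* pin the shared channel table */

	chan = dma_find_channel(DMA_MEMCPY);	/* per-CPU shared channel, may be NULL */
	if (chan) {
		tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, 0);
		if (tx) {
			cookie = dmaengine_submit(tx);
			if (!dma_submit_error(cookie)) {
				dma_issue_pending_all();	/* kick all channels with pending work */
				dma_sync_wait(chan, cookie);	/* busy-wait for completion, see above */
			}
		}
	}

	dmaengine_put();
}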
570 int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps) in dma_get_slave_caps() argument
574 if (!chan || !caps) in dma_get_slave_caps()
575 return -EINVAL; in dma_get_slave_caps()
577 device = chan->device; in dma_get_slave_caps()
580 if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) || in dma_get_slave_caps()
581 test_bit(DMA_CYCLIC, device->cap_mask.bits))) in dma_get_slave_caps()
582 return -ENXIO; in dma_get_slave_caps()
589 if (!device->directions) in dma_get_slave_caps()
590 return -ENXIO; in dma_get_slave_caps()
592 caps->src_addr_widths = device->src_addr_widths; in dma_get_slave_caps()
593 caps->dst_addr_widths = device->dst_addr_widths; in dma_get_slave_caps()
594 caps->directions = device->directions; in dma_get_slave_caps()
595 caps->min_burst = device->min_burst; in dma_get_slave_caps()
596 caps->max_burst = device->max_burst; in dma_get_slave_caps()
597 caps->max_sg_burst = device->max_sg_burst; in dma_get_slave_caps()
598 caps->residue_granularity = device->residue_granularity; in dma_get_slave_caps()
599 caps->descriptor_reuse = device->descriptor_reuse; in dma_get_slave_caps()
600 caps->cmd_pause = !!device->device_pause; in dma_get_slave_caps()
601 caps->cmd_resume = !!device->device_resume; in dma_get_slave_caps()
602 caps->cmd_terminate = !!device->device_terminate_all; in dma_get_slave_caps()
605 * DMA engine device might be configured with non-uniformly in dma_get_slave_caps()
609 * channel-specific ones. in dma_get_slave_caps()
611 if (device->device_caps) in dma_get_slave_caps()
612 device->device_caps(chan, caps); in dma_get_slave_caps()
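dma_get_slave_caps() is how a client discovers what a slave channel can do before configuring it; as the comment above notes, the optional device_caps() hook then overrides the device-wide values with channel-specific ones. A hedged sketch of a typical capability check (the field names are from struct dma_slave_caps; the policy applied to them is illustrative):

#include <linux/dmaengine.h>

/* Illustrative check before relying on direction, pause and residue reporting. */
static int example_check_caps(struct dma_chan *chan)
{
	struct dma_slave_caps caps;
	int ret;

	ret = dma_get_slave_caps(chan, &caps);
	if (ret)
		return ret;				/* e.g. -ENXIO: not a slave-capable device */

	if (!(caps.directions & BIT(DMA_DEV_TO_MEM)))
		return -EINVAL;				/* required direction not supported */

	if (!caps.cmd_pause)
		pr_info("channel cannot pause/resume\n");

	if (caps.residue_granularity == DMA_RESIDUE_GRANULARITY_DESCRIPTOR)
		pr_info("residue reported only per descriptor\n");

	return 0;
}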
622 struct dma_chan *chan; in private_candidate() local
625 dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__); in private_candidate()
631 if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask)) in private_candidate()
632 list_for_each_entry(chan, &dev->channels, device_node) { in private_candidate()
634 if (chan->client_count) in private_candidate()
638 list_for_each_entry(chan, &dev->channels, device_node) { in private_candidate()
639 if (chan->client_count) { in private_candidate()
640 dev_dbg(dev->dev, "%s: %s busy\n", in private_candidate()
641 __func__, dma_chan_name(chan)); in private_candidate()
644 if (fn && !fn(chan, fn_param)) { in private_candidate()
645 dev_dbg(dev->dev, "%s: %s filter said false\n", in private_candidate()
646 __func__, dma_chan_name(chan)); in private_candidate()
649 return chan; in private_candidate()
659 struct dma_chan *chan = private_candidate(mask, device, fn, fn_param); in find_candidate() local
662 if (chan) { in find_candidate()
665 * channel will not be published in the general-purpose in find_candidate()
668 dma_cap_set(DMA_PRIVATE, device->cap_mask); in find_candidate()
669 device->privatecnt++; in find_candidate()
670 err = dma_chan_get(chan); in find_candidate()
673 if (err == -ENODEV) { in find_candidate()
674 dev_dbg(device->dev, "%s: %s module removed\n", in find_candidate()
675 __func__, dma_chan_name(chan)); in find_candidate()
676 list_del_rcu(&device->global_node); in find_candidate()
678 dev_dbg(device->dev, in find_candidate()
680 __func__, dma_chan_name(chan), err); in find_candidate()
682 if (--device->privatecnt == 0) in find_candidate()
683 dma_cap_clear(DMA_PRIVATE, device->cap_mask); in find_candidate()
685 chan = ERR_PTR(err); in find_candidate()
689 return chan ? chan : ERR_PTR(-EPROBE_DEFER); in find_candidate()
693 * dma_get_slave_channel - try to get specific channel exclusively
694 * @chan: target channel
696 struct dma_chan *dma_get_slave_channel(struct dma_chan *chan) in dma_get_slave_channel() argument
698 int err = -EBUSY; in dma_get_slave_channel()
703 if (chan->client_count == 0) { in dma_get_slave_channel()
704 struct dma_device *device = chan->device; in dma_get_slave_channel()
706 dma_cap_set(DMA_PRIVATE, device->cap_mask); in dma_get_slave_channel()
707 device->privatecnt++; in dma_get_slave_channel()
708 err = dma_chan_get(chan); in dma_get_slave_channel()
710 dev_dbg(chan->device->dev, in dma_get_slave_channel()
712 __func__, dma_chan_name(chan), err); in dma_get_slave_channel()
713 chan = NULL; in dma_get_slave_channel()
714 if (--device->privatecnt == 0) in dma_get_slave_channel()
715 dma_cap_clear(DMA_PRIVATE, device->cap_mask); in dma_get_slave_channel()
718 chan = NULL; in dma_get_slave_channel()
723 return chan; in dma_get_slave_channel()
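dma_get_slave_channel() is the provider-side way to hand out one specific channel exclusively, most commonly from an of_dma translation callback registered with of_dma_controller_register(). A minimal sketch under that assumption; the example_dmadev structure, the one-cell binding and the channel lookup are all illustrative, not from this file:

#include <linux/dmaengine.h>
#include <linux/of_dma.h>

struct example_dmadev {				/* hypothetical provider private data */
	struct dma_chan *chans;
	unsigned int nr_channels;
};

/* Illustrative xlate: #dma-cells = <1>, cell 0 selects the channel index. */
static struct dma_chan *example_of_xlate(struct of_phandle_args *dma_spec,
					 struct of_dma *ofdma)
{
	struct example_dmadev *ed = ofdma->of_dma_data;
	unsigned int id;

	if (dma_spec->args_count != 1)
		return NULL;

	id = dma_spec->args[0];
	if (id >= ed->nr_channels)
		return NULL;

	/* Returns NULL if that channel is already claimed by another client. */
	return dma_get_slave_channel(&ed->chans[id]);
}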
730 struct dma_chan *chan; in dma_get_any_slave_channel() local
738 chan = find_candidate(device, &mask, NULL, NULL); in dma_get_any_slave_channel()
742 return IS_ERR(chan) ? NULL : chan; in dma_get_any_slave_channel()
747 * __dma_request_channel - try to allocate an exclusive channel
760 struct dma_chan *chan = NULL; in __dma_request_channel() local
766 if (np && device->dev->of_node && np != device->dev->of_node) in __dma_request_channel()
769 chan = find_candidate(device, mask, fn, fn_param); in __dma_request_channel()
770 if (!IS_ERR(chan)) in __dma_request_channel()
773 chan = NULL; in __dma_request_channel()
779 chan ? "success" : "fail", in __dma_request_channel()
780 chan ? dma_chan_name(chan) : NULL); in __dma_request_channel()
782 return chan; in __dma_request_channel()
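__dma_request_channel() backs the legacy dma_request_channel() macro: the caller supplies a capability mask and an optional filter callback that can veto each candidate channel. A hedged sketch of that older filter-based request; the filter's matching rule is illustrative:

#include <linux/dmaengine.h>

/* Illustrative filter: accept only channels belonging to a specific DMA device. */
static bool example_filter(struct dma_chan *chan, void *param)
{
	return chan->device->dev == param;
}

static struct dma_chan *example_request_filtered(struct device *dma_parent)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* Expands to __dma_request_channel(&mask, example_filter, dma_parent, NULL);
	 * returns NULL (not an ERR_PTR) when no channel could be allocated. */
	return dma_request_channel(mask, example_filter, dma_parent);
}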
787 const char *name, in dma_filter_match() argument
792 if (!device->filter.mapcnt) in dma_filter_match()
795 for (i = 0; i < device->filter.mapcnt; i++) { in dma_filter_match()
796 const struct dma_slave_map *map = &device->filter.map[i]; in dma_filter_match()
798 if (!strcmp(map->devname, dev_name(dev)) && in dma_filter_match()
799 !strcmp(map->slave, name)) in dma_filter_match()
807 * dma_request_chan - try to allocate an exclusive slave channel
809 * @name: slave channel name
813 struct dma_chan *dma_request_chan(struct device *dev, const char *name) in dma_request_chan() argument
816 struct dma_chan *chan = NULL; in dma_request_chan() local
818 /* If device-tree is present get slave info from here */ in dma_request_chan()
819 if (dev->of_node) in dma_request_chan()
820 chan = of_dma_request_slave_channel(dev->of_node, name); in dma_request_chan()
823 if (has_acpi_companion(dev) && !chan) in dma_request_chan()
824 chan = acpi_dma_request_slave_chan_by_name(dev, name); in dma_request_chan()
826 if (PTR_ERR(chan) == -EPROBE_DEFER) in dma_request_chan()
827 return chan; in dma_request_chan()
829 if (!IS_ERR_OR_NULL(chan)) in dma_request_chan()
836 const struct dma_slave_map *map = dma_filter_match(d, name, dev); in dma_request_chan()
844 chan = find_candidate(d, &mask, d->filter.fn, map->param); in dma_request_chan()
845 if (!IS_ERR(chan)) in dma_request_chan()
850 if (IS_ERR(chan)) in dma_request_chan()
851 return chan; in dma_request_chan()
852 if (!chan) in dma_request_chan()
853 return ERR_PTR(-EPROBE_DEFER); in dma_request_chan()
857 chan->dbg_client_name = kasprintf(GFP_KERNEL, "%s:%s", dev_name(dev), in dma_request_chan()
858 name); in dma_request_chan()
861 chan->name = kasprintf(GFP_KERNEL, "dma:%s", name); in dma_request_chan()
862 if (!chan->name) in dma_request_chan()
863 return chan; in dma_request_chan()
864 chan->slave = dev; in dma_request_chan()
866 if (sysfs_create_link(&chan->dev->device.kobj, &dev->kobj, in dma_request_chan()
869 if (sysfs_create_link(&dev->kobj, &chan->dev->device.kobj, chan->name)) in dma_request_chan()
870 dev_warn(dev, "Cannot create DMA %s symlink\n", chan->name); in dma_request_chan()
872 return chan; in dma_request_chan()
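dma_request_chan() is the preferred consumer entry point: it tries device tree, then ACPI, then the dma_slave_map filter table registered by the platform, and returns -EPROBE_DEFER when the provider is not ready yet. A minimal sketch of the usual probe-time pattern; the "rx" channel name is illustrative and must match the binding or slave map:

#include <linux/dmaengine.h>

/* Illustrative: claim a named slave channel at probe time. */
static int example_probe_get_chan(struct device *dev, struct dma_chan **out)
{
	struct dma_chan *chan;

	chan = dma_request_chan(dev, "rx");
	if (IS_ERR(chan))
		return PTR_ERR(chan);	/* may be -EPROBE_DEFER: just propagate it */

	*out = chan;
	return 0;
}

/* ... and on remove or error unwind: */
static void example_put_chan(struct dma_chan *chan)
{
	dma_release_channel(chan);	/* drops the exclusive reference taken above */
}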
877 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
884 struct dma_chan *chan; in dma_request_chan_by_mask() local
887 return ERR_PTR(-ENODEV); in dma_request_chan_by_mask()
889 chan = __dma_request_channel(mask, NULL, NULL, NULL); in dma_request_chan_by_mask()
890 if (!chan) { in dma_request_chan_by_mask()
893 chan = ERR_PTR(-EPROBE_DEFER); in dma_request_chan_by_mask()
895 chan = ERR_PTR(-ENODEV); in dma_request_chan_by_mask()
899 return chan; in dma_request_chan_by_mask()
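Unlike the legacy filter macro, dma_request_chan_by_mask() takes only a capability mask and reports failure as an ERR_PTR, including -EPROBE_DEFER while no DMA device is registered at all. A short, illustrative use:

#include <linux/dmaengine.h>

/* Illustrative: grab any channel capable of cyclic transfers. */
static struct dma_chan *example_request_cyclic(void)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_CYCLIC, mask);

	return dma_request_chan_by_mask(&mask);	/* ERR_PTR() on failure */
}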
903 void dma_release_channel(struct dma_chan *chan) in dma_release_channel() argument
906 WARN_ONCE(chan->client_count != 1, in dma_release_channel()
907 "chan reference count %d != 1\n", chan->client_count); in dma_release_channel()
908 dma_chan_put(chan); in dma_release_channel()
910 if (--chan->device->privatecnt == 0) in dma_release_channel()
911 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask); in dma_release_channel()
913 if (chan->slave) { in dma_release_channel()
914 sysfs_remove_link(&chan->dev->device.kobj, DMA_SLAVE_NAME); in dma_release_channel()
915 sysfs_remove_link(&chan->slave->kobj, chan->name); in dma_release_channel()
916 kfree(chan->name); in dma_release_channel()
917 chan->name = NULL; in dma_release_channel()
918 chan->slave = NULL; in dma_release_channel()
922 kfree(chan->dbg_client_name); in dma_release_channel()
923 chan->dbg_client_name = NULL; in dma_release_channel()
930 * dmaengine_get - register interest in dma_channels
935 struct dma_chan *chan; in dmaengine_get() local
943 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) in dmaengine_get()
945 list_for_each_entry(chan, &device->channels, device_node) { in dmaengine_get()
946 err = dma_chan_get(chan); in dmaengine_get()
947 if (err == -ENODEV) { in dmaengine_get()
949 list_del_rcu(&device->global_node); in dmaengine_get()
952 dev_dbg(chan->device->dev, in dmaengine_get()
954 __func__, dma_chan_name(chan), err); in dmaengine_get()
969 * dmaengine_put - let DMA drivers be removed when ref_count == 0
974 struct dma_chan *chan; in dmaengine_put() local
977 dmaengine_ref_count--; in dmaengine_put()
981 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) in dmaengine_put()
983 list_for_each_entry(chan, &device->channels, device_node) in dmaengine_put()
984 dma_chan_put(chan); in dmaengine_put()
997 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask)) in device_has_all_tx_types()
1002 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask)) in device_has_all_tx_types()
1007 if (!dma_has_cap(DMA_XOR, device->cap_mask)) in device_has_all_tx_types()
1011 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask)) in device_has_all_tx_types()
1017 if (!dma_has_cap(DMA_PQ, device->cap_mask)) in device_has_all_tx_types()
1021 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask)) in device_has_all_tx_types()
1035 device->dev_id = rc; in get_dma_id()
1040 struct dma_chan *chan) in __dma_async_device_channel_register() argument
1044 chan->local = alloc_percpu(typeof(*chan->local)); in __dma_async_device_channel_register()
1045 if (!chan->local) in __dma_async_device_channel_register()
1046 return -ENOMEM; in __dma_async_device_channel_register()
1047 chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL); in __dma_async_device_channel_register()
1048 if (!chan->dev) { in __dma_async_device_channel_register()
1049 rc = -ENOMEM; in __dma_async_device_channel_register()
1057 mutex_lock(&device->chan_mutex); in __dma_async_device_channel_register()
1058 chan->chan_id = ida_alloc(&device->chan_ida, GFP_KERNEL); in __dma_async_device_channel_register()
1059 mutex_unlock(&device->chan_mutex); in __dma_async_device_channel_register()
1060 if (chan->chan_id < 0) { in __dma_async_device_channel_register()
1061 pr_err("%s: unable to alloc ida for chan: %d\n", in __dma_async_device_channel_register()
1062 __func__, chan->chan_id); in __dma_async_device_channel_register()
1063 rc = chan->chan_id; in __dma_async_device_channel_register()
1067 chan->dev->device.class = &dma_devclass; in __dma_async_device_channel_register()
1068 chan->dev->device.parent = device->dev; in __dma_async_device_channel_register()
1069 chan->dev->chan = chan; in __dma_async_device_channel_register()
1070 chan->dev->dev_id = device->dev_id; in __dma_async_device_channel_register()
1071 dev_set_name(&chan->dev->device, "dma%dchan%d", in __dma_async_device_channel_register()
1072 device->dev_id, chan->chan_id); in __dma_async_device_channel_register()
1073 rc = device_register(&chan->dev->device); in __dma_async_device_channel_register()
1076 chan->client_count = 0; in __dma_async_device_channel_register()
1077 device->chancnt++; in __dma_async_device_channel_register()
1082 mutex_lock(&device->chan_mutex); in __dma_async_device_channel_register()
1083 ida_free(&device->chan_ida, chan->chan_id); in __dma_async_device_channel_register()
1084 mutex_unlock(&device->chan_mutex); in __dma_async_device_channel_register()
1086 kfree(chan->dev); in __dma_async_device_channel_register()
1088 free_percpu(chan->local); in __dma_async_device_channel_register()
1089 chan->local = NULL; in __dma_async_device_channel_register()
1094 struct dma_chan *chan) in dma_async_device_channel_register() argument
1098 rc = __dma_async_device_channel_register(device, chan); in dma_async_device_channel_register()
1108 struct dma_chan *chan) in __dma_async_device_channel_unregister() argument
1110 WARN_ONCE(!device->device_release && chan->client_count, in __dma_async_device_channel_unregister()
1112 __func__, chan->client_count); in __dma_async_device_channel_unregister()
1114 device->chancnt--; in __dma_async_device_channel_unregister()
1115 chan->dev->chan = NULL; in __dma_async_device_channel_unregister()
1117 mutex_lock(&device->chan_mutex); in __dma_async_device_channel_unregister()
1118 ida_free(&device->chan_ida, chan->chan_id); in __dma_async_device_channel_unregister()
1119 mutex_unlock(&device->chan_mutex); in __dma_async_device_channel_unregister()
1120 device_unregister(&chan->dev->device); in __dma_async_device_channel_unregister()
1121 free_percpu(chan->local); in __dma_async_device_channel_unregister()
1125 struct dma_chan *chan) in dma_async_device_channel_unregister() argument
1127 __dma_async_device_channel_unregister(device, chan); in dma_async_device_channel_unregister()
1133 * dma_async_device_register - registers DMA devices found
1143 struct dma_chan* chan; in dma_async_device_register() local
1146 return -ENODEV; in dma_async_device_register()
1149 if (!device->dev) { in dma_async_device_register()
1151 return -EIO; in dma_async_device_register()
1154 device->owner = device->dev->driver->owner; in dma_async_device_register()
1156 if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) { in dma_async_device_register()
1157 dev_err(device->dev, in dma_async_device_register()
1160 return -EIO; in dma_async_device_register()
1163 if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) { in dma_async_device_register()
1164 dev_err(device->dev, in dma_async_device_register()
1167 return -EIO; in dma_async_device_register()
1170 if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) { in dma_async_device_register()
1171 dev_err(device->dev, in dma_async_device_register()
1174 return -EIO; in dma_async_device_register()
1177 if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) { in dma_async_device_register()
1178 dev_err(device->dev, in dma_async_device_register()
1181 return -EIO; in dma_async_device_register()
1184 if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) { in dma_async_device_register()
1185 dev_err(device->dev, in dma_async_device_register()
1188 return -EIO; in dma_async_device_register()
1191 if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) { in dma_async_device_register()
1192 dev_err(device->dev, in dma_async_device_register()
1195 return -EIO; in dma_async_device_register()
1198 if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) { in dma_async_device_register()
1199 dev_err(device->dev, in dma_async_device_register()
1202 return -EIO; in dma_async_device_register()
1205 if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) { in dma_async_device_register()
1206 dev_err(device->dev, in dma_async_device_register()
1209 return -EIO; in dma_async_device_register()
1212 if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) { in dma_async_device_register()
1213 dev_err(device->dev, in dma_async_device_register()
1216 return -EIO; in dma_async_device_register()
1220 if (!device->device_tx_status) { in dma_async_device_register()
1221 dev_err(device->dev, "Device tx_status is not defined\n"); in dma_async_device_register()
1222 return -EIO; in dma_async_device_register()
1226 if (!device->device_issue_pending) { in dma_async_device_register()
1227 dev_err(device->dev, "Device issue_pending is not defined\n"); in dma_async_device_register()
1228 return -EIO; in dma_async_device_register()
1231 if (!device->device_release) in dma_async_device_register()
1232 dev_dbg(device->dev, in dma_async_device_register()
1235 kref_init(&device->ref); in dma_async_device_register()
1241 dma_cap_set(DMA_ASYNC_TX, device->cap_mask); in dma_async_device_register()
1247 mutex_init(&device->chan_mutex); in dma_async_device_register()
1248 ida_init(&device->chan_ida); in dma_async_device_register()
1251 list_for_each_entry(chan, &device->channels, device_node) { in dma_async_device_register()
1252 rc = __dma_async_device_channel_register(device, chan); in dma_async_device_register()
1259 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask)) in dma_async_device_register()
1260 list_for_each_entry(chan, &device->channels, device_node) { in dma_async_device_register()
1264 if (dma_chan_get(chan) == -ENODEV) { in dma_async_device_register()
1269 rc = -ENODEV; in dma_async_device_register()
1274 list_add_tail_rcu(&device->global_node, &dma_device_list); in dma_async_device_register()
1275 if (dma_has_cap(DMA_PRIVATE, device->cap_mask)) in dma_async_device_register()
1276 device->privatecnt++; /* Always private */ in dma_async_device_register()
1286 if (!device->chancnt) { in dma_async_device_register()
1287 ida_free(&dma_ida, device->dev_id); in dma_async_device_register()
1291 list_for_each_entry(chan, &device->channels, device_node) { in dma_async_device_register()
1292 if (chan->local == NULL) in dma_async_device_register()
1295 chan->dev->chan = NULL; in dma_async_device_register()
1297 device_unregister(&chan->dev->device); in dma_async_device_register()
1298 free_percpu(chan->local); in dma_async_device_register()
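dma_async_device_register() is the provider-side entry point; the checks listed above require that every advertised capability has its prep callback and that tx_status and issue_pending are implemented before any channel is published. A heavily condensed sketch of the registration pattern, assuming a single statically allocated channel; the example_* names are hypothetical and real drivers fill in many more fields, callbacks and per-channel state:

#include <linux/dmaengine.h>

static struct dma_device example_dma_dev;	/* hypothetical provider */
static struct dma_chan example_chan;

/* Stub hooks with the expected signatures; real drivers do actual work here. */
static struct dma_async_tx_descriptor *
example_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
		    size_t len, unsigned long flags)
{
	return NULL;			/* a real driver returns a filled-in descriptor */
}

static enum dma_status example_tx_status(struct dma_chan *chan,
					 dma_cookie_t cookie,
					 struct dma_tx_state *txstate)
{
	return DMA_COMPLETE;		/* a real driver reports per-cookie progress */
}

static void example_issue_pending(struct dma_chan *chan)
{
}

static int example_register(struct device *parent)
{
	struct dma_device *dd = &example_dma_dev;

	dd->dev = parent;		/* must be set, see the -EIO check above */
	dma_cap_set(DMA_MEMCPY, dd->cap_mask);

	/* Each set capability needs its prep hook, plus the two mandatory ones. */
	dd->device_prep_dma_memcpy = example_prep_memcpy;
	dd->device_tx_status	   = example_tx_status;
	dd->device_issue_pending   = example_issue_pending;

	INIT_LIST_HEAD(&dd->channels);
	example_chan.device = dd;
	list_add_tail(&example_chan.device_node, &dd->channels);

	return dma_async_device_register(dd);
}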
1305 * dma_async_device_unregister - unregister a DMA device
1313 struct dma_chan *chan, *n; in dma_async_device_unregister() local
1317 list_for_each_entry_safe(chan, n, &device->channels, device_node) in dma_async_device_unregister()
1318 __dma_async_device_channel_unregister(device, chan); in dma_async_device_unregister()
1325 dma_cap_set(DMA_PRIVATE, device->cap_mask); in dma_async_device_unregister()
1327 ida_free(&dma_ida, device->dev_id); in dma_async_device_unregister()
1342 * dmaenginem_async_device_register - registers DMA devices found
1354 return -ENOMEM; in dmaenginem_async_device_register()
1359 devres_add(device->dev, p); in dmaenginem_async_device_register()
1370 const char *name; member
1375 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1409 struct device *dev = unmap->dev; in dmaengine_unmap()
1412 cnt = unmap->to_cnt; in dmaengine_unmap()
1414 dma_unmap_page(dev, unmap->addr[i], unmap->len, in dmaengine_unmap()
1416 cnt += unmap->from_cnt; in dmaengine_unmap()
1418 dma_unmap_page(dev, unmap->addr[i], unmap->len, in dmaengine_unmap()
1420 cnt += unmap->bidi_cnt; in dmaengine_unmap()
1422 if (unmap->addr[i] == 0) in dmaengine_unmap()
1424 dma_unmap_page(dev, unmap->addr[i], unmap->len, in dmaengine_unmap()
1427 cnt = unmap->map_cnt; in dmaengine_unmap()
1428 mempool_free(unmap, __get_unmap_pool(cnt)->pool); in dmaengine_unmap()
1434 kref_put(&unmap->kref, dmaengine_unmap); in dmaengine_unmap_put()
1445 mempool_destroy(p->pool); in dmaengine_destroy_unmap_pool()
1446 p->pool = NULL; in dmaengine_destroy_unmap_pool()
1447 kmem_cache_destroy(p->cache); in dmaengine_destroy_unmap_pool()
1448 p->cache = NULL; in dmaengine_destroy_unmap_pool()
1461 sizeof(dma_addr_t) * p->size; in dmaengine_init_unmap_pool()
1463 p->cache = kmem_cache_create(p->name, size, 0, in dmaengine_init_unmap_pool()
1465 if (!p->cache) in dmaengine_init_unmap_pool()
1467 p->pool = mempool_create_slab_pool(1, p->cache); in dmaengine_init_unmap_pool()
1468 if (!p->pool) in dmaengine_init_unmap_pool()
1476 return -ENOMEM; in dmaengine_init_unmap_pool()
1484 unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags); in dmaengine_get_unmap_data()
1489 kref_init(&unmap->kref); in dmaengine_get_unmap_data()
1490 unmap->dev = dev; in dmaengine_get_unmap_data()
1491 unmap->map_cnt = nr; in dmaengine_get_unmap_data()
1498 struct dma_chan *chan) in dma_async_tx_descriptor_init() argument
1500 tx->chan = chan; in dma_async_tx_descriptor_init()
1502 spin_lock_init(&tx->lock); in dma_async_tx_descriptor_init()
1511 if (!desc->desc_metadata_mode) { in desc_check_and_set_metadata_mode()
1512 if (dmaengine_is_metadata_mode_supported(desc->chan, mode)) in desc_check_and_set_metadata_mode()
1513 desc->desc_metadata_mode = mode; in desc_check_and_set_metadata_mode()
1515 return -ENOTSUPP; in desc_check_and_set_metadata_mode()
1516 } else if (desc->desc_metadata_mode != mode) { in desc_check_and_set_metadata_mode()
1517 return -EINVAL; in desc_check_and_set_metadata_mode()
1529 return -EINVAL; in dmaengine_desc_attach_metadata()
1535 if (!desc->metadata_ops || !desc->metadata_ops->attach) in dmaengine_desc_attach_metadata()
1536 return -ENOTSUPP; in dmaengine_desc_attach_metadata()
1538 return desc->metadata_ops->attach(desc, data, len); in dmaengine_desc_attach_metadata()
1548 return ERR_PTR(-EINVAL); in dmaengine_desc_get_metadata_ptr()
1554 if (!desc->metadata_ops || !desc->metadata_ops->get_ptr) in dmaengine_desc_get_metadata_ptr()
1555 return ERR_PTR(-ENOTSUPP); in dmaengine_desc_get_metadata_ptr()
1557 return desc->metadata_ops->get_ptr(desc, payload_len, max_len); in dmaengine_desc_get_metadata_ptr()
1567 return -EINVAL; in dmaengine_desc_set_metadata_len()
1573 if (!desc->metadata_ops || !desc->metadata_ops->set_len) in dmaengine_desc_set_metadata_len()
1574 return -ENOTSUPP; in dmaengine_desc_set_metadata_len()
1576 return desc->metadata_ops->set_len(desc, payload_len); in dmaengine_desc_set_metadata_len()
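The three helpers above implement the descriptor metadata interface: a client either attaches its own buffer (DESC_METADATA_CLIENT) or borrows the engine's buffer via get_ptr()/set_len() (DESC_METADATA_ENGINE), and the mode is latched on first use per descriptor. A hedged sketch of the client-buffer flavour for a mem-to-dev transfer; the prepared transfer and the metadata contents are illustrative:

#include <linux/dmaengine.h>

/* Illustrative: attach client-provided metadata before submitting a slave TX. */
static int example_submit_with_metadata(struct dma_chan *chan,
					dma_addr_t buf, size_t len,
					void *md, size_t md_len)
{
	struct dma_async_tx_descriptor *desc;
	int ret;

	desc = dmaengine_prep_slave_single(chan, buf, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* Latches DESC_METADATA_CLIENT mode; fails if the engine can't support it. */
	ret = dmaengine_desc_attach_metadata(desc, md, md_len);
	if (ret)
		return ret;

	return dma_submit_error(dmaengine_submit(desc)) ? -EIO : 0;
}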
1581 * dma_wait_for_async_tx - spin wait for a transaction to complete
1582 * @tx: in-flight transaction to wait on
1592 while (tx->cookie == -EBUSY) { in dma_wait_for_async_tx()
1594 dev_err(tx->chan->device->dev, in dma_wait_for_async_tx()
1601 return dma_sync_wait(tx->chan, tx->cookie); in dma_wait_for_async_tx()
1606 * dma_run_dependencies - process dependent operations on the target channel
1616 struct dma_chan *chan; in dma_run_dependencies() local
1621 /* we'll submit tx->next now, so clear the link */ in dma_run_dependencies()
1623 chan = dep->chan; in dma_run_dependencies()
1633 if (dep_next && dep_next->chan == chan) in dma_run_dependencies()
1634 txd_clear_next(dep); /* ->next will be submitted */ in dma_run_dependencies()
1639 dep->tx_submit(dep); in dma_run_dependencies()
1642 chan->device->device_issue_pending(chan); in dma_run_dependencies()