/*
 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
 */
/*
 * This code implements the DMA subsystem. It provides a HW-neutral interface
 * for other kernel code to use asynchronous memory copy capabilities,
 * if present, and allows different HW DMA drivers to register as providing
 * this service.
 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead.
 *
 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it needs an exclusive channel it can call
 * dma_request_channel(). Once a channel is allocated a reference is taken
 * against its corresponding driver to disable removal.
 *
 * See Documentation/driver-api/dmaengine for more details.
 */
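/*
 * Example (not part of this file): a minimal sketch of the opportunistic
 * client flow described above, assuming a client that only wants a shared
 * memcpy channel.  dmaengine_get(), dma_find_channel(),
 * dma_async_issue_pending() and dmaengine_put() are the real dmaengine entry
 * points; the function name and the elided prep/submit step are illustrative.
 */
static void example_opportunistic_memcpy(void)
{
        struct dma_chan *chan;

        dmaengine_get();                        /* take a subsystem reference */

        chan = dma_find_channel(DMA_MEMCPY);    /* NULL if no public provider */
        if (chan) {
                /* dmaengine_prep_dma_memcpy() / dmaengine_submit() go here */
                dma_async_issue_pending(chan);
        }

        dmaengine_put();                        /* drop the reference when done */
}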
#include <linux/dma-mapping.h>

/* --- sysfs implementation --- */
/**
 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
 */
        return chan_dev->chan;
/* in memcpy_count_show(): */
        count += per_cpu_ptr(chan->local, i)->memcpy_count;
        err = -ENODEV;

/* in bytes_transferred_show(): */
        count += per_cpu_ptr(chan->local, i)->bytes_transferred;
        err = -ENODEV;

/* in in_use_show(): */
        err = sprintf(buf, "%d\n", chan->client_count);
        err = -ENODEV;

/* in chan_dev_release(): */
        if (atomic_dec_and_test(chan_dev->idr_ref)) {
                ida_free(&dma_ida, chan_dev->dev_id);
                kfree(chan_dev->idr_ref);
        }

/* the "dma" device class: */
        .name = "dma",

/* --- client and device registration --- */
/* in __dma_device_satisfies_mask(): */
        bitmap_and(has.bits, want->bits, device->cap_mask.bits,
                   DMA_TX_TYPE_END);
        return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);

/* in dma_chan_to_owner(): */
        return chan->device->owner;
/**
 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
 */
        while (chan->client_count < dmaengine_ref_count) {
                /* ... */
                chan->client_count++;
        }

/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
 */
        /* The channel is already in use, update client count */
        if (chan->client_count) {
                /* ... */
        }
        /* ... */
                return -ENODEV;

        if (chan->device->device_alloc_chan_resources) {
                ret = chan->device->device_alloc_chan_resources(chan);
                /* ... */
        }

        if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
                balance_ref_count(chan);

        chan->client_count++;
/**
 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan: channel to release
 */
        /* This channel is not in use, bail out */
        if (!chan->client_count)
                return;

        chan->client_count--;

        /* This channel is not in use anymore, free it */
        if (!chan->client_count && chan->device->device_free_chan_resources) {
                /* ... */
                chan->device->device_free_chan_resources(chan);
        }

        /* If the channel is used via a DMA request router, free the mapping */
        if (chan->router && chan->router->route_free) {
                chan->router->route_free(chan->router->dev, chan->route_data);
                chan->router = NULL;
                chan->route_data = NULL;
        }

/* in dma_sync_wait(): */
        dev_err(chan->device->dev, "%s: timeout!\n", __func__);
/**
 * dma_cap_mask_all - enable iteration over all operation types
 */

/**
 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
 */

/**
 * channel_table - percpu lookup table for memory-to-memory offload providers
 */

/* in dma_channel_table_init(): */
        /* 'interrupt', 'private', and 'slave' are channel capabilities, ... */
        /* ... */
                err = -ENOMEM;

/**
 * dma_find_channel - find a channel to carry out the operation
 */
        return this_cpu_read(channel_table[tx_type]->chan);
/**
 * dma_issue_pending_all - flush all pending operations across all channels
 */
        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                continue;
        list_for_each_entry(chan, &device->channels, device_node)
                if (chan->client_count)
                        device->device_issue_pending(chan);

/**
 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
 */
        int node = dev_to_node(chan->device->dev);
        return node == -1 || cpumask_test_cpu(cpu, cpumask_of_node(node));
/**
 * min_chan - returns the channel with min count and in the same numa-node as the cpu
 * @cap: capability to match
 * @cpu: cpu index which the channel should be close to
 *
 * If some channels are close to the given cpu, the one with the lowest
 * reference count is returned. ...
 */
        if (!dma_has_cap(cap, device->cap_mask) ||
            dma_has_cap(DMA_PRIVATE, device->cap_mask))
                continue;
        list_for_each_entry(chan, &device->channels, device_node) {
                if (!chan->client_count)
                        continue;
                if (!min || chan->table_count < min->table_count)
                        min = chan;
                /* ... */
                if (!localmin ||
                    chan->table_count < localmin->table_count)
                        localmin = chan;
        }
        /* ... */
        chan->table_count++;

/**
 * dma_channel_rebalance - redistribute the available channels
 *
 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
 * operation type) in the SMP case, and operation isolation (avoid
 * multi-tasking channels) in the non-SMP case. Must be called under
 * dma_list_mutex.
 */
        per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                continue;
        list_for_each_entry(chan, &device->channels, device_node)
                chan->table_count = 0;
        /* ... */
        per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
/* in dma_get_slave_caps(): */
                return -EINVAL;

        device = chan->device;

        /* check if the channel supports slave transactions */
        if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
              test_bit(DMA_CYCLIC, device->cap_mask.bits)))
                return -ENXIO;

        /* ... */
        if (!device->directions)
                return -ENXIO;

        caps->src_addr_widths = device->src_addr_widths;
        caps->dst_addr_widths = device->dst_addr_widths;
        caps->directions = device->directions;
        caps->max_burst = device->max_burst;
        caps->residue_granularity = device->residue_granularity;
        caps->descriptor_reuse = device->descriptor_reuse;
        caps->cmd_pause = !!device->device_pause;
        caps->cmd_resume = !!device->device_resume;
        caps->cmd_terminate = !!device->device_terminate_all;
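/*
 * Example (not part of this file): a hedged sketch of how a client could use
 * dma_get_slave_caps() to check the fields filled in above before committing
 * to a transfer.  The specific requirements tested are illustrative.
 */
static int example_check_slave_caps(struct dma_chan *chan)
{
        struct dma_slave_caps caps;
        int ret;

        ret = dma_get_slave_caps(chan, &caps);
        if (ret)
                return ret;

        /* e.g. require device-to-memory transfers ... */
        if (!(caps.directions & BIT(DMA_DEV_TO_MEM)))
                return -EINVAL;

        /* ... and pause/resume support, if the client depends on it */
        if (!caps.cmd_pause || !caps.cmd_resume)
                return -EINVAL;

        return 0;
}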
/* in private_candidate(): */
        dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);

        /* devices with multiple channels need special handling as we need to
         * ... */
        if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
                list_for_each_entry(chan, &dev->channels, device_node) {
                        if (chan->client_count)
                                return NULL;
                }

        list_for_each_entry(chan, &dev->channels, device_node) {
                if (chan->client_count) {
                        dev_dbg(dev->dev, "%s: %s busy\n",
                                __func__, dma_chan_name(chan));
                        continue;
                }
                /* ... */
                        dev_dbg(dev->dev, "%s: %s filter said false\n",
                                __func__, dma_chan_name(chan));
        }
/* in find_candidate(): */
        /* Found a suitable channel, try to grab, prep, and return it.
         * We first set DMA_PRIVATE to disable balance_ref_count as this
         * channel will not be published in the general-purpose
         * allocator.
         */
        dma_cap_set(DMA_PRIVATE, device->cap_mask);
        device->privatecnt++;
        /* ... */
                if (err == -ENODEV) {
                        dev_dbg(device->dev, "%s: %s module removed\n",
                                __func__, dma_chan_name(chan));
                        list_del_rcu(&device->global_node);
                } else
                        dev_dbg(device->dev,
                                "%s: failed to get %s: (%d)\n",
                                __func__, dma_chan_name(chan), err);

                if (--device->privatecnt == 0)
                        dma_cap_clear(DMA_PRIVATE, device->cap_mask);

        return chan ? chan : ERR_PTR(-EPROBE_DEFER);
/**
 * dma_get_slave_channel - try to get specific channel exclusively
 * @chan: target channel
 */
        int err = -EBUSY;

        if (chan->client_count == 0) {
                struct dma_device *device = chan->device;

                dma_cap_set(DMA_PRIVATE, device->cap_mask);
                device->privatecnt++;
                /* ... */
                        dev_dbg(chan->device->dev,
                                "%s: failed to get %s: (%d)\n",
                                __func__, dma_chan_name(chan), err);
                        if (--device->privatecnt == 0)
                                dma_cap_clear(DMA_PRIVATE, device->cap_mask);
        }
/**
 * __dma_request_channel - try to allocate an exclusive channel
 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback used to select among the available channels
 * @fn_param: opaque parameter to pass to dma_filter_fn
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
        /* Find a channel */
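/*
 * Example (not part of this file): a sketch of the exclusive-channel path
 * through dma_request_channel() (the macro wrapping __dma_request_channel())
 * with a filter callback.  The filter shown, which matches on an opaque
 * driver-provided pointer, is purely illustrative.
 */
static bool example_filter(struct dma_chan *chan, void *param)
{
        /* a real filter inspects chan against driver-specific data */
        return chan->device->dev == param;
}

static struct dma_chan *example_request_memcpy_chan(struct device *dma_dev)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        /* returns NULL when no channel satisfies the mask and filter */
        return dma_request_channel(mask, example_filter, dma_dev);
}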
/* in dma_filter_match(): */
        if (!device->filter.mapcnt)
                return NULL;

        for (i = 0; i < device->filter.mapcnt; i++) {
                const struct dma_slave_map *map = &device->filter.map[i];

                if (!strcmp(map->devname, dev_name(dev)) &&
                    !strcmp(map->slave, name))
                        return map;
        }
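/*
 * Example (not part of this file): a hedged sketch of the provider side of
 * the filter map that dma_filter_match() walks above.  The device names and
 * request-line values are hypothetical; a real controller driver fills the
 * table from its platform data.
 */
static const struct dma_slave_map example_slave_map[] = {
        { "soc-uart.0", "tx", (void *)4 },      /* hypothetical request lines */
        { "soc-uart.0", "rx", (void *)5 },
};

static void example_setup_filter_map(struct dma_device *dd, dma_filter_fn fn)
{
        dd->filter.map = example_slave_map;
        dd->filter.mapcnt = ARRAY_SIZE(example_slave_map);
        dd->filter.fn = fn;     /* used by dma_request_chan() via find_candidate() */
}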
/**
 * dma_request_chan - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
        /* If device-tree is present get slave info from here */
        if (dev->of_node)
                chan = of_dma_request_slave_channel(dev->of_node, name);

        /* ... */

        /* Valid channel found or requester needs to be deferred */
        if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
                return chan;

        /* Try to find the channel via the DMA filter map(s) */
                const struct dma_slave_map *map = dma_filter_match(d, name, dev);

                if (!map)
                        continue;
                /* ... */
                chan = find_candidate(d, &mask, d->filter.fn, map->param);
        /* ... */
        return chan ? chan : ERR_PTR(-EPROBE_DEFER);
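/*
 * Example (not part of this file): a hedged sketch of the consumer side of
 * dma_request_chan().  The channel name "rx" and the slave_config values are
 * illustrative; a real client takes them from its binding and hardware.
 */
static int example_setup_rx_dma(struct device *dev, dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_DEV_TO_MEM,
                .src_addr       = fifo_addr,
                .src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
                .src_maxburst   = 8,
        };
        struct dma_chan *chan;
        int ret;

        chan = dma_request_chan(dev, "rx");
        if (IS_ERR(chan))
                return PTR_ERR(chan);           /* may be -EPROBE_DEFER */

        ret = dmaengine_slave_config(chan, &cfg);
        if (ret) {
                dma_release_channel(chan);
                return ret;
        }

        /* dmaengine_prep_slave_single()/dmaengine_submit() would follow */
        return 0;
}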
/**
 * dma_request_slave_channel - try to allocate an exclusive slave channel
 * @dev: pointer to client device structure
 * @name: slave channel name
 *
 * Returns pointer to appropriate DMA channel on success or NULL.
 */
/**
 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
 * @mask: capabilities that the channel must satisfy
 *
 * Returns pointer to appropriate DMA channel on success or an error pointer.
 */
                return ERR_PTR(-ENODEV);
        /* ... */
                chan = ERR_PTR(-EPROBE_DEFER);
        /* ... */
                chan = ERR_PTR(-ENODEV);
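/*
 * Example (not part of this file): requesting any channel that can do memcpy,
 * without a filter, via dma_request_chan_by_mask().
 */
static struct dma_chan *example_any_memcpy_chan(void)
{
        dma_cap_mask_t mask;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);

        /* error pointer (-ENODEV or -EPROBE_DEFER) on failure, as above */
        return dma_request_chan_by_mask(&mask);
}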
/* in dma_release_channel(): */
        WARN_ONCE(chan->client_count != 1,
                  "chan reference count %d != 1\n", chan->client_count);
        /* ... */
        if (--chan->device->privatecnt == 0)
                dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
/**
 * dmaengine_get - register interest in dma_channels
 */
        /* try to grab channels */
        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                continue;
        list_for_each_entry(chan, &device->channels, device_node) {
                /* ... */
                if (err == -ENODEV) {
                        /* ... */
                        list_del_rcu(&device->global_node);
                } else if (err)
                        dev_dbg(chan->device->dev,
                                "%s: failed to get %s: (%d)\n",
                                __func__, dma_chan_name(chan), err);
        }

        /* if this is the first reference and there were channels
         * waiting we need to rebalance to get those channels
         * incorporated into the channel table
         */

/**
 * dmaengine_put - let dma drivers be removed when ref_count == 0
 */
        dmaengine_ref_count--;
        /* drop channel references */
        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                continue;
        list_for_each_entry(chan, &device->channels, device_node)
                dma_chan_put(chan);
/* in device_has_all_tx_types(): */
        /* A device that satisfies this test has channels that will never cause
         * an async_tx channel switch event as all possible operation types can
         * be handled.
         */
        if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
                return false;
        if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
                return false;
        if (!dma_has_cap(DMA_XOR, device->cap_mask))
                return false;
        if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
                return false;
        if (!dma_has_cap(DMA_PQ, device->cap_mask))
                return false;
        if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
                return false;

/* in get_dma_id(): */
        device->dev_id = rc;
/**
 * dma_async_device_register - registers DMA devices found
 */
        if (!device)
                return -ENODEV;

        if (!device->dev) {
                /* ... */
                return -EIO;
        }

        device->owner = device->dev->driver->owner;

        if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
                dev_err(device->dev,
                        /* "capability set but prep op missing" message */);
                return -EIO;
        }

        if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
                dev_err(device->dev,
                        /* ... */);
                return -EIO;
        }

        if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
                dev_err(device->dev,
                        /* ... */);
                return -EIO;
        }

        if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
                dev_err(device->dev,
                        /* ... */);
                return -EIO;
        }

        if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
                dev_err(device->dev,
                        /* ... */);
                return -EIO;
        }

        if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
                dev_err(device->dev,
                        /* ... */);
                return -EIO;
        }

        if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
                dev_err(device->dev,
                        /* ... */);
                return -EIO;
        }

        if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
                dev_err(device->dev,
                        /* ... */);
                return -EIO;
        }

        if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
                dev_err(device->dev,
                        /* ... */);
                return -EIO;
        }

        if (!device->device_tx_status) {
                dev_err(device->dev, "Device tx_status is not defined\n");
                return -EIO;
        }

        if (!device->device_issue_pending) {
                dev_err(device->dev, "Device issue_pending is not defined\n");
                return -EIO;
        }

        /* ... */
                dma_cap_set(DMA_ASYNC_TX, device->cap_mask);

        /* ... */
                return -ENOMEM;

        list_for_each_entry(chan, &device->channels, device_node) {
                rc = -ENOMEM;
                chan->local = alloc_percpu(typeof(*chan->local));
                if (chan->local == NULL)
                        goto err_out;
                chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
                if (chan->dev == NULL) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        goto err_out;
                }

                chan->chan_id = chancnt++;
                chan->dev->device.class = &dma_devclass;
                chan->dev->device.parent = device->dev;
                chan->dev->chan = chan;
                chan->dev->idr_ref = idr_ref;
                chan->dev->dev_id = device->dev_id;
                /* ... */
                dev_set_name(&chan->dev->device, "dma%dchan%d",
                             device->dev_id, chan->chan_id);

                rc = device_register(&chan->dev->device);
                if (rc) {
                        free_percpu(chan->local);
                        chan->local = NULL;
                        kfree(chan->dev);
                        /* ... */
                        goto err_out;
                }
                chan->client_count = 0;
        }

        if (!chancnt) {
                dev_err(device->dev, "%s: device has no channels!\n", __func__);
                rc = -ENODEV;
                goto err_out;
        }

        device->chancnt = chancnt;

        if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
                list_for_each_entry(chan, &device->channels, device_node) {
                        /* if clients are already waiting for channels we need
                         * to take references on their behalf
                         */
                        if (dma_chan_get(chan) == -ENODEV) {
                                /* note we can only get here for the first
                                 * channel as the remaining channels are
                                 * guaranteed to get a reference
                                 */
                                rc = -ENODEV;
                                /* ... */
                        }
                }
        list_add_tail_rcu(&device->global_node, &dma_device_list);
        if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
                device->privatecnt++;   /* Always private */

/* error path: */
        /* if we never registered a channel just release the idr */
        /* ... */
                ida_free(&dma_ida, device->dev_id);
        /* ... */

        list_for_each_entry(chan, &device->channels, device_node) {
                if (chan->local == NULL)
                        continue;
                /* ... */
                chan->dev->chan = NULL;
                /* ... */
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
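/*
 * Example (not part of this file): a hedged sketch of the provider side that
 * feeds the checks above.  "foo_*" names are placeholders for a real driver's
 * callbacks and state; only the wiring up to dma_async_device_register() is
 * the point here.
 */
struct foo_dma_dev {
        struct dma_device ddev;
        struct dma_chan chan;
        /* descriptor rings, register base, etc. would live here */
};

static int foo_probe(struct platform_device *pdev)
{
        struct foo_dma_dev *fd;
        struct dma_device *dd;

        fd = devm_kzalloc(&pdev->dev, sizeof(*fd), GFP_KERNEL);
        if (!fd)
                return -ENOMEM;

        dd = &fd->ddev;
        dd->dev = &pdev->dev;
        dma_cap_set(DMA_MEMCPY, dd->cap_mask);

        /* every capability set above must have a matching prep callback */
        dd->device_prep_dma_memcpy = foo_prep_memcpy;
        dd->device_tx_status = foo_tx_status;
        dd->device_issue_pending = foo_issue_pending;

        INIT_LIST_HEAD(&dd->channels);
        fd->chan.device = dd;
        list_add_tail(&fd->chan.device_node, &dd->channels);

        return dma_async_device_register(dd);
}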
/**
 * dma_async_device_unregister - unregister a DMA device
 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it being called while channels are in use.
 */
        list_del_rcu(&device->global_node);
        /* ... */
        list_for_each_entry(chan, &device->channels, device_node) {
                WARN_ONCE(chan->client_count,
                          /* "clients still hold a reference" message */,
                          __func__, chan->client_count);
                /* ... */
                chan->dev->chan = NULL;
                /* ... */
                device_unregister(&chan->dev->device);
                free_percpu(chan->local);
        }
/**
 * dmaenginem_async_device_register - registers DMA devices found (managed)
 */
                return -ENOMEM;
        /* ... */
        devres_add(device->dev, p);
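/*
 * Example (not part of this file): the managed variant wired into a probe
 * path; the core then unregisters the device automatically on driver detach
 * (same assumptions as the foo_probe() sketch above).
 */
static int foo_probe_managed(struct platform_device *pdev, struct dma_device *dd)
{
        /* same dma_device setup as foo_probe(), but no explicit unregister */
        return dmaenginem_async_device_register(dd);
}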
#define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }

/* in dmaengine_unmap(): */
        struct device *dev = unmap->dev;

        cnt = unmap->to_cnt;
        for (i = 0; i < cnt; i++)
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_TO_DEVICE);
        cnt += unmap->from_cnt;
        for (; i < cnt; i++)
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_FROM_DEVICE);
        cnt += unmap->bidi_cnt;
        for (; i < cnt; i++) {
                if (unmap->addr[i] == 0)
                        continue;
                dma_unmap_page(dev, unmap->addr[i], unmap->len,
                               DMA_BIDIRECTIONAL);
        }
        cnt = unmap->map_cnt;
        mempool_free(unmap, __get_unmap_pool(cnt)->pool);

/* in dmaengine_unmap_put(): */
        kref_put(&unmap->kref, dmaengine_unmap);

/* in dmaengine_destroy_unmap_pool(): */
        mempool_destroy(p->pool);
        p->pool = NULL;
        kmem_cache_destroy(p->cache);
        p->cache = NULL;

/* in dmaengine_init_unmap_pool(): */
        size_t size = sizeof(struct dmaengine_unmap_data) +
                      sizeof(dma_addr_t) * p->size;

        p->cache = kmem_cache_create(p->name, size, 0,
                                     SLAB_HWCACHE_ALIGN, NULL);
        if (!p->cache)
                break;
        p->pool = mempool_create_slab_pool(1, p->cache);
        if (!p->pool)
                break;
        /* ... */
        return -ENOMEM;

/* in dmaengine_get_unmap_data(): */
        unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
        /* ... */
        kref_init(&unmap->kref);
        unmap->dev = dev;
        unmap->map_cnt = nr;
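/*
 * Example (not part of this file): a hedged sketch of how an offload client
 * uses the unmap pools above for a single page-to-page copy.  Mapping-error
 * checks are trimmed and the prep/submit step is only indicated.
 */
static int example_offload_copy(struct dma_chan *chan,
                                struct page *dst, struct page *src, size_t len)
{
        struct device *dev = chan->device->dev;
        struct dmaengine_unmap_data *unmap;

        unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
        if (!unmap)
                return -ENOMEM;

        unmap->len = len;
        unmap->addr[0] = dma_map_page(dev, src, 0, len, DMA_TO_DEVICE);
        unmap->to_cnt = 1;
        unmap->addr[1] = dma_map_page(dev, dst, 0, len, DMA_FROM_DEVICE);
        unmap->from_cnt = 1;

        /*
         * device_prep_dma_memcpy(chan, unmap->addr[1], unmap->addr[0], len, 0)
         * and dmaengine_submit() would go here; the descriptor takes its own
         * reference on @unmap via dma_set_unmap().
         */

        dmaengine_unmap_put(unmap);     /* drop our initial reference */
        return 0;
}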
/* in dma_async_tx_descriptor_init(): */
        tx->chan = chan;
        spin_lock_init(&tx->lock);

/* dma_wait_for_async_tx - spin wait for a transaction to complete
 * @tx: in-flight transaction to wait on
 */
        while (tx->cookie == -EBUSY) {
                /* ... */
                        dev_err(tx->chan->device->dev,
                                /* "timeout waiting for submission" message */);
                /* ... */
        }
        return dma_sync_wait(tx->chan, tx->cookie);
/* dma_run_dependencies - helper routine for dma drivers to process
 *      (start) dependent operations on their target channel
 */
        /* we'll submit tx->next now, so clear the link */
        /* ... */
        chan = dep->chan;

        /* keep submitting up until a channel switch is detected ... */
                if (dep_next && dep_next->chan == chan)
                        txd_clear_next(dep); /* ->next will be submitted */
                /* ... */
                dep->tx_submit(dep);

        chan->device->device_issue_pending(chan);
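/*
 * Example (not part of this file): a hedged sketch of the point in a DMA
 * driver's completion path (irq/tasklet) where dma_run_dependencies() is
 * typically called, after the cookie is completed and the client callback has
 * run.  How the driver finds the finished descriptor is driver specific and
 * not shown.
 */
static void foo_complete_desc(struct dma_async_tx_descriptor *txd)
{
        dma_cookie_complete(txd);               /* mark the cookie as done */

        if (txd->callback)
                txd->callback(txd->callback_param);

        /* kick any async_tx operations chained behind this descriptor */
        dma_run_dependencies(txd);
}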