1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3 * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved.
4 */
5
6 /*
7 * This code implements the DMA subsystem. It provides a HW-neutral interface
8 * for other kernel code to use asynchronous memory copy capabilities,
9 * if present, and allows different HW DMA drivers to register as providing
10 * this capability.
11 *
 * Because we are accelerating what is already a relatively fast operation,
 * the code goes to great lengths to avoid additional overhead, such as
 * locking.
15 *
16 * LOCKING:
17 *
 * The subsystem keeps a global list of dma_device structs; it is protected by
 * a mutex, dma_list_mutex.
20 *
21 * A subsystem can get access to a channel by calling dmaengine_get() followed
 * by dma_find_channel(), or if it needs an exclusive channel it can call
23 * dma_request_channel(). Once a channel is allocated a reference is taken
24 * against its corresponding driver to disable removal.
25 *
 * Each device has a channels list, which runs unlocked but is never modified
 * once the device is registered; it is just set up by the driver.
28 *
29 * See Documentation/driver-api/dmaengine for more details
30 */
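
/*
 * As an illustrative sketch of the exclusive-channel flow described above
 * (not part of this file; a client would typically do something like):
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_MEMCPY, mask);
 *	chan = dma_request_channel(mask, NULL, NULL);
 *	if (chan) {
 *		... issue transfers on chan ...
 *		dma_release_channel(chan);
 *	}
 */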
31
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
33
34 #include <linux/platform_device.h>
35 #include <linux/dma-mapping.h>
36 #include <linux/init.h>
37 #include <linux/module.h>
38 #include <linux/mm.h>
39 #include <linux/device.h>
40 #include <linux/dmaengine.h>
41 #include <linux/hardirq.h>
42 #include <linux/spinlock.h>
43 #include <linux/percpu.h>
44 #include <linux/rcupdate.h>
45 #include <linux/mutex.h>
46 #include <linux/jiffies.h>
47 #include <linux/rculist.h>
48 #include <linux/idr.h>
49 #include <linux/slab.h>
50 #include <linux/acpi.h>
51 #include <linux/acpi_dma.h>
52 #include <linux/of_dma.h>
53 #include <linux/mempool.h>
54 #include <linux/numa.h>
55
56 static DEFINE_MUTEX(dma_list_mutex);
57 static DEFINE_IDA(dma_ida);
58 static LIST_HEAD(dma_device_list);
59 static long dmaengine_ref_count;
60
61 /* --- sysfs implementation --- */
62
63 /**
64 * dev_to_dma_chan - convert a device pointer to its sysfs container object
 * @dev: device node
66 *
67 * Must be called under dma_list_mutex
68 */
static struct dma_chan *dev_to_dma_chan(struct device *dev)
70 {
71 struct dma_chan_dev *chan_dev;
72
73 chan_dev = container_of(dev, typeof(*chan_dev), device);
74 return chan_dev->chan;
75 }
76
static ssize_t memcpy_count_show(struct device *dev,
78 struct device_attribute *attr, char *buf)
79 {
80 struct dma_chan *chan;
81 unsigned long count = 0;
82 int i;
83 int err;
84
85 mutex_lock(&dma_list_mutex);
86 chan = dev_to_dma_chan(dev);
87 if (chan) {
88 for_each_possible_cpu(i)
89 count += per_cpu_ptr(chan->local, i)->memcpy_count;
90 err = sprintf(buf, "%lu\n", count);
91 } else
92 err = -ENODEV;
93 mutex_unlock(&dma_list_mutex);
94
95 return err;
96 }
97 static DEVICE_ATTR_RO(memcpy_count);
98
static ssize_t bytes_transferred_show(struct device *dev,
100 struct device_attribute *attr, char *buf)
101 {
102 struct dma_chan *chan;
103 unsigned long count = 0;
104 int i;
105 int err;
106
107 mutex_lock(&dma_list_mutex);
108 chan = dev_to_dma_chan(dev);
109 if (chan) {
110 for_each_possible_cpu(i)
111 count += per_cpu_ptr(chan->local, i)->bytes_transferred;
112 err = sprintf(buf, "%lu\n", count);
113 } else
114 err = -ENODEV;
115 mutex_unlock(&dma_list_mutex);
116
117 return err;
118 }
119 static DEVICE_ATTR_RO(bytes_transferred);
120
static ssize_t in_use_show(struct device *dev, struct device_attribute *attr,
122 char *buf)
123 {
124 struct dma_chan *chan;
125 int err;
126
127 mutex_lock(&dma_list_mutex);
128 chan = dev_to_dma_chan(dev);
129 if (chan)
130 err = sprintf(buf, "%d\n", chan->client_count);
131 else
132 err = -ENODEV;
133 mutex_unlock(&dma_list_mutex);
134
135 return err;
136 }
137 static DEVICE_ATTR_RO(in_use);
138
139 static struct attribute *dma_dev_attrs[] = {
140 &dev_attr_memcpy_count.attr,
141 &dev_attr_bytes_transferred.attr,
142 &dev_attr_in_use.attr,
143 NULL,
144 };
145 ATTRIBUTE_GROUPS(dma_dev);
146
static void chan_dev_release(struct device *dev)
148 {
149 struct dma_chan_dev *chan_dev;
150
151 chan_dev = container_of(dev, typeof(*chan_dev), device);
152 if (atomic_dec_and_test(chan_dev->idr_ref)) {
153 ida_free(&dma_ida, chan_dev->dev_id);
154 kfree(chan_dev->idr_ref);
155 }
156 kfree(chan_dev);
157 }
158
159 static struct class dma_devclass = {
160 .name = "dma",
161 .dev_groups = dma_dev_groups,
162 .dev_release = chan_dev_release,
163 };
164
165 /* --- client and device registration --- */
166
167 #define dma_device_satisfies_mask(device, mask) \
168 __dma_device_satisfies_mask((device), &(mask))
169 static int
__dma_device_satisfies_mask(struct dma_device *device,
171 const dma_cap_mask_t *want)
172 {
173 dma_cap_mask_t has;
174
175 bitmap_and(has.bits, want->bits, device->cap_mask.bits,
176 DMA_TX_TYPE_END);
177 return bitmap_equal(want->bits, has.bits, DMA_TX_TYPE_END);
178 }
179
static struct module *dma_chan_to_owner(struct dma_chan *chan)
181 {
182 return chan->device->owner;
183 }
184
185 /**
186 * balance_ref_count - catch up the channel reference count
 * @chan: channel to balance ->client_count versus dmaengine_ref_count
188 *
189 * balance_ref_count must be called under dma_list_mutex
190 */
static void balance_ref_count(struct dma_chan *chan)
192 {
193 struct module *owner = dma_chan_to_owner(chan);
194
195 while (chan->client_count < dmaengine_ref_count) {
196 __module_get(owner);
197 chan->client_count++;
198 }
199 }
200
201 /**
202 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
204 *
205 * Must be called under dma_list_mutex
206 */
static int dma_chan_get(struct dma_chan *chan)
208 {
209 struct module *owner = dma_chan_to_owner(chan);
210 int ret;
211
212 /* The channel is already in use, update client count */
213 if (chan->client_count) {
214 __module_get(owner);
215 chan->client_count++;
216 return 0;
217 }
218
219 if (!try_module_get(owner))
220 return -ENODEV;
221
222 /* allocate upon first client reference */
223 if (chan->device->device_alloc_chan_resources) {
224 ret = chan->device->device_alloc_chan_resources(chan);
225 if (ret < 0)
226 goto err_out;
227 }
228
229 chan->client_count++;
230
231 if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
232 balance_ref_count(chan);
233
234 return 0;
235
236 err_out:
237 module_put(owner);
238 return ret;
239 }
240
241 /**
242 * dma_chan_put - drop a reference to a dma channel's parent driver module
 * @chan: channel to release
244 *
245 * Must be called under dma_list_mutex
246 */
static void dma_chan_put(struct dma_chan *chan)
248 {
249 /* This channel is not in use, bail out */
250 if (!chan->client_count)
251 return;
252
253 chan->client_count--;
254 module_put(dma_chan_to_owner(chan));
255
256 /* This channel is not in use anymore, free it */
257 if (!chan->client_count && chan->device->device_free_chan_resources) {
258 /* Make sure all operations have completed */
259 dmaengine_synchronize(chan);
260 chan->device->device_free_chan_resources(chan);
261 }
262
263 /* If the channel is used via a DMA request router, free the mapping */
264 if (chan->router && chan->router->route_free) {
265 chan->router->route_free(chan->router->dev, chan->route_data);
266 chan->router = NULL;
267 chan->route_data = NULL;
268 }
269 }
270
enum dma_status dma_sync_wait(struct dma_chan *chan, dma_cookie_t cookie)
272 {
273 enum dma_status status;
274 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
275
276 dma_async_issue_pending(chan);
277 do {
278 status = dma_async_is_tx_complete(chan, cookie, NULL, NULL);
279 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
280 dev_err(chan->device->dev, "%s: timeout!\n", __func__);
281 return DMA_ERROR;
282 }
283 if (status != DMA_IN_PROGRESS)
284 break;
285 cpu_relax();
286 } while (1);
287
288 return status;
289 }
290 EXPORT_SYMBOL(dma_sync_wait);
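
/*
 * Illustrative use of dma_sync_wait() (a sketch, not part of this file):
 * a client that holds the cookie of a submitted descriptor can poll for
 * completion instead of using a completion callback. "chan", "desc" and
 * "dev" are assumed to come from the client's earlier prep step.
 *
 *	dma_cookie_t cookie = dmaengine_submit(desc);
 *
 *	if (dma_sync_wait(chan, cookie) != DMA_COMPLETE)
 *		dev_err(dev, "DMA transfer failed or timed out\n");
 */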
291
292 /**
293 * dma_cap_mask_all - enable iteration over all operation types
294 */
295 static dma_cap_mask_t dma_cap_mask_all;
296
297 /**
298 * dma_chan_tbl_ent - tracks channel allocations per core/operation
 * @chan: associated channel for this entry
300 */
301 struct dma_chan_tbl_ent {
302 struct dma_chan *chan;
303 };
304
305 /**
306 * channel_table - percpu lookup table for memory-to-memory offload providers
307 */
308 static struct dma_chan_tbl_ent __percpu *channel_table[DMA_TX_TYPE_END];
309
static int __init dma_channel_table_init(void)
311 {
312 enum dma_transaction_type cap;
313 int err = 0;
314
315 bitmap_fill(dma_cap_mask_all.bits, DMA_TX_TYPE_END);
316
317 /* 'interrupt', 'private', and 'slave' are channel capabilities,
318 * but are not associated with an operation so they do not need
319 * an entry in the channel_table
320 */
321 clear_bit(DMA_INTERRUPT, dma_cap_mask_all.bits);
322 clear_bit(DMA_PRIVATE, dma_cap_mask_all.bits);
323 clear_bit(DMA_SLAVE, dma_cap_mask_all.bits);
324
325 for_each_dma_cap_mask(cap, dma_cap_mask_all) {
326 channel_table[cap] = alloc_percpu(struct dma_chan_tbl_ent);
327 if (!channel_table[cap]) {
328 err = -ENOMEM;
329 break;
330 }
331 }
332
333 if (err) {
334 pr_err("initialization failure\n");
335 for_each_dma_cap_mask(cap, dma_cap_mask_all)
336 free_percpu(channel_table[cap]);
337 }
338
339 return err;
340 }
341 arch_initcall(dma_channel_table_init);
342
343 /**
344 * dma_find_channel - find a channel to carry out the operation
345 * @tx_type: transaction type
346 */
struct dma_chan *dma_find_channel(enum dma_transaction_type tx_type)
348 {
349 return this_cpu_read(channel_table[tx_type]->chan);
350 }
351 EXPORT_SYMBOL(dma_find_channel);
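
/*
 * dma_find_channel() only returns channels once at least one client has
 * called dmaengine_get(), since the per-cpu channel_table is populated
 * only while dmaengine_ref_count is non-zero. A minimal sketch
 * (illustrative, not part of this file):
 *
 *	dmaengine_get();
 *	...
 *	chan = dma_find_channel(DMA_MEMCPY);
 *	if (chan)
 *		... build and submit a memcpy descriptor on chan ...
 *	...
 *	dmaengine_put();
 */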
352
353 /**
354 * dma_issue_pending_all - flush all pending operations across all channels
355 */
void dma_issue_pending_all(void)
357 {
358 struct dma_device *device;
359 struct dma_chan *chan;
360
361 rcu_read_lock();
362 list_for_each_entry_rcu(device, &dma_device_list, global_node) {
363 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
364 continue;
365 list_for_each_entry(chan, &device->channels, device_node)
366 if (chan->client_count)
367 device->device_issue_pending(chan);
368 }
369 rcu_read_unlock();
370 }
371 EXPORT_SYMBOL(dma_issue_pending_all);
372
373 /**
374 * dma_chan_is_local - returns true if the channel is in the same numa-node as the cpu
375 */
static bool dma_chan_is_local(struct dma_chan *chan, int cpu)
377 {
378 int node = dev_to_node(chan->device->dev);
379 return node == NUMA_NO_NODE ||
380 cpumask_test_cpu(cpu, cpumask_of_node(node));
381 }
382
383 /**
384 * min_chan - returns the channel with min count and in the same numa-node as the cpu
385 * @cap: capability to match
386 * @cpu: cpu index which the channel should be close to
387 *
388 * If some channels are close to the given cpu, the one with the lowest
389 * reference count is returned. Otherwise, cpu is ignored and only the
390 * reference count is taken into account.
391 * Must be called under dma_list_mutex.
392 */
static struct dma_chan *min_chan(enum dma_transaction_type cap, int cpu)
394 {
395 struct dma_device *device;
396 struct dma_chan *chan;
397 struct dma_chan *min = NULL;
398 struct dma_chan *localmin = NULL;
399
400 list_for_each_entry(device, &dma_device_list, global_node) {
401 if (!dma_has_cap(cap, device->cap_mask) ||
402 dma_has_cap(DMA_PRIVATE, device->cap_mask))
403 continue;
404 list_for_each_entry(chan, &device->channels, device_node) {
405 if (!chan->client_count)
406 continue;
407 if (!min || chan->table_count < min->table_count)
408 min = chan;
409
410 if (dma_chan_is_local(chan, cpu))
411 if (!localmin ||
412 chan->table_count < localmin->table_count)
413 localmin = chan;
414 }
415 }
416
417 chan = localmin ? localmin : min;
418
419 if (chan)
420 chan->table_count++;
421
422 return chan;
423 }
424
425 /**
426 * dma_channel_rebalance - redistribute the available channels
427 *
428 * Optimize for cpu isolation (each cpu gets a dedicated channel for an
429 * operation type) in the SMP case, and operation isolation (avoid
430 * multi-tasking channels) in the non-SMP case. Must be called under
431 * dma_list_mutex.
432 */
static void dma_channel_rebalance(void)
434 {
435 struct dma_chan *chan;
436 struct dma_device *device;
437 int cpu;
438 int cap;
439
440 /* undo the last distribution */
441 for_each_dma_cap_mask(cap, dma_cap_mask_all)
442 for_each_possible_cpu(cpu)
443 per_cpu_ptr(channel_table[cap], cpu)->chan = NULL;
444
445 list_for_each_entry(device, &dma_device_list, global_node) {
446 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
447 continue;
448 list_for_each_entry(chan, &device->channels, device_node)
449 chan->table_count = 0;
450 }
451
452 /* don't populate the channel_table if no clients are available */
453 if (!dmaengine_ref_count)
454 return;
455
456 /* redistribute available channels */
457 for_each_dma_cap_mask(cap, dma_cap_mask_all)
458 for_each_online_cpu(cpu) {
459 chan = min_chan(cap, cpu);
460 per_cpu_ptr(channel_table[cap], cpu)->chan = chan;
461 }
462 }
463
int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
465 {
466 struct dma_device *device;
467
468 if (!chan || !caps)
469 return -EINVAL;
470
471 device = chan->device;
472
473 /* check if the channel supports slave transactions */
474 if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
475 test_bit(DMA_CYCLIC, device->cap_mask.bits)))
476 return -ENXIO;
477
478 /*
479 * Check whether it reports it uses the generic slave
480 * capabilities, if not, that means it doesn't support any
481 * kind of slave capabilities reporting.
482 */
483 if (!device->directions)
484 return -ENXIO;
485
486 caps->src_addr_widths = device->src_addr_widths;
487 caps->dst_addr_widths = device->dst_addr_widths;
488 caps->directions = device->directions;
489 caps->max_burst = device->max_burst;
490 caps->residue_granularity = device->residue_granularity;
491 caps->descriptor_reuse = device->descriptor_reuse;
492 caps->cmd_pause = !!device->device_pause;
493 caps->cmd_resume = !!device->device_resume;
494 caps->cmd_terminate = !!device->device_terminate_all;
495
496 return 0;
497 }
498 EXPORT_SYMBOL_GPL(dma_get_slave_caps);
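
/*
 * Typical use of dma_get_slave_caps() (an illustrative sketch): a client
 * checks what the provider can do before building its dma_slave_config.
 *
 *	struct dma_slave_caps caps;
 *
 *	if (!dma_get_slave_caps(chan, &caps) &&
 *	    (caps.directions & BIT(DMA_DEV_TO_MEM)) &&
 *	    (caps.src_addr_widths & BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)))
 *		... a 32-bit device-to-memory transfer is supported ...
 */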
499
static struct dma_chan *private_candidate(const dma_cap_mask_t *mask,
501 struct dma_device *dev,
502 dma_filter_fn fn, void *fn_param)
503 {
504 struct dma_chan *chan;
505
506 if (mask && !__dma_device_satisfies_mask(dev, mask)) {
507 dev_dbg(dev->dev, "%s: wrong capabilities\n", __func__);
508 return NULL;
509 }
510 /* devices with multiple channels need special handling as we need to
511 * ensure that all channels are either private or public.
512 */
513 if (dev->chancnt > 1 && !dma_has_cap(DMA_PRIVATE, dev->cap_mask))
514 list_for_each_entry(chan, &dev->channels, device_node) {
515 /* some channels are already publicly allocated */
516 if (chan->client_count)
517 return NULL;
518 }
519
520 list_for_each_entry(chan, &dev->channels, device_node) {
521 if (chan->client_count) {
522 dev_dbg(dev->dev, "%s: %s busy\n",
523 __func__, dma_chan_name(chan));
524 continue;
525 }
526 if (fn && !fn(chan, fn_param)) {
527 dev_dbg(dev->dev, "%s: %s filter said false\n",
528 __func__, dma_chan_name(chan));
529 continue;
530 }
531 return chan;
532 }
533
534 return NULL;
535 }
536
static struct dma_chan *find_candidate(struct dma_device *device,
538 const dma_cap_mask_t *mask,
539 dma_filter_fn fn, void *fn_param)
540 {
541 struct dma_chan *chan = private_candidate(mask, device, fn, fn_param);
542 int err;
543
544 if (chan) {
545 /* Found a suitable channel, try to grab, prep, and return it.
546 * We first set DMA_PRIVATE to disable balance_ref_count as this
547 * channel will not be published in the general-purpose
548 * allocator
549 */
550 dma_cap_set(DMA_PRIVATE, device->cap_mask);
551 device->privatecnt++;
552 err = dma_chan_get(chan);
553
554 if (err) {
555 if (err == -ENODEV) {
556 dev_dbg(device->dev, "%s: %s module removed\n",
557 __func__, dma_chan_name(chan));
558 list_del_rcu(&device->global_node);
559 } else
560 dev_dbg(device->dev,
561 "%s: failed to get %s: (%d)\n",
562 __func__, dma_chan_name(chan), err);
563
564 if (--device->privatecnt == 0)
565 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
566
567 chan = ERR_PTR(err);
568 }
569 }
570
571 return chan ? chan : ERR_PTR(-EPROBE_DEFER);
572 }
573
574 /**
575 * dma_get_slave_channel - try to get specific channel exclusively
576 * @chan: target channel
577 */
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan)
579 {
580 int err = -EBUSY;
581
582 /* lock against __dma_request_channel */
583 mutex_lock(&dma_list_mutex);
584
585 if (chan->client_count == 0) {
586 struct dma_device *device = chan->device;
587
588 dma_cap_set(DMA_PRIVATE, device->cap_mask);
589 device->privatecnt++;
590 err = dma_chan_get(chan);
591 if (err) {
592 dev_dbg(chan->device->dev,
593 "%s: failed to get %s: (%d)\n",
594 __func__, dma_chan_name(chan), err);
595 chan = NULL;
596 if (--device->privatecnt == 0)
597 dma_cap_clear(DMA_PRIVATE, device->cap_mask);
598 }
599 } else
600 chan = NULL;
601
602 mutex_unlock(&dma_list_mutex);
603
604
605 return chan;
606 }
607 EXPORT_SYMBOL_GPL(dma_get_slave_channel);
608
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device)
610 {
611 dma_cap_mask_t mask;
612 struct dma_chan *chan;
613
614 dma_cap_zero(mask);
615 dma_cap_set(DMA_SLAVE, mask);
616
617 /* lock against __dma_request_channel */
618 mutex_lock(&dma_list_mutex);
619
620 chan = find_candidate(device, &mask, NULL, NULL);
621
622 mutex_unlock(&dma_list_mutex);
623
624 return IS_ERR(chan) ? NULL : chan;
625 }
626 EXPORT_SYMBOL_GPL(dma_get_any_slave_channel);
627
628 /**
629 * __dma_request_channel - try to allocate an exclusive channel
630 * @mask: capabilities that the channel must satisfy
 * @fn: optional callback used to filter available channels
632 * @fn_param: opaque parameter to pass to dma_filter_fn
633 * @np: device node to look for DMA channels
634 *
635 * Returns pointer to appropriate DMA channel on success or NULL.
636 */
struct dma_chan *__dma_request_channel(const dma_cap_mask_t *mask,
638 dma_filter_fn fn, void *fn_param,
639 struct device_node *np)
640 {
641 struct dma_device *device, *_d;
642 struct dma_chan *chan = NULL;
643
644 /* Find a channel */
645 mutex_lock(&dma_list_mutex);
646 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
647 /* Finds a DMA controller with matching device node */
648 if (np && device->dev->of_node && np != device->dev->of_node)
649 continue;
650
651 chan = find_candidate(device, mask, fn, fn_param);
652 if (!IS_ERR(chan))
653 break;
654
655 chan = NULL;
656 }
657 mutex_unlock(&dma_list_mutex);
658
659 pr_debug("%s: %s (%s)\n",
660 __func__,
661 chan ? "success" : "fail",
662 chan ? dma_chan_name(chan) : NULL);
663
664 return chan;
665 }
666 EXPORT_SYMBOL_GPL(__dma_request_channel);
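
/*
 * Most clients reach this through the dma_request_channel() wrapper in
 * dmaengine.h. A hedged sketch (my_filter() and my_dev are hypothetical
 * client-side names; the filter just returns true for acceptable channels):
 *
 *	static bool my_filter(struct dma_chan *chan, void *param)
 *	{
 *		return chan->device->dev == param;
 *	}
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, my_filter, my_dev);
 */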
667
static const struct dma_slave_map *dma_filter_match(struct dma_device *device,
669 const char *name,
670 struct device *dev)
671 {
672 int i;
673
674 if (!device->filter.mapcnt)
675 return NULL;
676
677 for (i = 0; i < device->filter.mapcnt; i++) {
678 const struct dma_slave_map *map = &device->filter.map[i];
679
680 if (!strcmp(map->devname, dev_name(dev)) &&
681 !strcmp(map->slave, name))
682 return map;
683 }
684
685 return NULL;
686 }
687
688 /**
689 * dma_request_chan - try to allocate an exclusive slave channel
690 * @dev: pointer to client device structure
691 * @name: slave channel name
692 *
693 * Returns pointer to appropriate DMA channel on success or an error pointer.
694 */
struct dma_chan *dma_request_chan(struct device *dev, const char *name)
696 {
697 struct dma_device *d, *_d;
698 struct dma_chan *chan = NULL;
699
700 /* If device-tree is present get slave info from here */
701 if (dev->of_node)
702 chan = of_dma_request_slave_channel(dev->of_node, name);
703
704 /* If device was enumerated by ACPI get slave info from here */
705 if (has_acpi_companion(dev) && !chan)
706 chan = acpi_dma_request_slave_chan_by_name(dev, name);
707
708 if (chan) {
709 /* Valid channel found or requester needs to be deferred */
710 if (!IS_ERR(chan) || PTR_ERR(chan) == -EPROBE_DEFER)
711 return chan;
712 }
713
714 /* Try to find the channel via the DMA filter map(s) */
715 mutex_lock(&dma_list_mutex);
716 list_for_each_entry_safe(d, _d, &dma_device_list, global_node) {
717 dma_cap_mask_t mask;
718 const struct dma_slave_map *map = dma_filter_match(d, name, dev);
719
720 if (!map)
721 continue;
722
723 dma_cap_zero(mask);
724 dma_cap_set(DMA_SLAVE, mask);
725
726 chan = find_candidate(d, &mask, d->filter.fn, map->param);
727 if (!IS_ERR(chan))
728 break;
729 }
730 mutex_unlock(&dma_list_mutex);
731
732 return chan ? chan : ERR_PTR(-EPROBE_DEFER);
733 }
734 EXPORT_SYMBOL_GPL(dma_request_chan);
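
/*
 * Sketch of a slave client using dma_request_chan() (illustrative only;
 * the "tx" channel name is an assumption taken from the client's DT
 * binding or filter map). The error pointer may be -EPROBE_DEFER and
 * should be propagated:
 *
 *	chan = dma_request_chan(&pdev->dev, "tx");
 *	if (IS_ERR(chan))
 *		return PTR_ERR(chan);
 *	...
 *	dma_release_channel(chan);
 */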
735
736 /**
737 * dma_request_slave_channel - try to allocate an exclusive slave channel
738 * @dev: pointer to client device structure
739 * @name: slave channel name
740 *
741 * Returns pointer to appropriate DMA channel on success or NULL.
742 */
struct dma_chan *dma_request_slave_channel(struct device *dev,
744 const char *name)
745 {
746 struct dma_chan *ch = dma_request_chan(dev, name);
747 if (IS_ERR(ch))
748 return NULL;
749
750 return ch;
751 }
752 EXPORT_SYMBOL_GPL(dma_request_slave_channel);
753
754 /**
755 * dma_request_chan_by_mask - allocate a channel satisfying certain capabilities
756 * @mask: capabilities that the channel must satisfy
757 *
758 * Returns pointer to appropriate DMA channel on success or an error pointer.
759 */
struct dma_chan *dma_request_chan_by_mask(const dma_cap_mask_t *mask)
761 {
762 struct dma_chan *chan;
763
764 if (!mask)
765 return ERR_PTR(-ENODEV);
766
767 chan = __dma_request_channel(mask, NULL, NULL, NULL);
768 if (!chan) {
769 mutex_lock(&dma_list_mutex);
770 if (list_empty(&dma_device_list))
771 chan = ERR_PTR(-EPROBE_DEFER);
772 else
773 chan = ERR_PTR(-ENODEV);
774 mutex_unlock(&dma_list_mutex);
775 }
776
777 return chan;
778 }
779 EXPORT_SYMBOL_GPL(dma_request_chan_by_mask);
780
void dma_release_channel(struct dma_chan *chan)
782 {
783 mutex_lock(&dma_list_mutex);
784 WARN_ONCE(chan->client_count != 1,
785 "chan reference count %d != 1\n", chan->client_count);
786 dma_chan_put(chan);
787 /* drop PRIVATE cap enabled by __dma_request_channel() */
788 if (--chan->device->privatecnt == 0)
789 dma_cap_clear(DMA_PRIVATE, chan->device->cap_mask);
790 mutex_unlock(&dma_list_mutex);
791 }
792 EXPORT_SYMBOL_GPL(dma_release_channel);
793
794 /**
795 * dmaengine_get - register interest in dma_channels
796 */
void dmaengine_get(void)
798 {
799 struct dma_device *device, *_d;
800 struct dma_chan *chan;
801 int err;
802
803 mutex_lock(&dma_list_mutex);
804 dmaengine_ref_count++;
805
806 /* try to grab channels */
807 list_for_each_entry_safe(device, _d, &dma_device_list, global_node) {
808 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
809 continue;
810 list_for_each_entry(chan, &device->channels, device_node) {
811 err = dma_chan_get(chan);
812 if (err == -ENODEV) {
813 /* module removed before we could use it */
814 list_del_rcu(&device->global_node);
815 break;
816 } else if (err)
817 dev_dbg(chan->device->dev,
818 "%s: failed to get %s: (%d)\n",
819 __func__, dma_chan_name(chan), err);
820 }
821 }
822
823 /* if this is the first reference and there were channels
824 * waiting we need to rebalance to get those channels
825 * incorporated into the channel table
826 */
827 if (dmaengine_ref_count == 1)
828 dma_channel_rebalance();
829 mutex_unlock(&dma_list_mutex);
830 }
831 EXPORT_SYMBOL(dmaengine_get);
832
833 /**
834 * dmaengine_put - let dma drivers be removed when ref_count == 0
835 */
void dmaengine_put(void)
837 {
838 struct dma_device *device;
839 struct dma_chan *chan;
840
841 mutex_lock(&dma_list_mutex);
842 dmaengine_ref_count--;
843 BUG_ON(dmaengine_ref_count < 0);
844 /* drop channel references */
845 list_for_each_entry(device, &dma_device_list, global_node) {
846 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
847 continue;
848 list_for_each_entry(chan, &device->channels, device_node)
849 dma_chan_put(chan);
850 }
851 mutex_unlock(&dma_list_mutex);
852 }
853 EXPORT_SYMBOL(dmaengine_put);
854
static bool device_has_all_tx_types(struct dma_device *device)
856 {
857 /* A device that satisfies this test has channels that will never cause
858 * an async_tx channel switch event as all possible operation types can
859 * be handled.
860 */
861 #ifdef CONFIG_ASYNC_TX_DMA
862 if (!dma_has_cap(DMA_INTERRUPT, device->cap_mask))
863 return false;
864 #endif
865
866 #if IS_ENABLED(CONFIG_ASYNC_MEMCPY)
867 if (!dma_has_cap(DMA_MEMCPY, device->cap_mask))
868 return false;
869 #endif
870
871 #if IS_ENABLED(CONFIG_ASYNC_XOR)
872 if (!dma_has_cap(DMA_XOR, device->cap_mask))
873 return false;
874
875 #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
876 if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
877 return false;
878 #endif
879 #endif
880
881 #if IS_ENABLED(CONFIG_ASYNC_PQ)
882 if (!dma_has_cap(DMA_PQ, device->cap_mask))
883 return false;
884
885 #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
886 if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
887 return false;
888 #endif
889 #endif
890
891 return true;
892 }
893
static int get_dma_id(struct dma_device *device)
895 {
896 int rc = ida_alloc(&dma_ida, GFP_KERNEL);
897
898 if (rc < 0)
899 return rc;
900 device->dev_id = rc;
901 return 0;
902 }
903
904 /**
905 * dma_async_device_register - registers DMA devices found
906 * @device: &dma_device
907 */
int dma_async_device_register(struct dma_device *device)
909 {
910 int chancnt = 0, rc;
911 struct dma_chan* chan;
912 atomic_t *idr_ref;
913
914 if (!device)
915 return -ENODEV;
916
917 /* validate device routines */
918 if (!device->dev) {
		pr_err("DMA device must have dev\n");
920 return -EIO;
921 }
922
923 device->owner = device->dev->driver->owner;
924
925 if (dma_has_cap(DMA_MEMCPY, device->cap_mask) && !device->device_prep_dma_memcpy) {
926 dev_err(device->dev,
927 "Device claims capability %s, but op is not defined\n",
928 "DMA_MEMCPY");
929 return -EIO;
930 }
931
932 if (dma_has_cap(DMA_XOR, device->cap_mask) && !device->device_prep_dma_xor) {
933 dev_err(device->dev,
934 "Device claims capability %s, but op is not defined\n",
935 "DMA_XOR");
936 return -EIO;
937 }
938
939 if (dma_has_cap(DMA_XOR_VAL, device->cap_mask) && !device->device_prep_dma_xor_val) {
940 dev_err(device->dev,
941 "Device claims capability %s, but op is not defined\n",
942 "DMA_XOR_VAL");
943 return -EIO;
944 }
945
946 if (dma_has_cap(DMA_PQ, device->cap_mask) && !device->device_prep_dma_pq) {
947 dev_err(device->dev,
948 "Device claims capability %s, but op is not defined\n",
949 "DMA_PQ");
950 return -EIO;
951 }
952
953 if (dma_has_cap(DMA_PQ_VAL, device->cap_mask) && !device->device_prep_dma_pq_val) {
954 dev_err(device->dev,
955 "Device claims capability %s, but op is not defined\n",
956 "DMA_PQ_VAL");
957 return -EIO;
958 }
959
960 if (dma_has_cap(DMA_MEMSET, device->cap_mask) && !device->device_prep_dma_memset) {
961 dev_err(device->dev,
962 "Device claims capability %s, but op is not defined\n",
963 "DMA_MEMSET");
964 return -EIO;
965 }
966
967 if (dma_has_cap(DMA_INTERRUPT, device->cap_mask) && !device->device_prep_dma_interrupt) {
968 dev_err(device->dev,
969 "Device claims capability %s, but op is not defined\n",
970 "DMA_INTERRUPT");
971 return -EIO;
972 }
973
974 if (dma_has_cap(DMA_CYCLIC, device->cap_mask) && !device->device_prep_dma_cyclic) {
975 dev_err(device->dev,
976 "Device claims capability %s, but op is not defined\n",
977 "DMA_CYCLIC");
978 return -EIO;
979 }
980
981 if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
982 dev_err(device->dev,
983 "Device claims capability %s, but op is not defined\n",
984 "DMA_INTERLEAVE");
985 return -EIO;
986 }
987
988
989 if (!device->device_tx_status) {
990 dev_err(device->dev, "Device tx_status is not defined\n");
991 return -EIO;
992 }
993
994
995 if (!device->device_issue_pending) {
996 dev_err(device->dev, "Device issue_pending is not defined\n");
997 return -EIO;
998 }
999
1000 /* note: this only matters in the
1001 * CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=n case
1002 */
1003 if (device_has_all_tx_types(device))
1004 dma_cap_set(DMA_ASYNC_TX, device->cap_mask);
1005
1006 idr_ref = kmalloc(sizeof(*idr_ref), GFP_KERNEL);
1007 if (!idr_ref)
1008 return -ENOMEM;
1009 rc = get_dma_id(device);
1010 if (rc != 0) {
1011 kfree(idr_ref);
1012 return rc;
1013 }
1014
1015 atomic_set(idr_ref, 0);
1016
1017 /* represent channels in sysfs. Probably want devs too */
1018 list_for_each_entry(chan, &device->channels, device_node) {
1019 rc = -ENOMEM;
1020 chan->local = alloc_percpu(typeof(*chan->local));
1021 if (chan->local == NULL)
1022 goto err_out;
1023 chan->dev = kzalloc(sizeof(*chan->dev), GFP_KERNEL);
1024 if (chan->dev == NULL) {
1025 free_percpu(chan->local);
1026 chan->local = NULL;
1027 goto err_out;
1028 }
1029
1030 chan->chan_id = chancnt++;
1031 chan->dev->device.class = &dma_devclass;
1032 chan->dev->device.parent = device->dev;
1033 chan->dev->chan = chan;
1034 chan->dev->idr_ref = idr_ref;
1035 chan->dev->dev_id = device->dev_id;
1036 atomic_inc(idr_ref);
1037 dev_set_name(&chan->dev->device, "dma%dchan%d",
1038 device->dev_id, chan->chan_id);
1039
1040 rc = device_register(&chan->dev->device);
1041 if (rc) {
1042 free_percpu(chan->local);
1043 chan->local = NULL;
1044 kfree(chan->dev);
1045 atomic_dec(idr_ref);
1046 goto err_out;
1047 }
1048 chan->client_count = 0;
1049 }
1050
1051 if (!chancnt) {
1052 dev_err(device->dev, "%s: device has no channels!\n", __func__);
1053 rc = -ENODEV;
1054 goto err_out;
1055 }
1056
1057 device->chancnt = chancnt;
1058
1059 mutex_lock(&dma_list_mutex);
1060 /* take references on public channels */
1061 if (dmaengine_ref_count && !dma_has_cap(DMA_PRIVATE, device->cap_mask))
1062 list_for_each_entry(chan, &device->channels, device_node) {
1063 /* if clients are already waiting for channels we need
1064 * to take references on their behalf
1065 */
1066 if (dma_chan_get(chan) == -ENODEV) {
1067 /* note we can only get here for the first
1068 * channel as the remaining channels are
1069 * guaranteed to get a reference
1070 */
1071 rc = -ENODEV;
1072 mutex_unlock(&dma_list_mutex);
1073 goto err_out;
1074 }
1075 }
1076 list_add_tail_rcu(&device->global_node, &dma_device_list);
1077 if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
1078 device->privatecnt++; /* Always private */
1079 dma_channel_rebalance();
1080 mutex_unlock(&dma_list_mutex);
1081
1082 return 0;
1083
1084 err_out:
1085 /* if we never registered a channel just release the idr */
1086 if (atomic_read(idr_ref) == 0) {
1087 ida_free(&dma_ida, device->dev_id);
1088 kfree(idr_ref);
1089 return rc;
1090 }
1091
1092 list_for_each_entry(chan, &device->channels, device_node) {
1093 if (chan->local == NULL)
1094 continue;
1095 mutex_lock(&dma_list_mutex);
1096 chan->dev->chan = NULL;
1097 mutex_unlock(&dma_list_mutex);
1098 device_unregister(&chan->dev->device);
1099 free_percpu(chan->local);
1100 }
1101 return rc;
1102 }
1103 EXPORT_SYMBOL(dma_async_device_register);
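
/*
 * Provider-side sketch (illustrative; the foo_* names are hypothetical
 * driver code and error handling is elided): fill in the capabilities
 * and callbacks, add the channels, then register.
 *
 *	struct dma_device *dd = &foo->ddev;
 *
 *	dma_cap_set(DMA_SLAVE, dd->cap_mask);
 *	dd->dev = &pdev->dev;
 *	dd->device_prep_slave_sg = foo_prep_slave_sg;
 *	dd->device_tx_status = foo_tx_status;
 *	dd->device_issue_pending = foo_issue_pending;
 *	INIT_LIST_HEAD(&dd->channels);
 *	list_add_tail(&foo->chan.device_node, &dd->channels);
 *	ret = dma_async_device_register(dd);
 */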
1104
1105 /**
1106 * dma_async_device_unregister - unregister a DMA device
1107 * @device: &dma_device
1108 *
 * This routine is called by dma driver exit routines; dmaengine holds module
 * references to prevent it from being called while channels are in use.
1111 */
void dma_async_device_unregister(struct dma_device *device)
1113 {
1114 struct dma_chan *chan;
1115
1116 mutex_lock(&dma_list_mutex);
1117 list_del_rcu(&device->global_node);
1118 dma_channel_rebalance();
1119 mutex_unlock(&dma_list_mutex);
1120
1121 list_for_each_entry(chan, &device->channels, device_node) {
1122 WARN_ONCE(chan->client_count,
1123 "%s called while %d clients hold a reference\n",
1124 __func__, chan->client_count);
1125 mutex_lock(&dma_list_mutex);
1126 chan->dev->chan = NULL;
1127 mutex_unlock(&dma_list_mutex);
1128 device_unregister(&chan->dev->device);
1129 free_percpu(chan->local);
1130 }
1131 }
1132 EXPORT_SYMBOL(dma_async_device_unregister);
1133
static void dmam_device_release(struct device *dev, void *res)
1135 {
1136 struct dma_device *device;
1137
1138 device = *(struct dma_device **)res;
1139 dma_async_device_unregister(device);
1140 }
1141
1142 /**
1143 * dmaenginem_async_device_register - registers DMA devices found
1144 * @device: &dma_device
1145 *
1146 * The operation is managed and will be undone on driver detach.
1147 */
int dmaenginem_async_device_register(struct dma_device *device)
1149 {
1150 void *p;
1151 int ret;
1152
1153 p = devres_alloc(dmam_device_release, sizeof(void *), GFP_KERNEL);
1154 if (!p)
1155 return -ENOMEM;
1156
1157 ret = dma_async_device_register(device);
1158 if (!ret) {
1159 *(struct dma_device **)p = device;
1160 devres_add(device->dev, p);
1161 } else {
1162 devres_free(p);
1163 }
1164
1165 return ret;
1166 }
1167 EXPORT_SYMBOL(dmaenginem_async_device_register);
1168
1169 struct dmaengine_unmap_pool {
1170 struct kmem_cache *cache;
1171 const char *name;
1172 mempool_t *pool;
1173 size_t size;
1174 };
1175
1176 #define __UNMAP_POOL(x) { .size = x, .name = "dmaengine-unmap-" __stringify(x) }
1177 static struct dmaengine_unmap_pool unmap_pool[] = {
1178 __UNMAP_POOL(2),
1179 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1180 __UNMAP_POOL(16),
1181 __UNMAP_POOL(128),
1182 __UNMAP_POOL(256),
1183 #endif
1184 };
1185
static struct dmaengine_unmap_pool *__get_unmap_pool(int nr)
1187 {
1188 int order = get_count_order(nr);
1189
1190 switch (order) {
1191 case 0 ... 1:
1192 return &unmap_pool[0];
1193 #if IS_ENABLED(CONFIG_DMA_ENGINE_RAID)
1194 case 2 ... 4:
1195 return &unmap_pool[1];
1196 case 5 ... 7:
1197 return &unmap_pool[2];
1198 case 8:
1199 return &unmap_pool[3];
1200 #endif
1201 default:
1202 BUG();
1203 return NULL;
1204 }
1205 }
1206
static void dmaengine_unmap(struct kref *kref)
1208 {
1209 struct dmaengine_unmap_data *unmap = container_of(kref, typeof(*unmap), kref);
1210 struct device *dev = unmap->dev;
1211 int cnt, i;
1212
1213 cnt = unmap->to_cnt;
1214 for (i = 0; i < cnt; i++)
1215 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1216 DMA_TO_DEVICE);
1217 cnt += unmap->from_cnt;
1218 for (; i < cnt; i++)
1219 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1220 DMA_FROM_DEVICE);
1221 cnt += unmap->bidi_cnt;
1222 for (; i < cnt; i++) {
1223 if (unmap->addr[i] == 0)
1224 continue;
1225 dma_unmap_page(dev, unmap->addr[i], unmap->len,
1226 DMA_BIDIRECTIONAL);
1227 }
1228 cnt = unmap->map_cnt;
1229 mempool_free(unmap, __get_unmap_pool(cnt)->pool);
1230 }
1231
void dmaengine_unmap_put(struct dmaengine_unmap_data *unmap)
1233 {
1234 if (unmap)
1235 kref_put(&unmap->kref, dmaengine_unmap);
1236 }
1237 EXPORT_SYMBOL_GPL(dmaengine_unmap_put);
1238
static void dmaengine_destroy_unmap_pool(void)
1240 {
1241 int i;
1242
1243 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1244 struct dmaengine_unmap_pool *p = &unmap_pool[i];
1245
1246 mempool_destroy(p->pool);
1247 p->pool = NULL;
1248 kmem_cache_destroy(p->cache);
1249 p->cache = NULL;
1250 }
1251 }
1252
static int __init dmaengine_init_unmap_pool(void)
1254 {
1255 int i;
1256
1257 for (i = 0; i < ARRAY_SIZE(unmap_pool); i++) {
1258 struct dmaengine_unmap_pool *p = &unmap_pool[i];
1259 size_t size;
1260
1261 size = sizeof(struct dmaengine_unmap_data) +
1262 sizeof(dma_addr_t) * p->size;
1263
1264 p->cache = kmem_cache_create(p->name, size, 0,
1265 SLAB_HWCACHE_ALIGN, NULL);
1266 if (!p->cache)
1267 break;
1268 p->pool = mempool_create_slab_pool(1, p->cache);
1269 if (!p->pool)
1270 break;
1271 }
1272
1273 if (i == ARRAY_SIZE(unmap_pool))
1274 return 0;
1275
1276 dmaengine_destroy_unmap_pool();
1277 return -ENOMEM;
1278 }
1279
1280 struct dmaengine_unmap_data *
dmaengine_get_unmap_data(struct device *dev, int nr, gfp_t flags)
1282 {
1283 struct dmaengine_unmap_data *unmap;
1284
1285 unmap = mempool_alloc(__get_unmap_pool(nr)->pool, flags);
1286 if (!unmap)
1287 return NULL;
1288
1289 memset(unmap, 0, sizeof(*unmap));
1290 kref_init(&unmap->kref);
1291 unmap->dev = dev;
1292 unmap->map_cnt = nr;
1293
1294 return unmap;
1295 }
1296 EXPORT_SYMBOL(dmaengine_get_unmap_data);
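
/*
 * Illustrative pairing with dmaengine_unmap_put() (a sketch; error
 * handling elided): map one source and one destination page following
 * the to/from ordering that dmaengine_unmap() above expects, then drop
 * the reference once the descriptor that owns it has completed.
 *
 *	unmap = dmaengine_get_unmap_data(dev, 2, GFP_NOWAIT);
 *	unmap->len = len;
 *	unmap->to_cnt = 1;
 *	unmap->addr[0] = dma_map_page(dev, src_page, 0, len, DMA_TO_DEVICE);
 *	unmap->from_cnt = 1;
 *	unmap->addr[1] = dma_map_page(dev, dst_page, 0, len, DMA_FROM_DEVICE);
 *	...
 *	dmaengine_unmap_put(unmap);
 */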
1297
void dma_async_tx_descriptor_init(struct dma_async_tx_descriptor *tx,
1299 struct dma_chan *chan)
1300 {
1301 tx->chan = chan;
1302 #ifdef CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH
1303 spin_lock_init(&tx->lock);
1304 #endif
1305 }
1306 EXPORT_SYMBOL(dma_async_tx_descriptor_init);
1307
1308 /* dma_wait_for_async_tx - spin wait for a transaction to complete
1309 * @tx: in-flight transaction to wait on
1310 */
1311 enum dma_status
dma_wait_for_async_tx(struct dma_async_tx_descriptor *tx)
1313 {
1314 unsigned long dma_sync_wait_timeout = jiffies + msecs_to_jiffies(5000);
1315
1316 if (!tx)
1317 return DMA_COMPLETE;
1318
1319 while (tx->cookie == -EBUSY) {
1320 if (time_after_eq(jiffies, dma_sync_wait_timeout)) {
1321 dev_err(tx->chan->device->dev,
1322 "%s timeout waiting for descriptor submission\n",
1323 __func__);
1324 return DMA_ERROR;
1325 }
1326 cpu_relax();
1327 }
1328 return dma_sync_wait(tx->chan, tx->cookie);
1329 }
1330 EXPORT_SYMBOL_GPL(dma_wait_for_async_tx);
1331
1332 /* dma_run_dependencies - helper routine for dma drivers to process
1333 * (start) dependent operations on their target channel
1334 * @tx: transaction with dependencies
1335 */
void dma_run_dependencies(struct dma_async_tx_descriptor *tx)
1337 {
1338 struct dma_async_tx_descriptor *dep = txd_next(tx);
1339 struct dma_async_tx_descriptor *dep_next;
1340 struct dma_chan *chan;
1341
1342 if (!dep)
1343 return;
1344
1345 /* we'll submit tx->next now, so clear the link */
1346 txd_clear_next(tx);
1347 chan = dep->chan;
1348
1349 /* keep submitting up until a channel switch is detected
1350 * in that case we will be called again as a result of
1351 * processing the interrupt from async_tx_channel_switch
1352 */
1353 for (; dep; dep = dep_next) {
1354 txd_lock(dep);
1355 txd_clear_parent(dep);
1356 dep_next = txd_next(dep);
1357 if (dep_next && dep_next->chan == chan)
1358 txd_clear_next(dep); /* ->next will be submitted */
1359 else
1360 dep_next = NULL; /* submit current dep and terminate */
1361 txd_unlock(dep);
1362
1363 dep->tx_submit(dep);
1364 }
1365
1366 chan->device->device_issue_pending(chan);
1367 }
1368 EXPORT_SYMBOL_GPL(dma_run_dependencies);
1369
static int __init dma_bus_init(void)
1371 {
1372 int err = dmaengine_init_unmap_pool();
1373
1374 if (err)
1375 return err;
1376 return class_register(&dma_devclass);
1377 }
1378 arch_initcall(dma_bus_init);
1379