/*
 * Qualcomm Technologies HIDMA DMA engine interface
 *
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * Copyright (C) Freescale Semiconductor, Inc. 2007, 2008.
 * Copyright (C) Semihalf 2009
 * Copyright (C) Ilya Yanok, Emcraft Systems 2010
 * Copyright (C) Alexander Popov, Promcontroller 2014
 *
 * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
 * (defines, structures and comments) was taken from MPC5121 DMA driver
 * written by Hongjun Chen <hong-jun.chen@freescale.com>.
 *
 * Approved as OSADL project by a majority of OSADL members and funded
 * by OSADL membership fees in 2009; for details see www.osadl.org.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * The full GNU General Public License is included in this distribution in the
 * file called COPYING.
 */

/* Linux Foundation elects GPLv2 license only. */

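/*
 * This file implements the dmaengine provider side of a HIDMA transfer
 * channel: it registers a struct dma_device with DMA_MEMCPY and DMA_MEMSET
 * capabilities and drives the hardware through the low level hidma_ll_*
 * interface. Only the per-channel interrupt is handled here; the common
 * interrupt and channel management are handled by the separate management
 * driver.
 */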
#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/of_dma.h>
#include <linux/property.h>
#include <linux/delay.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/atomic.h>
#include <linux/pm_runtime.h>
#include <linux/msi.h>

#include "../dmaengine.h"
#include "hidma.h"

/*
 * Default idle time is 2 seconds. This parameter can
 * be overridden by changing the following
 * /sys/bus/platform/devices/QCOM8061:<xy>/power/autosuspend_delay_ms
 * file at runtime.
 */
#define HIDMA_AUTOSUSPEND_TIMEOUT		2000
#define HIDMA_ERR_INFO_SW			0xFF
#define HIDMA_ERR_CODE_UNEXPECTED_TERMINATE	0x0
#define HIDMA_NR_DEFAULT_DESC			10
#define HIDMA_MSI_INTS				11

static inline struct hidma_dev *to_hidma_dev(struct dma_device *dmadev)
{
	return container_of(dmadev, struct hidma_dev, ddev);
}

static inline
struct hidma_dev *to_hidma_dev_from_lldev(struct hidma_lldev **_lldevp)
{
	return container_of(_lldevp, struct hidma_dev, lldev);
}

static inline struct hidma_chan *to_hidma_chan(struct dma_chan *dmach)
{
	return container_of(dmach, struct hidma_chan, chan);
}

static inline
struct hidma_desc *to_hidma_desc(struct dma_async_tx_descriptor *t)
{
	return container_of(t, struct hidma_desc, desc);
}

static void hidma_free(struct hidma_dev *dmadev)
{
	INIT_LIST_HEAD(&dmadev->ddev.channels);
}

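/*
 * A non-zero module parameter overrides the "desc-count" device property
 * read in hidma_probe(); if both are zero, HIDMA_NR_DEFAULT_DESC is used.
 */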
static unsigned int nr_desc_prm;
module_param(nr_desc_prm, uint, 0644);
MODULE_PARM_DESC(nr_desc_prm, "number of descriptors (default: 0)");

/* process completed descriptors */
static void hidma_process_completed(struct hidma_chan *mchan)
{
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *mdma = to_hidma_dev(ddev);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t last_cookie;
	struct hidma_desc *mdesc;
	struct hidma_desc *next;
	unsigned long irqflags;
	struct list_head list;

	INIT_LIST_HEAD(&list);

	/* Get all completed descriptors */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&mchan->completed, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* Execute callbacks and run dependencies */
	list_for_each_entry_safe(mdesc, next, &list, node) {
		enum dma_status llstat;
		struct dmaengine_desc_callback cb;
		struct dmaengine_result result;

		desc = &mdesc->desc;
		last_cookie = desc->cookie;

		llstat = hidma_ll_status(mdma->lldev, mdesc->tre_ch);

		spin_lock_irqsave(&mchan->lock, irqflags);
		if (llstat == DMA_COMPLETE) {
			mchan->last_success = last_cookie;
			result.result = DMA_TRANS_NOERROR;
		} else {
			result.result = DMA_TRANS_ABORTED;
		}

		dma_cookie_complete(desc);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_get_callback(desc, &cb);

		dma_run_dependencies(desc);

		spin_lock_irqsave(&mchan->lock, irqflags);
		list_move(&mdesc->node, &mchan->free);
		spin_unlock_irqrestore(&mchan->lock, irqflags);

		dmaengine_desc_callback_invoke(&cb, &result);
	}
}

/*
 * Called once for each submitted descriptor.
 * PM is locked once for each descriptor that is currently
 * in execution.
 */
static void hidma_callback(void *data)
{
	struct hidma_desc *mdesc = data;
	struct hidma_chan *mchan = to_hidma_chan(mdesc->desc.chan);
	struct dma_device *ddev = mchan->chan.device;
	struct hidma_dev *dmadev = to_hidma_dev(ddev);
	unsigned long irqflags;
	bool queued = false;

	spin_lock_irqsave(&mchan->lock, irqflags);
	if (mdesc->node.next) {
		/* Delete from the active list, add to completed list */
		list_move_tail(&mdesc->node, &mchan->completed);
		queued = true;

		/* calculate the next running descriptor */
		mchan->running = list_first_entry(&mchan->active,
						  struct hidma_desc, node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	hidma_process_completed(mchan);

	if (queued) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
}

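/*
 * Descriptor lifecycle across the per-channel lists:
 *
 *   free -> prepared -> queued -> active -> completed -> free
 *
 * hidma_prep_dma_memcpy()/hidma_prep_dma_memset() take a descriptor from
 * the free list and put it on the prepared list, hidma_tx_submit() moves
 * it to the queued list, hidma_issue_pending() hands it to the hardware
 * and moves it to the active list, hidma_callback() moves it to the
 * completed list and hidma_process_completed() finally returns it to the
 * free list.
 */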
static int hidma_chan_init(struct hidma_dev *dmadev, u32 dma_sig)
{
	struct hidma_chan *mchan;
	struct dma_device *ddev;

	mchan = devm_kzalloc(dmadev->ddev.dev, sizeof(*mchan), GFP_KERNEL);
	if (!mchan)
		return -ENOMEM;

	ddev = &dmadev->ddev;
	mchan->dma_sig = dma_sig;
	mchan->dmadev = dmadev;
	mchan->chan.device = ddev;
	dma_cookie_init(&mchan->chan);

	INIT_LIST_HEAD(&mchan->free);
	INIT_LIST_HEAD(&mchan->prepared);
	INIT_LIST_HEAD(&mchan->active);
	INIT_LIST_HEAD(&mchan->completed);
	INIT_LIST_HEAD(&mchan->queued);

	spin_lock_init(&mchan->lock);
	list_add_tail(&mchan->chan.device_node, &ddev->channels);
	dmadev->ddev.chancnt++;
	return 0;
}

static void hidma_issue_task(unsigned long arg)
{
	struct hidma_dev *dmadev = (struct hidma_dev *)arg;

	pm_runtime_get_sync(dmadev->ddev.dev);
	hidma_ll_start(dmadev->lldev);
}

static void hidma_issue_pending(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	unsigned long flags;
	struct hidma_desc *qdesc, *next;
	int status;

	spin_lock_irqsave(&mchan->lock, flags);
	list_for_each_entry_safe(qdesc, next, &mchan->queued, node) {
		hidma_ll_queue_request(dmadev->lldev, qdesc->tre_ch);
		list_move_tail(&qdesc->node, &mchan->active);
	}

	if (!mchan->running) {
		struct hidma_desc *desc = list_first_entry(&mchan->active,
							   struct hidma_desc,
							   node);
		mchan->running = desc;
	}
	spin_unlock_irqrestore(&mchan->lock, flags);

	/* PM will be released in hidma_callback function. */
	status = pm_runtime_get(dmadev->ddev.dev);
	if (status < 0)
		tasklet_schedule(&dmadev->task);
	else
		hidma_ll_start(dmadev->lldev);
}

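/*
 * A descriptor that dma_cookie_status() reports as complete may still have
 * been aborted by the hardware. Aborted descriptors are the ones whose
 * cookies fall in the window (last_success, last_used]; the window may wrap
 * around the cookie space. For example, with last_success == 5 and
 * last_used == 8, cookies 6, 7 and 8 were aborted while cookie 5 and older
 * completed successfully.
 */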
static inline bool hidma_txn_is_success(dma_cookie_t cookie,
		dma_cookie_t last_success, dma_cookie_t last_used)
{
	if (last_success <= last_used) {
		if ((cookie <= last_success) || (cookie > last_used))
			return true;
	} else {
		if ((cookie <= last_success) && (cookie > last_used))
			return true;
	}
	return false;
}

static enum dma_status hidma_tx_status(struct dma_chan *dmach,
				       dma_cookie_t cookie,
				       struct dma_tx_state *txstate)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	enum dma_status ret;

	ret = dma_cookie_status(dmach, cookie, txstate);
	if (ret == DMA_COMPLETE) {
		bool is_success;

		is_success = hidma_txn_is_success(cookie, mchan->last_success,
						  dmach->cookie);
		return is_success ? ret : DMA_ERROR;
	}

	if (mchan->paused && (ret == DMA_IN_PROGRESS)) {
		unsigned long flags;
		dma_cookie_t runcookie;

		spin_lock_irqsave(&mchan->lock, flags);
		if (mchan->running)
			runcookie = mchan->running->desc.cookie;
		else
			runcookie = -EINVAL;

		if (runcookie == cookie)
			ret = DMA_PAUSED;

		spin_unlock_irqrestore(&mchan->lock, flags);
	}

	return ret;
}

/*
 * Submit descriptor to hardware.
 * Lock the PM for each descriptor we are sending.
 */
static dma_cookie_t hidma_tx_submit(struct dma_async_tx_descriptor *txd)
{
	struct hidma_chan *mchan = to_hidma_chan(txd->chan);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc;
	unsigned long irqflags;
	dma_cookie_t cookie;

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (!hidma_ll_isenabled(dmadev->lldev)) {
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
		return -ENODEV;
	}
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);

	mdesc = container_of(txd, struct hidma_desc, desc);
	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move descriptor to queued */
	list_move_tail(&mdesc->node, &mchan->queued);

	/* Update cookie */
	cookie = dma_cookie_assign(txd);

	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return cookie;
}

static int hidma_alloc_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *dmadev = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);
	unsigned int i;
	int rc = 0;

	if (mchan->allocated)
		return 0;

	/* Alloc descriptors for this channel */
	for (i = 0; i < dmadev->nr_descriptors; i++) {
		mdesc = kzalloc(sizeof(struct hidma_desc), GFP_NOWAIT);
		if (!mdesc) {
			rc = -ENOMEM;
			break;
		}
		dma_async_tx_descriptor_init(&mdesc->desc, dmach);
		mdesc->desc.tx_submit = hidma_tx_submit;

		rc = hidma_ll_request(dmadev->lldev, mchan->dma_sig,
				      "DMA engine", hidma_callback, mdesc,
				      &mdesc->tre_ch);
		if (rc) {
			dev_err(dmach->device->dev,
				"channel alloc failed at %u\n", i);
			kfree(mdesc);
			break;
		}
		list_add_tail(&mdesc->node, &descs);
	}

	if (rc) {
		/* return the allocated descriptors */
		list_for_each_entry_safe(mdesc, tmp, &descs, node) {
			hidma_ll_free(dmadev->lldev, mdesc->tre_ch);
			kfree(mdesc);
		}
		return rc;
	}

	spin_lock_irqsave(&mchan->lock, irqflags);
	list_splice_tail_init(&descs, &mchan->free);
	mchan->allocated = true;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
	return 1;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memcpy(struct dma_chan *dmach, dma_addr_t dest, dma_addr_t src,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	mdesc->desc.flags = flags;
	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     src, dest, len, flags,
				     HIDMA_TRE_MEMCPY);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

static struct dma_async_tx_descriptor *
hidma_prep_dma_memset(struct dma_chan *dmach, dma_addr_t dest, int value,
		      size_t len, unsigned long flags)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_desc *mdesc = NULL;
	struct hidma_dev *mdma = mchan->dmadev;
	unsigned long irqflags;

	/* Get free descriptor */
	spin_lock_irqsave(&mchan->lock, irqflags);
	if (!list_empty(&mchan->free)) {
		mdesc = list_first_entry(&mchan->free, struct hidma_desc, node);
		list_del(&mdesc->node);
	}
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	if (!mdesc)
		return NULL;

	mdesc->desc.flags = flags;
	hidma_ll_set_transfer_params(mdma->lldev, mdesc->tre_ch,
				     value, dest, len, flags,
				     HIDMA_TRE_MEMSET);

	/* Place descriptor in prepared list */
	spin_lock_irqsave(&mchan->lock, irqflags);
	list_add_tail(&mdesc->node, &mchan->prepared);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	return &mdesc->desc;
}

static int hidma_terminate_channel(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	struct hidma_desc *tmp, *mdesc;
	unsigned long irqflags;
	LIST_HEAD(list);
	int rc;

	pm_runtime_get_sync(dmadev->ddev.dev);
	/* give completed requests a chance to finish */
	hidma_process_completed(mchan);

	spin_lock_irqsave(&mchan->lock, irqflags);
	mchan->last_success = 0;
	list_splice_init(&mchan->active, &list);
	list_splice_init(&mchan->prepared, &list);
	list_splice_init(&mchan->completed, &list);
	list_splice_init(&mchan->queued, &list);
	spin_unlock_irqrestore(&mchan->lock, irqflags);

	/* this suspends the existing transfer */
	rc = hidma_ll_disable(dmadev->lldev);
	if (rc) {
		dev_err(dmadev->ddev.dev, "channel did not pause\n");
		goto out;
	}

	/* return all user requests */
	list_for_each_entry_safe(mdesc, tmp, &list, node) {
		struct dma_async_tx_descriptor *txd = &mdesc->desc;

		dma_descriptor_unmap(txd);
		dmaengine_desc_get_callback_invoke(txd, NULL);
		dma_run_dependencies(txd);

		/* move myself to free_list */
		list_move(&mdesc->node, &mchan->free);
	}

	rc = hidma_ll_enable(dmadev->lldev);
out:
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static int hidma_terminate_all(struct dma_chan *chan)
{
	struct hidma_chan *mchan = to_hidma_chan(chan);
	struct hidma_dev *dmadev = to_hidma_dev(mchan->chan.device);
	int rc;

	rc = hidma_terminate_channel(chan);
	if (rc)
		return rc;

	/* reinitialize the hardware */
	pm_runtime_get_sync(dmadev->ddev.dev);
	rc = hidma_ll_setup(dmadev->lldev);
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return rc;
}

static void hidma_free_chan_resources(struct dma_chan *dmach)
{
	struct hidma_chan *mchan = to_hidma_chan(dmach);
	struct hidma_dev *mdma = mchan->dmadev;
	struct hidma_desc *mdesc, *tmp;
	unsigned long irqflags;
	LIST_HEAD(descs);

	/* terminate running transactions and free descriptors */
	hidma_terminate_channel(dmach);

	spin_lock_irqsave(&mchan->lock, irqflags);

	/* Move data */
	list_splice_tail_init(&mchan->free, &descs);

	/* Free descriptors */
	list_for_each_entry_safe(mdesc, tmp, &descs, node) {
		hidma_ll_free(mdma->lldev, mdesc->tre_ch);
		list_del(&mdesc->node);
		kfree(mdesc);
	}

	mchan->allocated = 0;
	spin_unlock_irqrestore(&mchan->lock, irqflags);
}

static int hidma_pause(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (!mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		if (hidma_ll_disable(dmadev->lldev))
			dev_warn(dmadev->ddev.dev, "channel did not stop\n");
		mchan->paused = true;
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return 0;
}

static int hidma_resume(struct dma_chan *chan)
{
	struct hidma_chan *mchan;
	struct hidma_dev *dmadev;
	int rc = 0;

	mchan = to_hidma_chan(chan);
	dmadev = to_hidma_dev(mchan->chan.device);
	if (mchan->paused) {
		pm_runtime_get_sync(dmadev->ddev.dev);
		rc = hidma_ll_enable(dmadev->lldev);
		if (!rc)
			mchan->paused = false;
		else
			dev_err(dmadev->ddev.dev,
				"failed to resume the channel\n");
		pm_runtime_mark_last_busy(dmadev->ddev.dev);
		pm_runtime_put_autosuspend(dmadev->ddev.dev);
	}
	return rc;
}

static irqreturn_t hidma_chirq_handler(int chirq, void *arg)
{
	struct hidma_lldev *lldev = arg;

	/*
	 * All interrupts are request driven.
	 * HW doesn't send an interrupt by itself.
	 */
	return hidma_ll_inthandler(chirq, lldev);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
static irqreturn_t hidma_chirq_handler_msi(int chirq, void *arg)
{
	struct hidma_lldev **lldevp = arg;
	struct hidma_dev *dmadev = to_hidma_dev_from_lldev(lldevp);

	return hidma_ll_inthandler_msi(chirq, *lldevp,
				       1 << (chirq - dmadev->msi_virqbase));
}
#endif

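/*
 * The "chid" sysfs attribute created by hidma_sysfs_init() reports the
 * hardware channel index (dmadev->chidx) that was read from the TRCA
 * register space at probe time.
 */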
static ssize_t hidma_show_values(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct hidma_dev *mdev = platform_get_drvdata(pdev);

	buf[0] = 0;

	if (strcmp(attr->attr.name, "chid") == 0)
		sprintf(buf, "%d\n", mdev->chidx);

	return strlen(buf);
}

static inline void hidma_sysfs_uninit(struct hidma_dev *dev)
{
	device_remove_file(dev->ddev.dev, dev->chid_attrs);
}

static struct device_attribute*
hidma_create_sysfs_entry(struct hidma_dev *dev, char *name, int mode)
{
	struct device_attribute *attrs;
	char *name_copy;

	attrs = devm_kmalloc(dev->ddev.dev, sizeof(struct device_attribute),
			     GFP_KERNEL);
	if (!attrs)
		return NULL;

	name_copy = devm_kstrdup(dev->ddev.dev, name, GFP_KERNEL);
	if (!name_copy)
		return NULL;

	attrs->attr.name = name_copy;
	attrs->attr.mode = mode;
	attrs->show = hidma_show_values;
	sysfs_attr_init(&attrs->attr);

	return attrs;
}

static int hidma_sysfs_init(struct hidma_dev *dev)
{
	dev->chid_attrs = hidma_create_sysfs_entry(dev, "chid", S_IRUGO);
	if (!dev->chid_attrs)
		return -ENOMEM;

	return device_create_file(dev->ddev.dev, dev->chid_attrs);
}

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
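/*
 * Only the first MSI descriptor (platform msi_index 0) programs the MSI
 * address/data registers in the EVCA register space; hidma_request_msi()
 * records the corresponding virq as the channel's msi_virqbase.
 */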
static void hidma_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(desc);
	struct hidma_dev *dmadev = dev_get_drvdata(dev);

	if (!desc->platform.msi_index) {
		writel(msg->address_lo, dmadev->dev_evca + 0x118);
		writel(msg->address_hi, dmadev->dev_evca + 0x11C);
		writel(msg->data, dmadev->dev_evca + 0x120);
	}
}
#endif

static void hidma_free_msis(struct hidma_dev *dmadev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	struct device *dev = dmadev->ddev.dev;
	struct msi_desc *desc;

	/* free allocated MSI interrupts */
	for_each_msi_entry(desc, dev)
		devm_free_irq(dev, desc->irq, &dmadev->lldev);

	platform_msi_domain_free_irqs(dev);
#endif
}

static int hidma_request_msi(struct hidma_dev *dmadev,
			     struct platform_device *pdev)
{
#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN
	int rc;
	struct msi_desc *desc;
	struct msi_desc *failed_desc = NULL;

	rc = platform_msi_domain_alloc_irqs(&pdev->dev, HIDMA_MSI_INTS,
					    hidma_write_msi_msg);
	if (rc)
		return rc;

	for_each_msi_entry(desc, &pdev->dev) {
		if (!desc->platform.msi_index)
			dmadev->msi_virqbase = desc->irq;

		rc = devm_request_irq(&pdev->dev, desc->irq,
				      hidma_chirq_handler_msi,
				      0, "qcom-hidma-msi",
				      &dmadev->lldev);
		if (rc) {
			failed_desc = desc;
			break;
		}
	}

	if (rc) {
		/* free allocated MSI interrupts above */
		for_each_msi_entry(desc, &pdev->dev) {
			if (desc == failed_desc)
				break;
			devm_free_irq(&pdev->dev, desc->irq,
				      &dmadev->lldev);
		}
	} else {
		/* Add callback to free MSIs on teardown */
		hidma_ll_setup_irq(dmadev->lldev, true);
	}
	if (rc)
		dev_warn(&pdev->dev,
			 "failed to request MSI irq, falling back to wired IRQ\n");
	return rc;
#else
	return -EINVAL;
#endif
}

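/*
 * Only the newer hardware revisions advertise MSI support: ACPI HID
 * "QCOM8062" or device tree compatible "qcom,hidma-1.1".
 */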
static bool hidma_msi_capable(struct device *dev)
{
	struct acpi_device *adev = ACPI_COMPANION(dev);
	const char *of_compat;
	int ret = -EINVAL;

	if (!adev || acpi_disabled) {
		ret = device_property_read_string(dev, "compatible",
						  &of_compat);
		if (ret)
			return false;

		ret = strcmp(of_compat, "qcom,hidma-1.1");
	} else {
#ifdef CONFIG_ACPI
		ret = strcmp(acpi_device_hid(adev), "QCOM8062");
#endif
	}
	return ret == 0;
}

static int hidma_probe(struct platform_device *pdev)
{
	struct hidma_dev *dmadev;
	struct resource *trca_resource;
	struct resource *evca_resource;
	int chirq;
	void __iomem *evca;
	void __iomem *trca;
	int rc;
	bool msi;

	pm_runtime_set_autosuspend_delay(&pdev->dev, HIDMA_AUTOSUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_set_active(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	trca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	trca = devm_ioremap_resource(&pdev->dev, trca_resource);
	if (IS_ERR(trca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	evca_resource = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	evca = devm_ioremap_resource(&pdev->dev, evca_resource);
	if (IS_ERR(evca)) {
		rc = -ENOMEM;
		goto bailout;
	}

	/*
	 * This driver only handles the channel IRQs.
	 * Common IRQ is handled by the management driver.
	 */
	chirq = platform_get_irq(pdev, 0);
	if (chirq < 0) {
		rc = -ENODEV;
		goto bailout;
	}

	dmadev = devm_kzalloc(&pdev->dev, sizeof(*dmadev), GFP_KERNEL);
	if (!dmadev) {
		rc = -ENOMEM;
		goto bailout;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	spin_lock_init(&dmadev->lock);
	dmadev->ddev.dev = &pdev->dev;
	pm_runtime_get_sync(dmadev->ddev.dev);

	dma_cap_set(DMA_MEMCPY, dmadev->ddev.cap_mask);
	dma_cap_set(DMA_MEMSET, dmadev->ddev.cap_mask);
	if (WARN_ON(!pdev->dev.dma_mask)) {
		rc = -ENXIO;
		goto dmafree;
	}

	dmadev->dev_evca = evca;
	dmadev->evca_resource = evca_resource;
	dmadev->dev_trca = trca;
	dmadev->trca_resource = trca_resource;
	dmadev->ddev.device_prep_dma_memcpy = hidma_prep_dma_memcpy;
	dmadev->ddev.device_prep_dma_memset = hidma_prep_dma_memset;
	dmadev->ddev.device_alloc_chan_resources = hidma_alloc_chan_resources;
	dmadev->ddev.device_free_chan_resources = hidma_free_chan_resources;
	dmadev->ddev.device_tx_status = hidma_tx_status;
	dmadev->ddev.device_issue_pending = hidma_issue_pending;
	dmadev->ddev.device_pause = hidma_pause;
	dmadev->ddev.device_resume = hidma_resume;
	dmadev->ddev.device_terminate_all = hidma_terminate_all;
	dmadev->ddev.copy_align = 8;

	/*
	 * Determine the MSI capability of the platform. Old HW doesn't
	 * support MSI.
	 */
	msi = hidma_msi_capable(&pdev->dev);

	device_property_read_u32(&pdev->dev, "desc-count",
				 &dmadev->nr_descriptors);

	if (nr_desc_prm) {
		dev_info(&pdev->dev, "overriding number of descriptors as %d\n",
			 nr_desc_prm);
		dmadev->nr_descriptors = nr_desc_prm;
	}

	if (!dmadev->nr_descriptors)
		dmadev->nr_descriptors = HIDMA_NR_DEFAULT_DESC;

	dmadev->chidx = readl(dmadev->dev_trca + 0x28);

	/* Set DMA mask to 64 bits. */
	rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
	if (rc) {
		dev_warn(&pdev->dev, "unable to set coherent mask to 64\n");
		rc = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (rc)
			goto dmafree;
	}

	dmadev->lldev = hidma_ll_init(dmadev->ddev.dev,
				      dmadev->nr_descriptors, dmadev->dev_trca,
				      dmadev->dev_evca, dmadev->chidx);
	if (!dmadev->lldev) {
		rc = -EPROBE_DEFER;
		goto dmafree;
	}

	platform_set_drvdata(pdev, dmadev);
	if (msi)
		rc = hidma_request_msi(dmadev, pdev);

	if (!msi || rc) {
		hidma_ll_setup_irq(dmadev->lldev, false);
		rc = devm_request_irq(&pdev->dev, chirq, hidma_chirq_handler,
				      0, "qcom-hidma", dmadev->lldev);
		if (rc)
			goto uninit;
	}

	INIT_LIST_HEAD(&dmadev->ddev.channels);
	rc = hidma_chan_init(dmadev, 0);
	if (rc)
		goto uninit;

	rc = dma_async_device_register(&dmadev->ddev);
	if (rc)
		goto uninit;

	dmadev->irq = chirq;
	tasklet_init(&dmadev->task, hidma_issue_task, (unsigned long)dmadev);
	hidma_debug_init(dmadev);
	hidma_sysfs_init(dmadev);
	dev_info(&pdev->dev, "HI-DMA engine driver registration complete\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
	return 0;

uninit:
	if (msi)
		hidma_free_msis(dmadev);

	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
dmafree:
	if (dmadev)
		hidma_free(dmadev);
bailout:
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return rc;
}

static void hidma_shutdown(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	dev_info(dmadev->ddev.dev, "HI-DMA engine shutdown\n");

	pm_runtime_get_sync(dmadev->ddev.dev);
	if (hidma_ll_disable(dmadev->lldev))
		dev_warn(dmadev->ddev.dev, "channel did not stop\n");
	pm_runtime_mark_last_busy(dmadev->ddev.dev);
	pm_runtime_put_autosuspend(dmadev->ddev.dev);
}

static int hidma_remove(struct platform_device *pdev)
{
	struct hidma_dev *dmadev = platform_get_drvdata(pdev);

	pm_runtime_get_sync(dmadev->ddev.dev);
	dma_async_device_unregister(&dmadev->ddev);
	if (!dmadev->lldev->msi_support)
		devm_free_irq(dmadev->ddev.dev, dmadev->irq, dmadev->lldev);
	else
		hidma_free_msis(dmadev);

	tasklet_kill(&dmadev->task);
	hidma_sysfs_uninit(dmadev);
	hidma_debug_uninit(dmadev);
	hidma_ll_uninit(dmadev->lldev);
	hidma_free(dmadev);

	dev_info(&pdev->dev, "HI-DMA engine removed\n");
	pm_runtime_put_sync_suspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	return 0;
}

#if IS_ENABLED(CONFIG_ACPI)
static const struct acpi_device_id hidma_acpi_ids[] = {
	{"QCOM8061"},
	{"QCOM8062"},
	{},
};
MODULE_DEVICE_TABLE(acpi, hidma_acpi_ids);
#endif

static const struct of_device_id hidma_match[] = {
	{.compatible = "qcom,hidma-1.0",},
	{.compatible = "qcom,hidma-1.1",},
	{},
};
MODULE_DEVICE_TABLE(of, hidma_match);

static struct platform_driver hidma_driver = {
	.probe = hidma_probe,
	.remove = hidma_remove,
	.shutdown = hidma_shutdown,
	.driver = {
		.name = "hidma",
		.of_match_table = hidma_match,
		.acpi_match_table = ACPI_PTR(hidma_acpi_ids),
	},
};

module_platform_driver(hidma_driver);
MODULE_LICENSE("GPL v2");