// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2015 Robert Jarzmik <robert.jarzmik@free.fr>
 */

#include <linux/err.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/dmaengine.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/platform_data/mmp_dma.h>
#include <linux/dmapool.h>
#include <linux/of_device.h>
#include <linux/of_dma.h>
#include <linux/of.h>
#include <linux/wait.h>
#include <linux/dma/pxa-dma.h>

#include "dmaengine.h"
#include "virt-dma.h"

#define DCSR(n)		(0x0000 + ((n) << 2))
#define DALGN(n)	0x00a0
#define DINT		0x00f0
#define DDADR(n)	(0x0200 + ((n) << 4))
#define DSADR(n)	(0x0204 + ((n) << 4))
#define DTADR(n)	(0x0208 + ((n) << 4))
#define DCMD(n)		(0x020c + ((n) << 4))

#define PXA_DCSR_RUN		BIT(31)	/* Run Bit (read / write) */
#define PXA_DCSR_NODESC		BIT(30)	/* No-Descriptor Fetch (read / write) */
#define PXA_DCSR_STOPIRQEN	BIT(29)	/* Stop Interrupt Enable (R/W) */
#define PXA_DCSR_REQPEND	BIT(8)	/* Request Pending (read-only) */
#define PXA_DCSR_STOPSTATE	BIT(3)	/* Stop State (read-only) */
#define PXA_DCSR_ENDINTR	BIT(2)	/* End Interrupt (read / write) */
#define PXA_DCSR_STARTINTR	BIT(1)	/* Start Interrupt (read / write) */
#define PXA_DCSR_BUSERR		BIT(0)	/* Bus Error Interrupt (read / write) */

#define PXA_DCSR_EORIRQEN	BIT(28)	/* End of Receive IRQ Enable (R/W) */
#define PXA_DCSR_EORJMPEN	BIT(27)	/* Jump to next descriptor on EOR */
#define PXA_DCSR_EORSTOPEN	BIT(26)	/* STOP on an EOR */
#define PXA_DCSR_SETCMPST	BIT(25)	/* Set Descriptor Compare Status */
#define PXA_DCSR_CLRCMPST	BIT(24)	/* Clear Descriptor Compare Status */
#define PXA_DCSR_CMPST		BIT(10)	/* The Descriptor Compare Status */
#define PXA_DCSR_EORINTR	BIT(9)	/* The end of Receive */

#define DRCMR_MAPVLD	BIT(7)	/* Map Valid (read / write) */
#define DRCMR_CHLNUM	0x1f	/* mask for Channel Number (read / write) */

#define DDADR_DESCADDR	0xfffffff0	/* Address of next descriptor (mask) */
#define DDADR_STOP	BIT(0)	/* Stop (read / write) */

#define PXA_DCMD_INCSRCADDR	BIT(31)	/* Source Address Increment Setting. */
#define PXA_DCMD_INCTRGADDR	BIT(30)	/* Target Address Increment Setting. */
#define PXA_DCMD_FLOWSRC	BIT(29)	/* Flow Control by the source. */
#define PXA_DCMD_FLOWTRG	BIT(28)	/* Flow Control by the target. */
#define PXA_DCMD_STARTIRQEN	BIT(22)	/* Start Interrupt Enable */
#define PXA_DCMD_ENDIRQEN	BIT(21)	/* End Interrupt Enable */
#define PXA_DCMD_ENDIAN		BIT(18)	/* Device Endian-ness. */
#define PXA_DCMD_BURST8		(1 << 16)	/* 8 byte burst */
#define PXA_DCMD_BURST16	(2 << 16)	/* 16 byte burst */
#define PXA_DCMD_BURST32	(3 << 16)	/* 32 byte burst */
#define PXA_DCMD_WIDTH1		(1 << 14)	/* 1 byte width */
#define PXA_DCMD_WIDTH2		(2 << 14)	/* 2 byte width (HalfWord) */
#define PXA_DCMD_WIDTH4		(3 << 14)	/* 4 byte width (Word) */
#define PXA_DCMD_LENGTH		0x01fff	/* length mask (max = 8K - 1) */

#define PDMA_ALIGNMENT		3
#define PDMA_MAX_DESC_BYTES	(PXA_DCMD_LENGTH & ~((1 << PDMA_ALIGNMENT) - 1))

struct pxad_desc_hw {
	u32 ddadr;	/* Points to the next descriptor + flags */
	u32 dsadr;	/* DSADR value for the current transfer */
	u32 dtadr;	/* DTADR value for the current transfer */
	u32 dcmd;	/* DCMD value for the current transfer */
} __aligned(16);

struct pxad_desc_sw {
	struct virt_dma_desc vd;	/* Virtual descriptor */
	int nb_desc;			/* Number of hw. descriptors */
	size_t len;			/* Number of bytes xfered */
	dma_addr_t first;		/* First descriptor's addr */

	/* At least one descriptor has an src/dst address not multiple of 8 */
	bool misaligned;
	bool cyclic;
	struct dma_pool *desc_pool;	/* Channel's used allocator */

	struct pxad_desc_hw *hw_desc[];	/* DMA coherent descriptors */
};

struct pxad_phy {
	int idx;
	void __iomem *base;
	struct pxad_chan *vchan;
};

struct pxad_chan {
	struct virt_dma_chan vc;	/* Virtual channel */
	u32 drcmr;			/* Requestor of the channel */
	enum pxad_chan_prio prio;	/* Required priority of phy */
	/*
	 * At least one desc_sw in submitted or issued transfers on this channel
	 * has one address such as: addr % 8 != 0. This implies the DALGN
	 * setting on the phy.
	 */
	bool misaligned;
	struct dma_slave_config cfg;	/* Runtime config */

	/* protected by vc->lock */
	struct pxad_phy *phy;
	struct dma_pool *desc_pool;	/* Descriptors pool */
	dma_cookie_t bus_error;

	wait_queue_head_t wq_state;
};

struct pxad_device {
	struct dma_device slave;
	int nr_chans;
	int nr_requestors;
	void __iomem *base;
	struct pxad_phy *phys;
	spinlock_t phy_lock;	/* Phy association */
#ifdef CONFIG_DEBUG_FS
	struct dentry *dbgfs_root;
	struct dentry **dbgfs_chan;
#endif
};

#define tx_to_pxad_desc(tx)					\
	container_of(tx, struct pxad_desc_sw, async_tx)
#define to_pxad_chan(dchan)					\
	container_of(dchan, struct pxad_chan, vc.chan)
#define to_pxad_dev(dmadev)					\
	container_of(dmadev, struct pxad_device, slave)
#define to_pxad_sw_desc(_vd)					\
	container_of((_vd), struct pxad_desc_sw, vd)

#define _phy_readl_relaxed(phy, _reg)					\
	readl_relaxed((phy)->base + _reg((phy)->idx))
#define phy_readl_relaxed(phy, _reg)					\
	({								\
		u32 _v;							\
		_v = readl_relaxed((phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): readl(%s): 0x%08x\n", __func__, #_reg,	\
			 _v);						\
		_v;							\
	})
#define phy_writel(phy, val, _reg)					\
	do {								\
		writel((val), (phy)->base + _reg((phy)->idx));		\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel(0x%08x, %s)\n",			\
			 __func__, (u32)(val), #_reg);			\
	} while (0)
#define phy_writel_relaxed(phy, val, _reg)				\
	do {								\
		writel_relaxed((val), (phy)->base + _reg((phy)->idx));	\
		dev_vdbg(&phy->vchan->vc.chan.dev->device,		\
			 "%s(): writel_relaxed(0x%08x, %s)\n",		\
			 __func__, (u32)(val), #_reg);			\
	} while (0)

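/*
 * Requestor-to-channel mapping registers (DRCMR): requestor lines 0..63 live
 * at offset 0x100 + line * 4, higher lines at 0x1000 + line * 4.
 */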
static unsigned int pxad_drcmr(unsigned int line)
{
	if (line < 64)
		return 0x100 + line * 4;
	return 0x1000 + line * 4;
}

static bool pxad_filter_fn(struct dma_chan *chan, void *param);

/*
 * Debug fs
 */
#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>

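/*
 * Debugfs layout (see pxad_init_debugfs()):
 *	<dev>/state                    - controller summary
 *	<dev>/channels/<n>/state       - DCSR/DCMD/DSADR/DTADR/DDADR dump
 *	<dev>/channels/<n>/descriptors - walk of the hardware descriptor chain
 *	<dev>/channels/<n>/requesters  - DRCMR lines mapped to this channel
 */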
static int requester_chan_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i;
	u32 drcmr;

	seq_printf(s, "DMA channel %d requester :\n", phy->idx);
	for (i = 0; i < 70; i++) {
		drcmr = readl_relaxed(phy->base + pxad_drcmr(i));
		if ((drcmr & DRCMR_CHLNUM) == phy->idx)
			seq_printf(s, "\tRequester %d (MAPVLD=%d)\n", i,
				   !!(drcmr & DRCMR_MAPVLD));
	}
	return 0;
}

static inline int dbg_burst_from_dcmd(u32 dcmd)
{
	int burst = (dcmd >> 16) & 0x3;

	return burst ? 4 << burst : 0;
}

static int is_phys_valid(unsigned long addr)
{
	return pfn_valid(__phys_to_pfn(addr));
}

#define PXA_DCSR_STR(flag) (dcsr & PXA_DCSR_##flag ? #flag" " : "")
#define PXA_DCMD_STR(flag) (dcmd & PXA_DCMD_##flag ? #flag" " : "")

static int descriptors_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	int i, max_show = 20, burst, width;
	u32 dcmd;
	unsigned long phys_desc, ddadr;
	struct pxad_desc_hw *desc;

	phys_desc = ddadr = _phy_readl_relaxed(phy, DDADR);

	seq_printf(s, "DMA channel %d descriptors :\n", phy->idx);
	seq_printf(s, "[%03d] First descriptor unknown\n", 0);
	for (i = 1; i < max_show && is_phys_valid(phys_desc); i++) {
		desc = phys_to_virt(phys_desc);
		dcmd = desc->dcmd;
		burst = dbg_burst_from_dcmd(dcmd);
		width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

		seq_printf(s, "[%03d] Desc at %08lx(virt %p)\n",
			   i, phys_desc, desc);
		seq_printf(s, "\tDDADR = %08x\n", desc->ddadr);
		seq_printf(s, "\tDSADR = %08x\n", desc->dsadr);
		seq_printf(s, "\tDTADR = %08x\n", desc->dtadr);
		seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
			   dcmd,
			   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
			   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
			   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
			   PXA_DCMD_STR(ENDIAN), burst, width,
			   dcmd & PXA_DCMD_LENGTH);
		phys_desc = desc->ddadr;
	}
	if (i == max_show)
		seq_printf(s, "[%03d] Desc at %08lx ... max display reached\n",
			   i, phys_desc);
	else
		seq_printf(s, "[%03d] Desc at %08lx is %s\n",
			   i, phys_desc, phys_desc == DDADR_STOP ?
			   "DDADR_STOP" : "invalid");

	return 0;
}

static int chan_state_show(struct seq_file *s, void *p)
{
	struct pxad_phy *phy = s->private;
	u32 dcsr, dcmd;
	int burst, width;
	static const char * const str_prio[] = {
		"high", "normal", "low", "invalid"
	};

	dcsr = _phy_readl_relaxed(phy, DCSR);
	dcmd = _phy_readl_relaxed(phy, DCMD);
	burst = dbg_burst_from_dcmd(dcmd);
	width = (1 << ((dcmd >> 14) & 0x3)) >> 1;

	seq_printf(s, "DMA channel %d\n", phy->idx);
	seq_printf(s, "\tPriority : %s\n",
		   str_prio[(phy->idx & 0xf) / 4]);
	seq_printf(s, "\tUnaligned transfer bit: %s\n",
		   _phy_readl_relaxed(phy, DALGN) & BIT(phy->idx) ?
		   "yes" : "no");
	seq_printf(s, "\tDCSR = %08x (%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s)\n",
		   dcsr, PXA_DCSR_STR(RUN), PXA_DCSR_STR(NODESC),
		   PXA_DCSR_STR(STOPIRQEN), PXA_DCSR_STR(EORIRQEN),
		   PXA_DCSR_STR(EORJMPEN), PXA_DCSR_STR(EORSTOPEN),
		   PXA_DCSR_STR(SETCMPST), PXA_DCSR_STR(CLRCMPST),
		   PXA_DCSR_STR(CMPST), PXA_DCSR_STR(EORINTR),
		   PXA_DCSR_STR(REQPEND), PXA_DCSR_STR(STOPSTATE),
		   PXA_DCSR_STR(ENDINTR), PXA_DCSR_STR(STARTINTR),
		   PXA_DCSR_STR(BUSERR));

	seq_printf(s, "\tDCMD = %08x (%s%s%s%s%s%s%sburst=%d width=%d len=%d)\n",
		   dcmd,
		   PXA_DCMD_STR(INCSRCADDR), PXA_DCMD_STR(INCTRGADDR),
		   PXA_DCMD_STR(FLOWSRC), PXA_DCMD_STR(FLOWTRG),
		   PXA_DCMD_STR(STARTIRQEN), PXA_DCMD_STR(ENDIRQEN),
		   PXA_DCMD_STR(ENDIAN), burst, width, dcmd & PXA_DCMD_LENGTH);
	seq_printf(s, "\tDSADR = %08x\n", _phy_readl_relaxed(phy, DSADR));
	seq_printf(s, "\tDTADR = %08x\n", _phy_readl_relaxed(phy, DTADR));
	seq_printf(s, "\tDDADR = %08x\n", _phy_readl_relaxed(phy, DDADR));

	return 0;
}

static int state_show(struct seq_file *s, void *p)
{
	struct pxad_device *pdev = s->private;

	/* basic device status */
	seq_puts(s, "DMA engine status\n");
	seq_printf(s, "\tChannel number: %d\n", pdev->nr_chans);

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(state);
DEFINE_SHOW_ATTRIBUTE(chan_state);
DEFINE_SHOW_ATTRIBUTE(descriptors);
DEFINE_SHOW_ATTRIBUTE(requester_chan);

static struct dentry *pxad_dbg_alloc_chan(struct pxad_device *pdev,
					  int ch, struct dentry *chandir)
{
	char chan_name[11];
	struct dentry *chan;
	void *dt;

	scnprintf(chan_name, sizeof(chan_name), "%d", ch);
	chan = debugfs_create_dir(chan_name, chandir);
	dt = (void *)&pdev->phys[ch];

	debugfs_create_file("state", 0400, chan, dt, &chan_state_fops);
	debugfs_create_file("descriptors", 0400, chan, dt, &descriptors_fops);
	debugfs_create_file("requesters", 0400, chan, dt, &requester_chan_fops);

	return chan;
}

static void pxad_init_debugfs(struct pxad_device *pdev)
{
	int i;
	struct dentry *chandir;

	pdev->dbgfs_chan =
		kmalloc_array(pdev->nr_chans, sizeof(struct dentry *),
			      GFP_KERNEL);
	if (!pdev->dbgfs_chan)
		return;

	pdev->dbgfs_root = debugfs_create_dir(dev_name(pdev->slave.dev), NULL);

	debugfs_create_file("state", 0400, pdev->dbgfs_root, pdev, &state_fops);

	chandir = debugfs_create_dir("channels", pdev->dbgfs_root);

	for (i = 0; i < pdev->nr_chans; i++)
		pdev->dbgfs_chan[i] = pxad_dbg_alloc_chan(pdev, i, chandir);
}

static void pxad_cleanup_debugfs(struct pxad_device *pdev)
{
	debugfs_remove_recursive(pdev->dbgfs_root);
}
#else
static inline void pxad_init_debugfs(struct pxad_device *pdev) {}
static inline void pxad_cleanup_debugfs(struct pxad_device *pdev) {}
#endif

static struct pxad_phy *lookup_phy(struct pxad_chan *pchan)
{
	int prio, i;
	struct pxad_device *pdev = to_pxad_dev(pchan->vc.chan.device);
	struct pxad_phy *phy, *found = NULL;
	unsigned long flags;

	/*
	 * dma channel priorities
	 * ch 0 - 3,  16 - 19  <--> (0)
	 * ch 4 - 7,  20 - 23  <--> (1)
	 * ch 8 - 11, 24 - 27  <--> (2)
	 * ch 12 - 15, 28 - 31 <--> (3)
	 */

	spin_lock_irqsave(&pdev->phy_lock, flags);
	for (prio = pchan->prio; prio >= PXAD_PRIO_HIGHEST; prio--) {
		for (i = 0; i < pdev->nr_chans; i++) {
			if (prio != (i & 0xf) >> 2)
				continue;
			phy = &pdev->phys[i];
			if (!phy->vchan) {
				phy->vchan = pchan;
				found = phy;
				goto out_unlock;
			}
		}
	}

out_unlock:
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
	dev_dbg(&pchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, found,
		found ? found->idx : -1);

	return found;
}

static void pxad_free_phy(struct pxad_chan *chan)
{
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	unsigned long flags;
	u32 reg;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): freeing\n", __func__);
	if (!chan->phy)
		return;

	/* clear the channel mapping in DRCMR */
	if (chan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(chan->drcmr);
		writel_relaxed(0, chan->phy->base + reg);
	}

	spin_lock_irqsave(&pdev->phy_lock, flags);
	chan->phy->vchan = NULL;
	chan->phy = NULL;
	spin_unlock_irqrestore(&pdev->phy_lock, flags);
}

static bool is_chan_running(struct pxad_chan *chan)
{
	u32 dcsr;
	struct pxad_phy *phy = chan->phy;

	if (!phy)
		return false;
	dcsr = phy_readl_relaxed(phy, DCSR);
	return dcsr & PXA_DCSR_RUN;
}

static bool is_running_chan_misaligned(struct pxad_chan *chan)
{
	u32 dalgn;

	BUG_ON(!chan->phy);
	dalgn = phy_readl_relaxed(chan->phy, DALGN);
	return dalgn & (BIT(chan->phy->idx));
}

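/*
 * phy_enable() binds the requestor line to the physical channel (DRCMR),
 * reflects the channel's alignment requirement in DALGN, and finally sets
 * DCSR_RUN with stop, end and bus-error interrupts enabled.
 */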
static void phy_enable(struct pxad_phy *phy, bool misaligned)
{
	struct pxad_device *pdev;
	u32 reg, dalgn;

	if (!phy->vchan)
		return;

	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(); phy=%p(%d) misaligned=%d\n", __func__,
		phy, phy->idx, misaligned);

	pdev = to_pxad_dev(phy->vchan->vc.chan.device);
	if (phy->vchan->drcmr <= pdev->nr_requestors) {
		reg = pxad_drcmr(phy->vchan->drcmr);
		writel_relaxed(DRCMR_MAPVLD | phy->idx, phy->base + reg);
	}

	dalgn = phy_readl_relaxed(phy, DALGN);
	if (misaligned)
		dalgn |= BIT(phy->idx);
	else
		dalgn &= ~BIT(phy->idx);
	phy_writel_relaxed(phy, dalgn, DALGN);

	phy_writel(phy, PXA_DCSR_STOPIRQEN | PXA_DCSR_ENDINTR |
		   PXA_DCSR_BUSERR | PXA_DCSR_RUN, DCSR);
}

static void phy_disable(struct pxad_phy *phy)
{
	u32 dcsr;

	if (!phy)
		return;

	dcsr = phy_readl_relaxed(phy, DCSR);
	dev_dbg(&phy->vchan->vc.chan.dev->device,
		"%s(): phy=%p(%d)\n", __func__, phy, phy->idx);
	phy_writel(phy, dcsr & ~PXA_DCSR_RUN & ~PXA_DCSR_STOPIRQEN, DCSR);
}

static void pxad_launch_chan(struct pxad_chan *chan,
			     struct pxad_desc_sw *desc)
{
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): desc=%p\n", __func__, desc);
	if (!chan->phy) {
		chan->phy = lookup_phy(chan);
		if (!chan->phy) {
			dev_dbg(&chan->vc.chan.dev->device,
				"%s(): no free dma channel\n", __func__);
			return;
		}
	}
	chan->bus_error = 0;

	/*
	 * Program the descriptor's address into the DMA controller,
	 * then start the DMA transaction
	 */
	phy_writel(chan->phy, desc->first, DDADR);
	phy_enable(chan->phy, chan->misaligned);
	wake_up(&chan->wq_state);
}

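/*
 * Every transfer is terminated by an extra "updater" hw descriptor. It copies
 * one 32-bit word from the updater's own address (its ddadr field, which holds
 * DDADR_STOP) over its dtadr field at address + 8. Until it has run,
 * dtadr == dsadr + 8 still holds, which is exactly what is_desc_completed()
 * tests; once the engine has executed the updater, the transfer is known to be
 * done. Cyclic transfers instead chain the last real descriptor back to the
 * first one.
 */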
static void set_updater_desc(struct pxad_desc_sw *sw_desc,
			     unsigned long flags)
{
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];
	dma_addr_t dma = sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr;

	updater->ddadr = DDADR_STOP;
	updater->dsadr = dma;
	updater->dtadr = dma + 8;
	updater->dcmd = PXA_DCMD_WIDTH4 | PXA_DCMD_BURST32 |
		(PXA_DCMD_LENGTH & sizeof(u32));
	if (flags & DMA_PREP_INTERRUPT)
		updater->dcmd |= PXA_DCMD_ENDIRQEN;
	if (sw_desc->cyclic)
		sw_desc->hw_desc[sw_desc->nb_desc - 2]->ddadr = sw_desc->first;
}

static bool is_desc_completed(struct virt_dma_desc *vd)
{
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);
	struct pxad_desc_hw *updater =
		sw_desc->hw_desc[sw_desc->nb_desc - 1];

	return updater->dtadr != (updater->dsadr + 8);
}

static void pxad_desc_chain(struct virt_dma_desc *vd1,
			    struct virt_dma_desc *vd2)
{
	struct pxad_desc_sw *desc1 = to_pxad_sw_desc(vd1);
	struct pxad_desc_sw *desc2 = to_pxad_sw_desc(vd2);
	dma_addr_t dma_to_chain;

	dma_to_chain = desc2->first;
	desc1->hw_desc[desc1->nb_desc - 1]->ddadr = dma_to_chain;
}

static bool pxad_try_hotchain(struct virt_dma_chan *vc,
			      struct virt_dma_desc *vd)
{
	struct virt_dma_desc *vd_last_issued = NULL;
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);

	/*
	 * Attempt to hot chain the tx if the phy is still running. This is
	 * considered successful only if either the channel is still running
	 * after the chaining, or if the chained transfer is completed after
	 * having been hot chained.
	 * A change of alignment is not allowed, and forbids hotchaining.
	 */
	if (is_chan_running(chan)) {
		BUG_ON(list_empty(&vc->desc_issued));

		if (!is_running_chan_misaligned(chan) &&
		    to_pxad_sw_desc(vd)->misaligned)
			return false;

		vd_last_issued = list_entry(vc->desc_issued.prev,
					    struct virt_dma_desc, node);
		pxad_desc_chain(vd_last_issued, vd);
		if (is_chan_running(chan) || is_desc_completed(vd))
			return true;
	}

	return false;
}

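/*
 * Acknowledge a channel interrupt: returns PXA_DCSR_RUN if the interrupt was
 * not meant for this physical channel (its DINT bit is clear), otherwise the
 * DCSR value that was written back to clear the pending bits, with RUN masked
 * out.
 */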
static unsigned int clear_chan_irq(struct pxad_phy *phy)
{
	u32 dcsr;
	u32 dint = readl(phy->base + DINT);

	if (!(dint & BIT(phy->idx)))
		return PXA_DCSR_RUN;

	/* clear irq */
	dcsr = phy_readl_relaxed(phy, DCSR);
	phy_writel(phy, dcsr, DCSR);
	if ((dcsr & PXA_DCSR_BUSERR) && (phy->vchan))
		dev_warn(&phy->vchan->vc.chan.dev->device,
			 "%s(chan=%p): PXA_DCSR_BUSERR\n",
			 __func__, &phy->vchan);

	return dcsr & ~PXA_DCSR_RUN;
}

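/*
 * Per-channel interrupt handler: walk the issued descriptors, complete the
 * finished ones (or fire the cyclic callback), record a bus error cookie if
 * needed, and if the channel stopped with more issued work pending, relaunch
 * it on the next descriptor.
 */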
static irqreturn_t pxad_chan_handler(int irq, void *dev_id)
{
	struct pxad_phy *phy = dev_id;
	struct pxad_chan *chan = phy->vchan;
	struct virt_dma_desc *vd, *tmp;
	unsigned int dcsr;
	unsigned long flags;
	bool vd_completed;
	dma_cookie_t last_started = 0;

	BUG_ON(!chan);

	dcsr = clear_chan_irq(phy);
	if (dcsr & PXA_DCSR_RUN)
		return IRQ_NONE;

	spin_lock_irqsave(&chan->vc.lock, flags);
	list_for_each_entry_safe(vd, tmp, &chan->vc.desc_issued, node) {
		vd_completed = is_desc_completed(vd);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): checking txd %p[%x]: completed=%d dcsr=0x%x\n",
			__func__, vd, vd->tx.cookie, vd_completed,
			dcsr);
		last_started = vd->tx.cookie;
		if (to_pxad_sw_desc(vd)->cyclic) {
			vchan_cyclic_callback(vd);
			break;
		}
		if (vd_completed) {
			list_del(&vd->node);
			vchan_cookie_complete(vd);
		} else {
			break;
		}
	}

	if (dcsr & PXA_DCSR_BUSERR) {
		chan->bus_error = last_started;
		phy_disable(phy);
	}

	if (!chan->bus_error && dcsr & PXA_DCSR_STOPSTATE) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): channel stopped, submitted_empty=%d issued_empty=%d",
			__func__,
			list_empty(&chan->vc.desc_submitted),
			list_empty(&chan->vc.desc_issued));
		phy_writel_relaxed(phy, dcsr & ~PXA_DCSR_STOPIRQEN, DCSR);

		if (list_empty(&chan->vc.desc_issued)) {
			chan->misaligned =
				!list_empty(&chan->vc.desc_submitted);
		} else {
			vd = list_first_entry(&chan->vc.desc_issued,
					      struct virt_dma_desc, node);
			pxad_launch_chan(chan, to_pxad_sw_desc(vd));
		}
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	wake_up(&chan->wq_state);

	return IRQ_HANDLED;
}

static irqreturn_t pxad_int_handler(int irq, void *dev_id)
{
	struct pxad_device *pdev = dev_id;
	struct pxad_phy *phy;
	u32 dint = readl(pdev->base + DINT);
	int i, ret = IRQ_NONE;

	while (dint) {
		i = __ffs(dint);
		dint &= (dint - 1);
		phy = &pdev->phys[i];
		if (pxad_chan_handler(irq, phy) == IRQ_HANDLED)
			ret = IRQ_HANDLED;
	}

	return ret;
}

static int pxad_alloc_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	if (chan->desc_pool)
		return 1;

	chan->desc_pool = dma_pool_create(dma_chan_name(dchan),
					  pdev->slave.dev,
					  sizeof(struct pxad_desc_hw),
					  __alignof__(struct pxad_desc_hw),
					  0);
	if (!chan->desc_pool) {
		dev_err(&chan->vc.chan.dev->device,
			"%s(): unable to allocate descriptor pool\n",
			__func__);
		return -ENOMEM;
	}

	return 1;
}

static void pxad_free_chan_resources(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	vchan_free_chan_resources(&chan->vc);
	dma_pool_destroy(chan->desc_pool);
	chan->desc_pool = NULL;

	chan->drcmr = U32_MAX;
	chan->prio = PXAD_PRIO_LOWEST;
}

static void pxad_free_desc(struct virt_dma_desc *vd)
{
	int i;
	dma_addr_t dma;
	struct pxad_desc_sw *sw_desc = to_pxad_sw_desc(vd);

	BUG_ON(sw_desc->nb_desc == 0);
	for (i = sw_desc->nb_desc - 1; i >= 0; i--) {
		if (i > 0)
			dma = sw_desc->hw_desc[i - 1]->ddadr;
		else
			dma = sw_desc->first;
		dma_pool_free(sw_desc->desc_pool,
			      sw_desc->hw_desc[i], dma);
	}
	sw_desc->nb_desc = 0;
	kfree(sw_desc);
}

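/*
 * Allocate a software descriptor able to hold nb_hw_desc hardware descriptors.
 * The hardware descriptors come from the channel's dma_pool and are chained
 * through their ddadr fields as they are allocated; sw_desc->first keeps the
 * DMA address of the head of the chain.
 */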
static struct pxad_desc_sw *
pxad_alloc_desc(struct pxad_chan *chan, unsigned int nb_hw_desc)
{
	struct pxad_desc_sw *sw_desc;
	dma_addr_t dma;
	int i;

	sw_desc = kzalloc(sizeof(*sw_desc) +
			  nb_hw_desc * sizeof(struct pxad_desc_hw *),
			  GFP_NOWAIT);
	if (!sw_desc)
		return NULL;
	sw_desc->desc_pool = chan->desc_pool;

	for (i = 0; i < nb_hw_desc; i++) {
		sw_desc->hw_desc[i] = dma_pool_alloc(sw_desc->desc_pool,
						     GFP_NOWAIT, &dma);
		if (!sw_desc->hw_desc[i]) {
			dev_err(&chan->vc.chan.dev->device,
				"%s(): Couldn't allocate the %dth hw_desc from dma_pool %p\n",
				__func__, i, sw_desc->desc_pool);
			goto err;
		}

		if (i == 0)
			sw_desc->first = dma;
		else
			sw_desc->hw_desc[i - 1]->ddadr = dma;
		sw_desc->nb_desc++;
	}

	return sw_desc;
err:
	pxad_free_desc(&sw_desc->vd);
	return NULL;
}

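/*
 * Submission policy: if nothing is waiting in the submitted list, try to
 * "hot chain" the new transfer onto the still-running descriptor chain and
 * issue it immediately. Otherwise "cold chain" it onto the last submitted
 * descriptor (unless that would introduce a new misalignment) and leave it
 * in the submitted list for issue_pending or the irq handler to start.
 */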
static dma_cookie_t pxad_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct pxad_chan *chan = to_pxad_chan(&vc->chan);
	struct virt_dma_desc *vd_chained = NULL,
		*vd = container_of(tx, struct virt_dma_desc, tx);
	dma_cookie_t cookie;
	unsigned long flags;

	set_updater_desc(to_pxad_sw_desc(vd), tx->flags);

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	if (list_empty(&vc->desc_submitted) && pxad_try_hotchain(vc, vd)) {
		list_move_tail(&vd->node, &vc->desc_issued);
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): txd %p[%x]: submitted (hot linked)\n",
			__func__, vd, cookie);
		goto out;
	}

	/*
	 * Fallback to placing the tx in the submitted queue
	 */
	if (!list_empty(&vc->desc_submitted)) {
		vd_chained = list_entry(vc->desc_submitted.prev,
					struct virt_dma_desc, node);
		/*
		 * Only chain the descriptors if no new misalignment is
		 * introduced. If a new misalignment is chained, let the channel
		 * stop, and be relaunched in misalign mode from the irq
		 * handler.
		 */
		if (chan->misaligned || !to_pxad_sw_desc(vd)->misaligned)
			pxad_desc_chain(vd_chained, vd);
		else
			vd_chained = NULL;
	}
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]: submitted (%s linked)\n",
		__func__, vd, cookie, vd_chained ? "cold" : "not");
	list_move_tail(&vd->node, &vc->desc_submitted);
	chan->misaligned |= to_pxad_sw_desc(vd)->misaligned;

out:
	spin_unlock_irqrestore(&vc->lock, flags);
	return cookie;
}

static void pxad_issue_pending(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct virt_dma_desc *vd_first;
	unsigned long flags;

	spin_lock_irqsave(&chan->vc.lock, flags);
	if (list_empty(&chan->vc.desc_submitted))
		goto out;

	vd_first = list_first_entry(&chan->vc.desc_submitted,
				    struct virt_dma_desc, node);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x]", __func__, vd_first, vd_first->tx.cookie);

	vchan_issue_pending(&chan->vc);
	if (!pxad_try_hotchain(&chan->vc, vd_first))
		pxad_launch_chan(chan, to_pxad_sw_desc(vd_first));
out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
}

static inline struct dma_async_tx_descriptor *
pxad_tx_prep(struct virt_dma_chan *vc, struct virt_dma_desc *vd,
	     unsigned long tx_flags)
{
	struct dma_async_tx_descriptor *tx;
	struct pxad_chan *chan = container_of(vc, struct pxad_chan, vc);

	INIT_LIST_HEAD(&vd->node);
	tx = vchan_tx_prep(vc, vd, tx_flags);
	tx->tx_submit = pxad_tx_submit;
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vc=%p txd=%p[%x] flags=0x%lx\n", __func__,
		vc, vd, vd->tx.cookie,
		tx_flags);

	return tx;
}

static void pxad_get_config(struct pxad_chan *chan,
			    enum dma_transfer_direction dir,
			    u32 *dcmd, u32 *dev_src, u32 *dev_dst)
{
	u32 maxburst = 0, dev_addr = 0;
	enum dma_slave_buswidth width = DMA_SLAVE_BUSWIDTH_UNDEFINED;
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);

	*dcmd = 0;
	if (dir == DMA_DEV_TO_MEM) {
		maxburst = chan->cfg.src_maxburst;
		width = chan->cfg.src_addr_width;
		dev_addr = chan->cfg.src_addr;
		*dev_src = dev_addr;
		*dcmd |= PXA_DCMD_INCTRGADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWSRC;
	}
	if (dir == DMA_MEM_TO_DEV) {
		maxburst = chan->cfg.dst_maxburst;
		width = chan->cfg.dst_addr_width;
		dev_addr = chan->cfg.dst_addr;
		*dev_dst = dev_addr;
		*dcmd |= PXA_DCMD_INCSRCADDR;
		if (chan->drcmr <= pdev->nr_requestors)
			*dcmd |= PXA_DCMD_FLOWTRG;
	}
	if (dir == DMA_MEM_TO_MEM)
		*dcmd |= PXA_DCMD_BURST32 | PXA_DCMD_INCTRGADDR |
			PXA_DCMD_INCSRCADDR;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dev_addr=0x%x maxburst=%d width=%d dir=%d\n",
		__func__, dev_addr, maxburst, width, dir);

	if (width == DMA_SLAVE_BUSWIDTH_1_BYTE)
		*dcmd |= PXA_DCMD_WIDTH1;
	else if (width == DMA_SLAVE_BUSWIDTH_2_BYTES)
		*dcmd |= PXA_DCMD_WIDTH2;
	else if (width == DMA_SLAVE_BUSWIDTH_4_BYTES)
		*dcmd |= PXA_DCMD_WIDTH4;

	if (maxburst == 8)
		*dcmd |= PXA_DCMD_BURST8;
	else if (maxburst == 16)
		*dcmd |= PXA_DCMD_BURST16;
	else if (maxburst == 32)
		*dcmd |= PXA_DCMD_BURST32;
}

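/*
 * Memcpy preparation: the copy is split into hardware descriptors of at most
 * PDMA_MAX_DESC_BYTES each, followed by the updater descriptor. Source or
 * destination addresses that are not 8-byte aligned mark the transfer as
 * misaligned so DALGN gets programmed when the channel is started.
 */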
static struct dma_async_tx_descriptor *
pxad_prep_memcpy(struct dma_chan *dchan,
		 dma_addr_t dma_dst, dma_addr_t dma_src,
		 size_t len, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw *hw_desc;
	u32 dcmd;
	unsigned int i, nb_desc = 0;
	size_t copy;

	if (!dchan || !len)
		return NULL;

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dma_dst=0x%lx dma_src=0x%lx len=%zu flags=%lx\n",
		__func__, (unsigned long)dma_dst, (unsigned long)dma_src,
		len, flags);
	pxad_get_config(chan, DMA_MEM_TO_MEM, &dcmd, NULL, NULL);

	nb_desc = DIV_ROUND_UP(len, PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->len = len;

	if (!IS_ALIGNED(dma_src, 1 << PDMA_ALIGNMENT) ||
	    !IS_ALIGNED(dma_dst, 1 << PDMA_ALIGNMENT))
		sw_desc->misaligned = true;

	i = 0;
	do {
		hw_desc = sw_desc->hw_desc[i++];
		copy = min_t(size_t, len, PDMA_MAX_DESC_BYTES);
		hw_desc->dcmd = dcmd | (PXA_DCMD_LENGTH & copy);
		hw_desc->dsadr = dma_src;
		hw_desc->dtadr = dma_dst;
		len -= copy;
		dma_src += copy;
		dma_dst += copy;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
pxad_prep_slave_sg(struct dma_chan *dchan, struct scatterlist *sgl,
		   unsigned int sg_len, enum dma_transfer_direction dir,
		   unsigned long flags, void *context)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	size_t len, avail;
	struct scatterlist *sg;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0, i, j = 0;

	if ((sgl == NULL) || (sg_len == 0))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): dir=%d flags=%lx\n", __func__, dir, flags);

	for_each_sg(sgl, sg, sg_len, i)
		nb_desc += DIV_ROUND_UP(sg_dma_len(sg), PDMA_MAX_DESC_BYTES);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;

	for_each_sg(sgl, sg, sg_len, i) {
		dma = sg_dma_address(sg);
		avail = sg_dma_len(sg);
		sw_desc->len += avail;

		do {
			len = min_t(size_t, avail, PDMA_MAX_DESC_BYTES);
			if (dma & 0x7)
				sw_desc->misaligned = true;

			sw_desc->hw_desc[j]->dcmd =
				dcmd | (PXA_DCMD_LENGTH & len);
			sw_desc->hw_desc[j]->dsadr = dsadr ? dsadr : dma;
			sw_desc->hw_desc[j++]->dtadr = dtadr ? dtadr : dma;

			dma += len;
			avail -= len;
		} while (avail);
	}
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static struct dma_async_tx_descriptor *
pxad_prep_dma_cyclic(struct dma_chan *dchan,
		     dma_addr_t buf_addr, size_t len, size_t period_len,
		     enum dma_transfer_direction dir, unsigned long flags)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_desc_sw *sw_desc;
	struct pxad_desc_hw **phw_desc;
	dma_addr_t dma;
	u32 dcmd, dsadr = 0, dtadr = 0;
	unsigned int nb_desc = 0;

	if (!dchan || !len || !period_len)
		return NULL;
	if ((dir != DMA_DEV_TO_MEM) && (dir != DMA_MEM_TO_DEV)) {
		dev_err(&chan->vc.chan.dev->device,
			"Unsupported direction for cyclic DMA\n");
		return NULL;
	}
	/* the buffer length must be a multiple of period_len */
	if (len % period_len != 0 || period_len > PDMA_MAX_DESC_BYTES ||
	    !IS_ALIGNED(period_len, 1 << PDMA_ALIGNMENT))
		return NULL;

	pxad_get_config(chan, dir, &dcmd, &dsadr, &dtadr);
	dcmd |= PXA_DCMD_ENDIRQEN | (PXA_DCMD_LENGTH & period_len);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): buf_addr=0x%lx len=%zu period=%zu dir=%d flags=%lx\n",
		__func__, (unsigned long)buf_addr, len, period_len, dir, flags);

	nb_desc = DIV_ROUND_UP(period_len, PDMA_MAX_DESC_BYTES);
	nb_desc *= DIV_ROUND_UP(len, period_len);
	sw_desc = pxad_alloc_desc(chan, nb_desc + 1);
	if (!sw_desc)
		return NULL;
	sw_desc->cyclic = true;
	sw_desc->len = len;

	phw_desc = sw_desc->hw_desc;
	dma = buf_addr;
	do {
		phw_desc[0]->dsadr = dsadr ? dsadr : dma;
		phw_desc[0]->dtadr = dtadr ? dtadr : dma;
		phw_desc[0]->dcmd = dcmd;
		phw_desc++;
		dma += period_len;
		len -= period_len;
	} while (len);
	set_updater_desc(sw_desc, flags);

	return pxad_tx_prep(&chan->vc, &sw_desc->vd, flags);
}

static int pxad_config(struct dma_chan *dchan,
		       struct dma_slave_config *cfg)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	if (!dchan)
		return -EINVAL;

	chan->cfg = *cfg;
	return 0;
}

static int pxad_terminate_all(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	struct pxad_device *pdev = to_pxad_dev(chan->vc.chan.device);
	struct virt_dma_desc *vd = NULL;
	unsigned long flags;
	struct pxad_phy *phy;
	LIST_HEAD(head);

	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): vchan %p: terminate all\n", __func__, &chan->vc);

	spin_lock_irqsave(&chan->vc.lock, flags);
	vchan_get_all_descriptors(&chan->vc, &head);

	list_for_each_entry(vd, &head, node) {
		dev_dbg(&chan->vc.chan.dev->device,
			"%s(): cancelling txd %p[%x] (completed=%d)", __func__,
			vd, vd->tx.cookie, is_desc_completed(vd));
	}

	phy = chan->phy;
	if (phy) {
		phy_disable(chan->phy);
		pxad_free_phy(chan);
		chan->phy = NULL;
		spin_lock(&pdev->phy_lock);
		phy->vchan = NULL;
		spin_unlock(&pdev->phy_lock);
	}
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	vchan_dma_desc_free_list(&chan->vc, &head);

	return 0;
}

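/*
 * Residue computation: read the current source or target address from the
 * phy (depending on the transfer direction encoded in the first descriptor),
 * locate the hardware descriptor it falls into, and add up what is left of
 * that descriptor plus every descriptor after it.
 */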
static unsigned int pxad_residue(struct pxad_chan *chan,
				 dma_cookie_t cookie)
{
	struct virt_dma_desc *vd = NULL;
	struct pxad_desc_sw *sw_desc = NULL;
	struct pxad_desc_hw *hw_desc = NULL;
	u32 curr, start, len, end, residue = 0;
	unsigned long flags;
	bool passed = false;
	int i;

	/*
	 * If the channel does not have a phy pointer anymore, it has already
	 * been completed. Therefore, its residue is 0.
	 */
	if (!chan->phy)
		return 0;

	spin_lock_irqsave(&chan->vc.lock, flags);

	vd = vchan_find_desc(&chan->vc, cookie);
	if (!vd)
		goto out;

	sw_desc = to_pxad_sw_desc(vd);
	if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
		curr = phy_readl_relaxed(chan->phy, DSADR);
	else
		curr = phy_readl_relaxed(chan->phy, DTADR);

	/*
	 * curr has to be actually read before checking descriptor
	 * completion, so that a curr inside a status updater
	 * descriptor implies the following test returns true, and
	 * preventing reordering of curr load and the test.
	 */
	rmb();
	if (is_desc_completed(vd))
		goto out;

	for (i = 0; i < sw_desc->nb_desc - 1; i++) {
		hw_desc = sw_desc->hw_desc[i];
		if (sw_desc->hw_desc[0]->dcmd & PXA_DCMD_INCSRCADDR)
			start = hw_desc->dsadr;
		else
			start = hw_desc->dtadr;
		len = hw_desc->dcmd & PXA_DCMD_LENGTH;
		end = start + len;

		/*
		 * 'passed' will be latched once we found the descriptor
		 * which lies inside the boundaries of the curr
		 * pointer. All descriptors that occur in the list
		 * _after_ we found that partially handled descriptor
		 * are still to be processed and are hence added to the
		 * residual bytes counter.
		 */

		if (passed) {
			residue += len;
		} else if (curr >= start && curr <= end) {
			residue += end - curr;
			passed = true;
		}
	}
	if (!passed)
		residue = sw_desc->len;

out:
	spin_unlock_irqrestore(&chan->vc.lock, flags);
	dev_dbg(&chan->vc.chan.dev->device,
		"%s(): txd %p[%x] sw_desc=%p: %d\n",
		__func__, vd, cookie, sw_desc, residue);
	return residue;
}

static enum dma_status pxad_tx_status(struct dma_chan *dchan,
				      dma_cookie_t cookie,
				      struct dma_tx_state *txstate)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);
	enum dma_status ret;

	if (cookie == chan->bus_error)
		return DMA_ERROR;

	ret = dma_cookie_status(dchan, cookie, txstate);
	if (likely(txstate && (ret != DMA_ERROR)))
		dma_set_residue(txstate, pxad_residue(chan, cookie));

	return ret;
}

static void pxad_synchronize(struct dma_chan *dchan)
{
	struct pxad_chan *chan = to_pxad_chan(dchan);

	wait_event(chan->wq_state, !is_chan_running(chan));
	vchan_synchronize(&chan->vc);
}

static void pxad_free_channels(struct dma_device *dmadev)
{
	struct pxad_chan *c, *cn;

	list_for_each_entry_safe(c, cn, &dmadev->channels,
				 vc.chan.device_node) {
		list_del(&c->vc.chan.device_node);
		tasklet_kill(&c->vc.task);
	}
}

static int pxad_remove(struct platform_device *op)
{
	struct pxad_device *pdev = platform_get_drvdata(op);

	pxad_cleanup_debugfs(pdev);
	pxad_free_channels(&pdev->slave);
	return 0;
}

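/*
 * Interrupt wiring: either every physical channel has its own interrupt line
 * (one devm_request_irq() per channel on pxad_chan_handler()), or a single
 * muxed interrupt is used and pxad_int_handler() demultiplexes DINT.
 */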
static int pxad_init_phys(struct platform_device *op,
			  struct pxad_device *pdev,
			  unsigned int nb_phy_chans)
{
	int irq0, irq, nr_irq = 0, i, ret = 0;
	struct pxad_phy *phy;

	irq0 = platform_get_irq(op, 0);
	if (irq0 < 0)
		return irq0;

	pdev->phys = devm_kcalloc(&op->dev, nb_phy_chans,
				  sizeof(pdev->phys[0]), GFP_KERNEL);
	if (!pdev->phys)
		return -ENOMEM;

	for (i = 0; i < nb_phy_chans; i++)
		if (platform_get_irq_optional(op, i) > 0)
			nr_irq++;

	for (i = 0; i < nb_phy_chans; i++) {
		phy = &pdev->phys[i];
		phy->base = pdev->base;
		phy->idx = i;
		irq = platform_get_irq_optional(op, i);
		if ((nr_irq > 1) && (irq > 0))
			ret = devm_request_irq(&op->dev, irq,
					       pxad_chan_handler,
					       IRQF_SHARED, "pxa-dma", phy);
		if ((nr_irq == 1) && (i == 0))
			ret = devm_request_irq(&op->dev, irq0,
					       pxad_int_handler,
					       IRQF_SHARED, "pxa-dma", pdev);
		if (ret) {
			dev_err(pdev->slave.dev,
				"%s(): can't request irq %d:%d\n", __func__,
				irq, ret);
			return ret;
		}
	}

	return 0;
}

static const struct of_device_id pxad_dt_ids[] = {
	{ .compatible = "marvell,pdma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, pxad_dt_ids);

static struct dma_chan *pxad_dma_xlate(struct of_phandle_args *dma_spec,
				       struct of_dma *ofdma)
{
	struct pxad_device *d = ofdma->of_dma_data;
	struct dma_chan *chan;

	chan = dma_get_any_slave_channel(&d->slave);
	if (!chan)
		return NULL;

	to_pxad_chan(chan)->drcmr = dma_spec->args[0];
	to_pxad_chan(chan)->prio = dma_spec->args[1];

	return chan;
}

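/*
 * Register the dmaengine device: set up the slave callbacks, allocate the
 * physical channels and their interrupts, then create one virtual channel
 * per physical channel.
 */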
static int pxad_init_dmadev(struct platform_device *op,
			    struct pxad_device *pdev,
			    unsigned int nr_phy_chans,
			    unsigned int nr_requestors)
{
	int ret;
	unsigned int i;
	struct pxad_chan *c;

	pdev->nr_chans = nr_phy_chans;
	pdev->nr_requestors = nr_requestors;
	INIT_LIST_HEAD(&pdev->slave.channels);
	pdev->slave.device_alloc_chan_resources = pxad_alloc_chan_resources;
	pdev->slave.device_free_chan_resources = pxad_free_chan_resources;
	pdev->slave.device_tx_status = pxad_tx_status;
	pdev->slave.device_issue_pending = pxad_issue_pending;
	pdev->slave.device_config = pxad_config;
	pdev->slave.device_synchronize = pxad_synchronize;
	pdev->slave.device_terminate_all = pxad_terminate_all;

	if (op->dev.coherent_dma_mask)
		dma_set_mask(&op->dev, op->dev.coherent_dma_mask);
	else
		dma_set_mask(&op->dev, DMA_BIT_MASK(32));

	ret = pxad_init_phys(op, pdev, nr_phy_chans);
	if (ret)
		return ret;

	for (i = 0; i < nr_phy_chans; i++) {
		c = devm_kzalloc(&op->dev, sizeof(*c), GFP_KERNEL);
		if (!c)
			return -ENOMEM;

		c->drcmr = U32_MAX;
		c->prio = PXAD_PRIO_LOWEST;
		c->vc.desc_free = pxad_free_desc;
		vchan_init(&c->vc, &pdev->slave);
		init_waitqueue_head(&c->wq_state);
	}

	return dmaenginem_async_device_register(&pdev->slave);
}

static int pxad_probe(struct platform_device *op)
{
	struct pxad_device *pdev;
	const struct of_device_id *of_id;
	const struct dma_slave_map *slave_map = NULL;
	struct mmp_dma_platdata *pdata = dev_get_platdata(&op->dev);
	struct resource *iores;
	int ret, dma_channels = 0, nb_requestors = 0, slave_map_cnt = 0;
	const enum dma_slave_buswidth widths =
		DMA_SLAVE_BUSWIDTH_1_BYTE | DMA_SLAVE_BUSWIDTH_2_BYTES |
		DMA_SLAVE_BUSWIDTH_4_BYTES;

	pdev = devm_kzalloc(&op->dev, sizeof(*pdev), GFP_KERNEL);
	if (!pdev)
		return -ENOMEM;

	spin_lock_init(&pdev->phy_lock);

	iores = platform_get_resource(op, IORESOURCE_MEM, 0);
	pdev->base = devm_ioremap_resource(&op->dev, iores);
	if (IS_ERR(pdev->base))
		return PTR_ERR(pdev->base);

	of_id = of_match_device(pxad_dt_ids, &op->dev);
	if (of_id) {
		of_property_read_u32(op->dev.of_node, "#dma-channels",
				     &dma_channels);
		ret = of_property_read_u32(op->dev.of_node, "#dma-requests",
					   &nb_requestors);
		if (ret) {
			dev_warn(pdev->slave.dev,
				 "#dma-requests set to default 32 as missing in OF: %d",
				 ret);
			nb_requestors = 32;
		}
	} else if (pdata && pdata->dma_channels) {
		dma_channels = pdata->dma_channels;
		nb_requestors = pdata->nb_requestors;
		slave_map = pdata->slave_map;
		slave_map_cnt = pdata->slave_map_cnt;
	} else {
		dma_channels = 32;	/* default 32 channel */
	}

	dma_cap_set(DMA_SLAVE, pdev->slave.cap_mask);
	dma_cap_set(DMA_MEMCPY, pdev->slave.cap_mask);
	dma_cap_set(DMA_CYCLIC, pdev->slave.cap_mask);
	dma_cap_set(DMA_PRIVATE, pdev->slave.cap_mask);
	pdev->slave.device_prep_dma_memcpy = pxad_prep_memcpy;
	pdev->slave.device_prep_slave_sg = pxad_prep_slave_sg;
	pdev->slave.device_prep_dma_cyclic = pxad_prep_dma_cyclic;
	pdev->slave.filter.map = slave_map;
	pdev->slave.filter.mapcnt = slave_map_cnt;
	pdev->slave.filter.fn = pxad_filter_fn;

	pdev->slave.copy_align = PDMA_ALIGNMENT;
	pdev->slave.src_addr_widths = widths;
	pdev->slave.dst_addr_widths = widths;
	pdev->slave.directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM);
	pdev->slave.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
	pdev->slave.descriptor_reuse = true;

	pdev->slave.dev = &op->dev;
	ret = pxad_init_dmadev(op, pdev, dma_channels, nb_requestors);
	if (ret) {
		dev_err(pdev->slave.dev, "unable to register\n");
		return ret;
	}

	if (op->dev.of_node) {
		/* Device-tree DMA controller registration */
		ret = of_dma_controller_register(op->dev.of_node,
						 pxad_dma_xlate, pdev);
		if (ret < 0) {
			dev_err(pdev->slave.dev,
				"of_dma_controller_register failed\n");
			return ret;
		}
	}

	platform_set_drvdata(op, pdev);
	pxad_init_debugfs(pdev);
	dev_info(pdev->slave.dev, "initialized %d channels on %d requestors\n",
		 dma_channels, nb_requestors);
	return 0;
}

static const struct platform_device_id pxad_id_table[] = {
	{ "pxa-dma", },
	{ },
};

static struct platform_driver pxad_driver = {
	.driver		= {
		.name	= "pxa-dma",
		.of_match_table = pxad_dt_ids,
	},
	.id_table	= pxad_id_table,
	.probe		= pxad_probe,
	.remove		= pxad_remove,
};

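/*
 * Channel filter reached through the slave map set up in pxad_probe(): when a
 * client calls dma_request_chan(dev, "name"), the dmaengine core looks the
 * pair up in pdata->slave_map and hands the associated struct pxad_param to
 * this function, which records the requestor line and priority on the virtual
 * channel. A minimal sketch of the equivalent explicit request, with a
 * hypothetical requestor line 17:
 *
 *	dma_cap_mask_t mask;
 *	struct pxad_param param = { .drcmr = 17, .prio = PXAD_PRIO_LOWEST };
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, pxad_filter_fn, &param);
 */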
static bool pxad_filter_fn(struct dma_chan *chan, void *param)
{
	struct pxad_chan *c = to_pxad_chan(chan);
	struct pxad_param *p = param;

	if (chan->device->dev->driver != &pxad_driver.driver)
		return false;

	c->drcmr = p->drcmr;
	c->prio = p->prio;

	return true;
}

module_platform_driver(pxad_driver);

MODULE_DESCRIPTION("Marvell PXA Peripheral DMA Driver");
MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
MODULE_LICENSE("GPL v2");