• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Texas Instruments CPDMA Driver
4  *
5  * Copyright (C) 2010 Texas Instruments
6  *
7  */
8 #include <linux/kernel.h>
9 #include <linux/spinlock.h>
10 #include <linux/device.h>
11 #include <linux/module.h>
12 #include <linux/slab.h>
13 #include <linux/err.h>
14 #include <linux/dma-mapping.h>
15 #include <linux/io.h>
16 #include <linux/delay.h>
17 #include <linux/genalloc.h>
18 #include "davinci_cpdma.h"
19 
20 /* DMA Registers */
21 #define CPDMA_TXIDVER		0x00
22 #define CPDMA_TXCONTROL		0x04
23 #define CPDMA_TXTEARDOWN	0x08
24 #define CPDMA_RXIDVER		0x10
25 #define CPDMA_RXCONTROL		0x14
26 #define CPDMA_SOFTRESET		0x1c
27 #define CPDMA_RXTEARDOWN	0x18
28 #define CPDMA_TX_PRI0_RATE	0x30
29 #define CPDMA_TXINTSTATRAW	0x80
30 #define CPDMA_TXINTSTATMASKED	0x84
31 #define CPDMA_TXINTMASKSET	0x88
32 #define CPDMA_TXINTMASKCLEAR	0x8c
33 #define CPDMA_MACINVECTOR	0x90
34 #define CPDMA_MACEOIVECTOR	0x94
35 #define CPDMA_RXINTSTATRAW	0xa0
36 #define CPDMA_RXINTSTATMASKED	0xa4
37 #define CPDMA_RXINTMASKSET	0xa8
38 #define CPDMA_RXINTMASKCLEAR	0xac
39 #define CPDMA_DMAINTSTATRAW	0xb0
40 #define CPDMA_DMAINTSTATMASKED	0xb4
41 #define CPDMA_DMAINTMASKSET	0xb8
42 #define CPDMA_DMAINTMASKCLEAR	0xbc
43 #define CPDMA_DMAINT_HOSTERR	BIT(1)
44 
45 /* the following exist only if has_ext_regs is set */
46 #define CPDMA_DMACONTROL	0x20
47 #define CPDMA_DMASTATUS		0x24
48 #define CPDMA_RXBUFFOFS		0x28
49 #define CPDMA_EM_CONTROL	0x2c
50 
51 /* Descriptor mode bits */
52 #define CPDMA_DESC_SOP		BIT(31)
53 #define CPDMA_DESC_EOP		BIT(30)
54 #define CPDMA_DESC_OWNER	BIT(29)
55 #define CPDMA_DESC_EOQ		BIT(28)
56 #define CPDMA_DESC_TD_COMPLETE	BIT(27)
57 #define CPDMA_DESC_PASS_CRC	BIT(26)
58 #define CPDMA_DESC_TO_PORT_EN	BIT(20)
59 #define CPDMA_TO_PORT_SHIFT	16
60 #define CPDMA_DESC_PORT_MASK	(BIT(18) | BIT(17) | BIT(16))
61 #define CPDMA_DESC_CRC_LEN	4
62 
63 #define CPDMA_TEARDOWN_VALUE	0xfffffffc
64 
65 #define CPDMA_MAX_RLIM_CNT	16384
66 
67 struct cpdma_desc {
68 	/* hardware fields */
69 	u32			hw_next;
70 	u32			hw_buffer;
71 	u32			hw_len;
72 	u32			hw_mode;
73 	/* software fields */
74 	void			*sw_token;
75 	u32			sw_buffer;
76 	u32			sw_len;
77 };
78 
79 struct cpdma_desc_pool {
80 	phys_addr_t		phys;
81 	dma_addr_t		hw_addr;
82 	void __iomem		*iomap;		/* ioremap map */
83 	void			*cpumap;	/* dma_alloc map */
84 	int			desc_size, mem_size;
85 	int			num_desc;
86 	struct device		*dev;
87 	struct gen_pool		*gen_pool;
88 };
89 
90 enum cpdma_state {
91 	CPDMA_STATE_IDLE,
92 	CPDMA_STATE_ACTIVE,
93 	CPDMA_STATE_TEARDOWN,
94 };
95 
96 struct cpdma_ctlr {
97 	enum cpdma_state	state;
98 	struct cpdma_params	params;
99 	struct device		*dev;
100 	struct cpdma_desc_pool	*pool;
101 	spinlock_t		lock;
102 	struct cpdma_chan	*channels[2 * CPDMA_MAX_CHANNELS];
103 	int chan_num;
104 	int			num_rx_desc; /* RX descriptors number */
105 	int			num_tx_desc; /* TX descriptors number */
106 };
107 
108 struct cpdma_chan {
109 	struct cpdma_desc __iomem	*head, *tail;
110 	void __iomem			*hdp, *cp, *rxfree;
111 	enum cpdma_state		state;
112 	struct cpdma_ctlr		*ctlr;
113 	int				chan_num;
114 	spinlock_t			lock;
115 	int				count;
116 	u32				desc_num;
117 	u32				mask;
118 	cpdma_handler_fn		handler;
119 	enum dma_data_direction		dir;
120 	struct cpdma_chan_stats		stats;
121 	/* offsets into dmaregs */
122 	int	int_set, int_clear, td;
123 	int				weight;
124 	u32				rate_factor;
125 	u32				rate;
126 };
127 
128 struct cpdma_control_info {
129 	u32		reg;
130 	u32		shift, mask;
131 	int		access;
132 #define ACCESS_RO	BIT(0)
133 #define ACCESS_WO	BIT(1)
134 #define ACCESS_RW	(ACCESS_RO | ACCESS_WO)
135 };
136 
137 struct submit_info {
138 	struct cpdma_chan *chan;
139 	int directed;
140 	void *token;
141 	void *data_virt;
142 	dma_addr_t data_dma;
143 	int len;
144 };
145 
146 static struct cpdma_control_info controls[] = {
147 	[CPDMA_TX_RLIM]		  = {CPDMA_DMACONTROL,	8,  0xffff, ACCESS_RW},
148 	[CPDMA_CMD_IDLE]	  = {CPDMA_DMACONTROL,	3,  1,      ACCESS_WO},
149 	[CPDMA_COPY_ERROR_FRAMES] = {CPDMA_DMACONTROL,	4,  1,      ACCESS_RW},
150 	[CPDMA_RX_OFF_LEN_UPDATE] = {CPDMA_DMACONTROL,	2,  1,      ACCESS_RW},
151 	[CPDMA_RX_OWNERSHIP_FLIP] = {CPDMA_DMACONTROL,	1,  1,      ACCESS_RW},
152 	[CPDMA_TX_PRIO_FIXED]	  = {CPDMA_DMACONTROL,	0,  1,      ACCESS_RW},
153 	[CPDMA_STAT_IDLE]	  = {CPDMA_DMASTATUS,	31, 1,      ACCESS_RO},
154 	[CPDMA_STAT_TX_ERR_CODE]  = {CPDMA_DMASTATUS,	20, 0xf,    ACCESS_RW},
155 	[CPDMA_STAT_TX_ERR_CHAN]  = {CPDMA_DMASTATUS,	16, 0x7,    ACCESS_RW},
156 	[CPDMA_STAT_RX_ERR_CODE]  = {CPDMA_DMASTATUS,	12, 0xf,    ACCESS_RW},
157 	[CPDMA_STAT_RX_ERR_CHAN]  = {CPDMA_DMASTATUS,	8,  0x7,    ACCESS_RW},
158 	[CPDMA_RX_BUFFER_OFFSET]  = {CPDMA_RXBUFFOFS,	0,  0xffff, ACCESS_RW},
159 };
160 
161 #define tx_chan_num(chan)	(chan)
162 #define rx_chan_num(chan)	((chan) + CPDMA_MAX_CHANNELS)
163 #define is_rx_chan(chan)	((chan)->chan_num >= CPDMA_MAX_CHANNELS)
164 #define is_tx_chan(chan)	(!is_rx_chan(chan))
165 #define __chan_linear(chan_num)	((chan_num) & (CPDMA_MAX_CHANNELS - 1))
166 #define chan_linear(chan)	__chan_linear((chan)->chan_num)
167 
168 /* The following make access to common cpdma_ctlr params more readable */
169 #define dmaregs		params.dmaregs
170 #define num_chan	params.num_chan
171 
172 /* various accessors */
173 #define dma_reg_read(ctlr, ofs)		readl((ctlr)->dmaregs + (ofs))
174 #define chan_read(chan, fld)		readl((chan)->fld)
175 #define desc_read(desc, fld)		readl(&(desc)->fld)
176 #define dma_reg_write(ctlr, ofs, v)	writel(v, (ctlr)->dmaregs + (ofs))
177 #define chan_write(chan, fld, v)	writel(v, (chan)->fld)
178 #define desc_write(desc, fld, v)	writel((u32)(v), &(desc)->fld)
179 
180 #define cpdma_desc_to_port(chan, mode, directed)			\
181 	do {								\
182 		if (!is_rx_chan(chan) && ((directed == 1) ||		\
183 					  (directed == 2)))		\
184 			mode |= (CPDMA_DESC_TO_PORT_EN |		\
185 				 (directed << CPDMA_TO_PORT_SHIFT));	\
186 	} while (0)
187 
188 #define CPDMA_DMA_EXT_MAP		BIT(16)
189 
cpdma_desc_pool_destroy(struct cpdma_ctlr * ctlr)190 static void cpdma_desc_pool_destroy(struct cpdma_ctlr *ctlr)
191 {
192 	struct cpdma_desc_pool *pool = ctlr->pool;
193 
194 	if (!pool)
195 		return;
196 
197 	WARN(gen_pool_size(pool->gen_pool) != gen_pool_avail(pool->gen_pool),
198 	     "cpdma_desc_pool size %zd != avail %zd",
199 	     gen_pool_size(pool->gen_pool),
200 	     gen_pool_avail(pool->gen_pool));
201 	if (pool->cpumap)
202 		dma_free_coherent(ctlr->dev, pool->mem_size, pool->cpumap,
203 				  pool->phys);
204 }
205 
206 /*
207  * Utility constructs for a cpdma descriptor pool.  Some devices (e.g. davinci
208  * emac) have dedicated on-chip memory for these descriptors.  Some other
209  * devices (e.g. cpsw switches) use plain old memory.  Descriptor pools
210  * abstract out these details
211  */
cpdma_desc_pool_create(struct cpdma_ctlr * ctlr)212 static int cpdma_desc_pool_create(struct cpdma_ctlr *ctlr)
213 {
214 	struct cpdma_params *cpdma_params = &ctlr->params;
215 	struct cpdma_desc_pool *pool;
216 	int ret = -ENOMEM;
217 
218 	pool = devm_kzalloc(ctlr->dev, sizeof(*pool), GFP_KERNEL);
219 	if (!pool)
220 		goto gen_pool_create_fail;
221 	ctlr->pool = pool;
222 
223 	pool->mem_size	= cpdma_params->desc_mem_size;
224 	pool->desc_size	= ALIGN(sizeof(struct cpdma_desc),
225 				cpdma_params->desc_align);
226 	pool->num_desc	= pool->mem_size / pool->desc_size;
227 
228 	if (cpdma_params->descs_pool_size) {
229 		/* recalculate memory size required cpdma descriptor pool
230 		 * basing on number of descriptors specified by user and
231 		 * if memory size > CPPI internal RAM size (desc_mem_size)
232 		 * then switch to use DDR
233 		 */
234 		pool->num_desc = cpdma_params->descs_pool_size;
235 		pool->mem_size = pool->desc_size * pool->num_desc;
236 		if (pool->mem_size > cpdma_params->desc_mem_size)
237 			cpdma_params->desc_mem_phys = 0;
238 	}
239 
240 	pool->gen_pool = devm_gen_pool_create(ctlr->dev, ilog2(pool->desc_size),
241 					      -1, "cpdma");
242 	if (IS_ERR(pool->gen_pool)) {
243 		ret = PTR_ERR(pool->gen_pool);
244 		dev_err(ctlr->dev, "pool create failed %d\n", ret);
245 		goto gen_pool_create_fail;
246 	}
247 
248 	if (cpdma_params->desc_mem_phys) {
249 		pool->phys  = cpdma_params->desc_mem_phys;
250 		pool->iomap = devm_ioremap(ctlr->dev, pool->phys,
251 					   pool->mem_size);
252 		pool->hw_addr = cpdma_params->desc_hw_addr;
253 	} else {
254 		pool->cpumap = dma_alloc_coherent(ctlr->dev,  pool->mem_size,
255 						  &pool->hw_addr, GFP_KERNEL);
256 		pool->iomap = (void __iomem __force *)pool->cpumap;
257 		pool->phys = pool->hw_addr; /* assumes no IOMMU, don't use this value */
258 	}
259 
260 	if (!pool->iomap)
261 		goto gen_pool_create_fail;
262 
263 	ret = gen_pool_add_virt(pool->gen_pool, (unsigned long)pool->iomap,
264 				pool->phys, pool->mem_size, -1);
265 	if (ret < 0) {
266 		dev_err(ctlr->dev, "pool add failed %d\n", ret);
267 		goto gen_pool_add_virt_fail;
268 	}
269 
270 	return 0;
271 
272 gen_pool_add_virt_fail:
273 	cpdma_desc_pool_destroy(ctlr);
274 gen_pool_create_fail:
275 	ctlr->pool = NULL;
276 	return ret;
277 }
278 
desc_phys(struct cpdma_desc_pool * pool,struct cpdma_desc __iomem * desc)279 static inline dma_addr_t desc_phys(struct cpdma_desc_pool *pool,
280 		  struct cpdma_desc __iomem *desc)
281 {
282 	if (!desc)
283 		return 0;
284 	return pool->hw_addr + (__force long)desc - (__force long)pool->iomap;
285 }
286 
287 static inline struct cpdma_desc __iomem *
desc_from_phys(struct cpdma_desc_pool * pool,dma_addr_t dma)288 desc_from_phys(struct cpdma_desc_pool *pool, dma_addr_t dma)
289 {
290 	return dma ? pool->iomap + dma - pool->hw_addr : NULL;
291 }
292 
293 static struct cpdma_desc __iomem *
cpdma_desc_alloc(struct cpdma_desc_pool * pool)294 cpdma_desc_alloc(struct cpdma_desc_pool *pool)
295 {
296 	return (struct cpdma_desc __iomem *)
297 		gen_pool_alloc(pool->gen_pool, pool->desc_size);
298 }
299 
cpdma_desc_free(struct cpdma_desc_pool * pool,struct cpdma_desc __iomem * desc,int num_desc)300 static void cpdma_desc_free(struct cpdma_desc_pool *pool,
301 			    struct cpdma_desc __iomem *desc, int num_desc)
302 {
303 	gen_pool_free(pool->gen_pool, (unsigned long)desc, pool->desc_size);
304 }
305 
_cpdma_control_set(struct cpdma_ctlr * ctlr,int control,int value)306 static int _cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
307 {
308 	struct cpdma_control_info *info = &controls[control];
309 	u32 val;
310 
311 	if (!ctlr->params.has_ext_regs)
312 		return -ENOTSUPP;
313 
314 	if (ctlr->state != CPDMA_STATE_ACTIVE)
315 		return -EINVAL;
316 
317 	if (control < 0 || control >= ARRAY_SIZE(controls))
318 		return -ENOENT;
319 
320 	if ((info->access & ACCESS_WO) != ACCESS_WO)
321 		return -EPERM;
322 
323 	val  = dma_reg_read(ctlr, info->reg);
324 	val &= ~(info->mask << info->shift);
325 	val |= (value & info->mask) << info->shift;
326 	dma_reg_write(ctlr, info->reg, val);
327 
328 	return 0;
329 }
330 
_cpdma_control_get(struct cpdma_ctlr * ctlr,int control)331 static int _cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
332 {
333 	struct cpdma_control_info *info = &controls[control];
334 	int ret;
335 
336 	if (!ctlr->params.has_ext_regs)
337 		return -ENOTSUPP;
338 
339 	if (ctlr->state != CPDMA_STATE_ACTIVE)
340 		return -EINVAL;
341 
342 	if (control < 0 || control >= ARRAY_SIZE(controls))
343 		return -ENOENT;
344 
345 	if ((info->access & ACCESS_RO) != ACCESS_RO)
346 		return -EPERM;
347 
348 	ret = (dma_reg_read(ctlr, info->reg) >> info->shift) & info->mask;
349 	return ret;
350 }
351 
352 /* cpdma_chan_set_chan_shaper - set shaper for a channel
353  * Has to be called under ctlr lock
354  */
cpdma_chan_set_chan_shaper(struct cpdma_chan * chan)355 static int cpdma_chan_set_chan_shaper(struct cpdma_chan *chan)
356 {
357 	struct cpdma_ctlr *ctlr = chan->ctlr;
358 	u32 rate_reg;
359 	u32 rmask;
360 	int ret;
361 
362 	if (!chan->rate)
363 		return 0;
364 
365 	rate_reg = CPDMA_TX_PRI0_RATE + 4 * chan->chan_num;
366 	dma_reg_write(ctlr, rate_reg, chan->rate_factor);
367 
368 	rmask = _cpdma_control_get(ctlr, CPDMA_TX_RLIM);
369 	rmask |= chan->mask;
370 
371 	ret = _cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
372 	return ret;
373 }
374 
cpdma_chan_on(struct cpdma_chan * chan)375 static int cpdma_chan_on(struct cpdma_chan *chan)
376 {
377 	struct cpdma_ctlr *ctlr = chan->ctlr;
378 	struct cpdma_desc_pool	*pool = ctlr->pool;
379 	unsigned long flags;
380 
381 	spin_lock_irqsave(&chan->lock, flags);
382 	if (chan->state != CPDMA_STATE_IDLE) {
383 		spin_unlock_irqrestore(&chan->lock, flags);
384 		return -EBUSY;
385 	}
386 	if (ctlr->state != CPDMA_STATE_ACTIVE) {
387 		spin_unlock_irqrestore(&chan->lock, flags);
388 		return -EINVAL;
389 	}
390 	dma_reg_write(ctlr, chan->int_set, chan->mask);
391 	chan->state = CPDMA_STATE_ACTIVE;
392 	if (chan->head) {
393 		chan_write(chan, hdp, desc_phys(pool, chan->head));
394 		if (chan->rxfree)
395 			chan_write(chan, rxfree, chan->count);
396 	}
397 
398 	spin_unlock_irqrestore(&chan->lock, flags);
399 	return 0;
400 }
401 
402 /* cpdma_chan_fit_rate - set rate for a channel and check if it's possible.
403  * rmask - mask of rate limited channels
404  * Returns min rate in Kb/s
405  */
cpdma_chan_fit_rate(struct cpdma_chan * ch,u32 rate,u32 * rmask,int * prio_mode)406 static int cpdma_chan_fit_rate(struct cpdma_chan *ch, u32 rate,
407 			       u32 *rmask, int *prio_mode)
408 {
409 	struct cpdma_ctlr *ctlr = ch->ctlr;
410 	struct cpdma_chan *chan;
411 	u32 old_rate = ch->rate;
412 	u32 new_rmask = 0;
413 	int rlim = 0;
414 	int i;
415 
416 	for (i = tx_chan_num(0); i < tx_chan_num(CPDMA_MAX_CHANNELS); i++) {
417 		chan = ctlr->channels[i];
418 		if (!chan)
419 			continue;
420 
421 		if (chan == ch)
422 			chan->rate = rate;
423 
424 		if (chan->rate) {
425 			rlim = 1;
426 			new_rmask |= chan->mask;
427 			continue;
428 		}
429 
430 		if (rlim)
431 			goto err;
432 	}
433 
434 	*rmask = new_rmask;
435 	*prio_mode = rlim;
436 	return 0;
437 
438 err:
439 	ch->rate = old_rate;
440 	dev_err(ctlr->dev, "Upper cpdma ch%d is not rate limited\n",
441 		chan->chan_num);
442 	return -EINVAL;
443 }
444 
cpdma_chan_set_factors(struct cpdma_ctlr * ctlr,struct cpdma_chan * ch)445 static u32 cpdma_chan_set_factors(struct cpdma_ctlr *ctlr,
446 				  struct cpdma_chan *ch)
447 {
448 	u32 delta = UINT_MAX, prev_delta = UINT_MAX, best_delta = UINT_MAX;
449 	u32 best_send_cnt = 0, best_idle_cnt = 0;
450 	u32 new_rate, best_rate = 0, rate_reg;
451 	u64 send_cnt, idle_cnt;
452 	u32 min_send_cnt, freq;
453 	u64 divident, divisor;
454 
455 	if (!ch->rate) {
456 		ch->rate_factor = 0;
457 		goto set_factor;
458 	}
459 
460 	freq = ctlr->params.bus_freq_mhz * 1000 * 32;
461 	if (!freq) {
462 		dev_err(ctlr->dev, "The bus frequency is not set\n");
463 		return -EINVAL;
464 	}
465 
466 	min_send_cnt = freq - ch->rate;
467 	send_cnt = DIV_ROUND_UP(min_send_cnt, ch->rate);
468 	while (send_cnt <= CPDMA_MAX_RLIM_CNT) {
469 		divident = ch->rate * send_cnt;
470 		divisor = min_send_cnt;
471 		idle_cnt = DIV_ROUND_CLOSEST_ULL(divident, divisor);
472 
473 		divident = freq * idle_cnt;
474 		divisor = idle_cnt + send_cnt;
475 		new_rate = DIV_ROUND_CLOSEST_ULL(divident, divisor);
476 
477 		delta = new_rate >= ch->rate ? new_rate - ch->rate : delta;
478 		if (delta < best_delta) {
479 			best_delta = delta;
480 			best_send_cnt = send_cnt;
481 			best_idle_cnt = idle_cnt;
482 			best_rate = new_rate;
483 
484 			if (!delta)
485 				break;
486 		}
487 
488 		if (prev_delta >= delta) {
489 			prev_delta = delta;
490 			send_cnt++;
491 			continue;
492 		}
493 
494 		idle_cnt++;
495 		divident = freq * idle_cnt;
496 		send_cnt = DIV_ROUND_CLOSEST_ULL(divident, ch->rate);
497 		send_cnt -= idle_cnt;
498 		prev_delta = UINT_MAX;
499 	}
500 
501 	ch->rate = best_rate;
502 	ch->rate_factor = best_send_cnt | (best_idle_cnt << 16);
503 
504 set_factor:
505 	rate_reg = CPDMA_TX_PRI0_RATE + 4 * ch->chan_num;
506 	dma_reg_write(ctlr, rate_reg, ch->rate_factor);
507 	return 0;
508 }
509 
cpdma_ctlr_create(struct cpdma_params * params)510 struct cpdma_ctlr *cpdma_ctlr_create(struct cpdma_params *params)
511 {
512 	struct cpdma_ctlr *ctlr;
513 
514 	ctlr = devm_kzalloc(params->dev, sizeof(*ctlr), GFP_KERNEL);
515 	if (!ctlr)
516 		return NULL;
517 
518 	ctlr->state = CPDMA_STATE_IDLE;
519 	ctlr->params = *params;
520 	ctlr->dev = params->dev;
521 	ctlr->chan_num = 0;
522 	spin_lock_init(&ctlr->lock);
523 
524 	if (cpdma_desc_pool_create(ctlr))
525 		return NULL;
526 	/* split pool equally between RX/TX by default */
527 	ctlr->num_tx_desc = ctlr->pool->num_desc / 2;
528 	ctlr->num_rx_desc = ctlr->pool->num_desc - ctlr->num_tx_desc;
529 
530 	if (WARN_ON(ctlr->num_chan > CPDMA_MAX_CHANNELS))
531 		ctlr->num_chan = CPDMA_MAX_CHANNELS;
532 	return ctlr;
533 }
534 
cpdma_ctlr_start(struct cpdma_ctlr * ctlr)535 int cpdma_ctlr_start(struct cpdma_ctlr *ctlr)
536 {
537 	struct cpdma_chan *chan;
538 	unsigned long flags;
539 	int i, prio_mode;
540 
541 	spin_lock_irqsave(&ctlr->lock, flags);
542 	if (ctlr->state != CPDMA_STATE_IDLE) {
543 		spin_unlock_irqrestore(&ctlr->lock, flags);
544 		return -EBUSY;
545 	}
546 
547 	if (ctlr->params.has_soft_reset) {
548 		unsigned timeout = 10 * 100;
549 
550 		dma_reg_write(ctlr, CPDMA_SOFTRESET, 1);
551 		while (timeout) {
552 			if (dma_reg_read(ctlr, CPDMA_SOFTRESET) == 0)
553 				break;
554 			udelay(10);
555 			timeout--;
556 		}
557 		WARN_ON(!timeout);
558 	}
559 
560 	for (i = 0; i < ctlr->num_chan; i++) {
561 		writel(0, ctlr->params.txhdp + 4 * i);
562 		writel(0, ctlr->params.rxhdp + 4 * i);
563 		writel(0, ctlr->params.txcp + 4 * i);
564 		writel(0, ctlr->params.rxcp + 4 * i);
565 	}
566 
567 	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
568 	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
569 
570 	dma_reg_write(ctlr, CPDMA_TXCONTROL, 1);
571 	dma_reg_write(ctlr, CPDMA_RXCONTROL, 1);
572 
573 	ctlr->state = CPDMA_STATE_ACTIVE;
574 
575 	prio_mode = 0;
576 	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
577 		chan = ctlr->channels[i];
578 		if (chan) {
579 			cpdma_chan_set_chan_shaper(chan);
580 			cpdma_chan_on(chan);
581 
582 			/* off prio mode if all tx channels are rate limited */
583 			if (is_tx_chan(chan) && !chan->rate)
584 				prio_mode = 1;
585 		}
586 	}
587 
588 	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
589 	_cpdma_control_set(ctlr, CPDMA_RX_BUFFER_OFFSET, 0);
590 
591 	spin_unlock_irqrestore(&ctlr->lock, flags);
592 	return 0;
593 }
594 
cpdma_ctlr_stop(struct cpdma_ctlr * ctlr)595 int cpdma_ctlr_stop(struct cpdma_ctlr *ctlr)
596 {
597 	unsigned long flags;
598 	int i;
599 
600 	spin_lock_irqsave(&ctlr->lock, flags);
601 	if (ctlr->state != CPDMA_STATE_ACTIVE) {
602 		spin_unlock_irqrestore(&ctlr->lock, flags);
603 		return -EINVAL;
604 	}
605 
606 	ctlr->state = CPDMA_STATE_TEARDOWN;
607 	spin_unlock_irqrestore(&ctlr->lock, flags);
608 
609 	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
610 		if (ctlr->channels[i])
611 			cpdma_chan_stop(ctlr->channels[i]);
612 	}
613 
614 	spin_lock_irqsave(&ctlr->lock, flags);
615 	dma_reg_write(ctlr, CPDMA_RXINTMASKCLEAR, 0xffffffff);
616 	dma_reg_write(ctlr, CPDMA_TXINTMASKCLEAR, 0xffffffff);
617 
618 	dma_reg_write(ctlr, CPDMA_TXCONTROL, 0);
619 	dma_reg_write(ctlr, CPDMA_RXCONTROL, 0);
620 
621 	ctlr->state = CPDMA_STATE_IDLE;
622 
623 	spin_unlock_irqrestore(&ctlr->lock, flags);
624 	return 0;
625 }
626 
cpdma_ctlr_destroy(struct cpdma_ctlr * ctlr)627 int cpdma_ctlr_destroy(struct cpdma_ctlr *ctlr)
628 {
629 	int ret = 0, i;
630 
631 	if (!ctlr)
632 		return -EINVAL;
633 
634 	if (ctlr->state != CPDMA_STATE_IDLE)
635 		cpdma_ctlr_stop(ctlr);
636 
637 	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++)
638 		cpdma_chan_destroy(ctlr->channels[i]);
639 
640 	cpdma_desc_pool_destroy(ctlr);
641 	return ret;
642 }
643 
cpdma_ctlr_int_ctrl(struct cpdma_ctlr * ctlr,bool enable)644 int cpdma_ctlr_int_ctrl(struct cpdma_ctlr *ctlr, bool enable)
645 {
646 	unsigned long flags;
647 	int i;
648 
649 	spin_lock_irqsave(&ctlr->lock, flags);
650 	if (ctlr->state != CPDMA_STATE_ACTIVE) {
651 		spin_unlock_irqrestore(&ctlr->lock, flags);
652 		return -EINVAL;
653 	}
654 
655 	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
656 		if (ctlr->channels[i])
657 			cpdma_chan_int_ctrl(ctlr->channels[i], enable);
658 	}
659 
660 	spin_unlock_irqrestore(&ctlr->lock, flags);
661 	return 0;
662 }
663 
cpdma_ctlr_eoi(struct cpdma_ctlr * ctlr,u32 value)664 void cpdma_ctlr_eoi(struct cpdma_ctlr *ctlr, u32 value)
665 {
666 	dma_reg_write(ctlr, CPDMA_MACEOIVECTOR, value);
667 }
668 
cpdma_ctrl_rxchs_state(struct cpdma_ctlr * ctlr)669 u32 cpdma_ctrl_rxchs_state(struct cpdma_ctlr *ctlr)
670 {
671 	return dma_reg_read(ctlr, CPDMA_RXINTSTATMASKED);
672 }
673 
cpdma_ctrl_txchs_state(struct cpdma_ctlr * ctlr)674 u32 cpdma_ctrl_txchs_state(struct cpdma_ctlr *ctlr)
675 {
676 	return dma_reg_read(ctlr, CPDMA_TXINTSTATMASKED);
677 }
678 
cpdma_chan_set_descs(struct cpdma_ctlr * ctlr,int rx,int desc_num,int per_ch_desc)679 static void cpdma_chan_set_descs(struct cpdma_ctlr *ctlr,
680 				 int rx, int desc_num,
681 				 int per_ch_desc)
682 {
683 	struct cpdma_chan *chan, *most_chan = NULL;
684 	int desc_cnt = desc_num;
685 	int most_dnum = 0;
686 	int min, max, i;
687 
688 	if (!desc_num)
689 		return;
690 
691 	if (rx) {
692 		min = rx_chan_num(0);
693 		max = rx_chan_num(CPDMA_MAX_CHANNELS);
694 	} else {
695 		min = tx_chan_num(0);
696 		max = tx_chan_num(CPDMA_MAX_CHANNELS);
697 	}
698 
699 	for (i = min; i < max; i++) {
700 		chan = ctlr->channels[i];
701 		if (!chan)
702 			continue;
703 
704 		if (chan->weight)
705 			chan->desc_num = (chan->weight * desc_num) / 100;
706 		else
707 			chan->desc_num = per_ch_desc;
708 
709 		desc_cnt -= chan->desc_num;
710 
711 		if (most_dnum < chan->desc_num) {
712 			most_dnum = chan->desc_num;
713 			most_chan = chan;
714 		}
715 	}
716 	/* use remains */
717 	if (most_chan)
718 		most_chan->desc_num += desc_cnt;
719 }
720 
721 /*
722  * cpdma_chan_split_pool - Splits ctrl pool between all channels.
723  * Has to be called under ctlr lock
724  */
cpdma_chan_split_pool(struct cpdma_ctlr * ctlr)725 static int cpdma_chan_split_pool(struct cpdma_ctlr *ctlr)
726 {
727 	int tx_per_ch_desc = 0, rx_per_ch_desc = 0;
728 	int free_rx_num = 0, free_tx_num = 0;
729 	int rx_weight = 0, tx_weight = 0;
730 	int tx_desc_num, rx_desc_num;
731 	struct cpdma_chan *chan;
732 	int i;
733 
734 	if (!ctlr->chan_num)
735 		return 0;
736 
737 	for (i = 0; i < ARRAY_SIZE(ctlr->channels); i++) {
738 		chan = ctlr->channels[i];
739 		if (!chan)
740 			continue;
741 
742 		if (is_rx_chan(chan)) {
743 			if (!chan->weight)
744 				free_rx_num++;
745 			rx_weight += chan->weight;
746 		} else {
747 			if (!chan->weight)
748 				free_tx_num++;
749 			tx_weight += chan->weight;
750 		}
751 	}
752 
753 	if (rx_weight > 100 || tx_weight > 100)
754 		return -EINVAL;
755 
756 	tx_desc_num = ctlr->num_tx_desc;
757 	rx_desc_num = ctlr->num_rx_desc;
758 
759 	if (free_tx_num) {
760 		tx_per_ch_desc = tx_desc_num - (tx_weight * tx_desc_num) / 100;
761 		tx_per_ch_desc /= free_tx_num;
762 	}
763 	if (free_rx_num) {
764 		rx_per_ch_desc = rx_desc_num - (rx_weight * rx_desc_num) / 100;
765 		rx_per_ch_desc /= free_rx_num;
766 	}
767 
768 	cpdma_chan_set_descs(ctlr, 0, tx_desc_num, tx_per_ch_desc);
769 	cpdma_chan_set_descs(ctlr, 1, rx_desc_num, rx_per_ch_desc);
770 
771 	return 0;
772 }
773 
774 
775 /* cpdma_chan_set_weight - set weight of a channel in percentage.
776  * Tx and Rx channels have separate weights. That is 100% for RX
777  * and 100% for Tx. The weight is used to split cpdma resources
778  * in correct proportion required by the channels, including number
779  * of descriptors. The channel rate is not enough to know the
780  * weight of a channel as the maximum rate of an interface is needed.
781  * If weight = 0, then channel uses rest of descriptors leaved by
782  * weighted channels.
783  */
cpdma_chan_set_weight(struct cpdma_chan * ch,int weight)784 int cpdma_chan_set_weight(struct cpdma_chan *ch, int weight)
785 {
786 	struct cpdma_ctlr *ctlr = ch->ctlr;
787 	unsigned long flags, ch_flags;
788 	int ret;
789 
790 	spin_lock_irqsave(&ctlr->lock, flags);
791 	spin_lock_irqsave(&ch->lock, ch_flags);
792 	if (ch->weight == weight) {
793 		spin_unlock_irqrestore(&ch->lock, ch_flags);
794 		spin_unlock_irqrestore(&ctlr->lock, flags);
795 		return 0;
796 	}
797 	ch->weight = weight;
798 	spin_unlock_irqrestore(&ch->lock, ch_flags);
799 
800 	/* re-split pool using new channel weight */
801 	ret = cpdma_chan_split_pool(ctlr);
802 	spin_unlock_irqrestore(&ctlr->lock, flags);
803 	return ret;
804 }
805 
806 /* cpdma_chan_get_min_rate - get minimum allowed rate for channel
807  * Should be called before cpdma_chan_set_rate.
808  * Returns min rate in Kb/s
809  */
cpdma_chan_get_min_rate(struct cpdma_ctlr * ctlr)810 u32 cpdma_chan_get_min_rate(struct cpdma_ctlr *ctlr)
811 {
812 	unsigned int divident, divisor;
813 
814 	divident = ctlr->params.bus_freq_mhz * 32 * 1000;
815 	divisor = 1 + CPDMA_MAX_RLIM_CNT;
816 
817 	return DIV_ROUND_UP(divident, divisor);
818 }
819 
820 /* cpdma_chan_set_rate - limits bandwidth for transmit channel.
821  * The bandwidth * limited channels have to be in order beginning from lowest.
822  * ch - transmit channel the bandwidth is configured for
823  * rate - bandwidth in Kb/s, if 0 - then off shaper
824  */
cpdma_chan_set_rate(struct cpdma_chan * ch,u32 rate)825 int cpdma_chan_set_rate(struct cpdma_chan *ch, u32 rate)
826 {
827 	unsigned long flags, ch_flags;
828 	struct cpdma_ctlr *ctlr;
829 	int ret, prio_mode;
830 	u32 rmask;
831 
832 	if (!ch || !is_tx_chan(ch))
833 		return -EINVAL;
834 
835 	if (ch->rate == rate)
836 		return rate;
837 
838 	ctlr = ch->ctlr;
839 	spin_lock_irqsave(&ctlr->lock, flags);
840 	spin_lock_irqsave(&ch->lock, ch_flags);
841 
842 	ret = cpdma_chan_fit_rate(ch, rate, &rmask, &prio_mode);
843 	if (ret)
844 		goto err;
845 
846 	ret = cpdma_chan_set_factors(ctlr, ch);
847 	if (ret)
848 		goto err;
849 
850 	spin_unlock_irqrestore(&ch->lock, ch_flags);
851 
852 	/* on shapers */
853 	_cpdma_control_set(ctlr, CPDMA_TX_RLIM, rmask);
854 	_cpdma_control_set(ctlr, CPDMA_TX_PRIO_FIXED, prio_mode);
855 	spin_unlock_irqrestore(&ctlr->lock, flags);
856 	return ret;
857 
858 err:
859 	spin_unlock_irqrestore(&ch->lock, ch_flags);
860 	spin_unlock_irqrestore(&ctlr->lock, flags);
861 	return ret;
862 }
863 
cpdma_chan_get_rate(struct cpdma_chan * ch)864 u32 cpdma_chan_get_rate(struct cpdma_chan *ch)
865 {
866 	unsigned long flags;
867 	u32 rate;
868 
869 	spin_lock_irqsave(&ch->lock, flags);
870 	rate = ch->rate;
871 	spin_unlock_irqrestore(&ch->lock, flags);
872 
873 	return rate;
874 }
875 
cpdma_chan_create(struct cpdma_ctlr * ctlr,int chan_num,cpdma_handler_fn handler,int rx_type)876 struct cpdma_chan *cpdma_chan_create(struct cpdma_ctlr *ctlr, int chan_num,
877 				     cpdma_handler_fn handler, int rx_type)
878 {
879 	int offset = chan_num * 4;
880 	struct cpdma_chan *chan;
881 	unsigned long flags;
882 
883 	chan_num = rx_type ? rx_chan_num(chan_num) : tx_chan_num(chan_num);
884 
885 	if (__chan_linear(chan_num) >= ctlr->num_chan)
886 		return ERR_PTR(-EINVAL);
887 
888 	chan = devm_kzalloc(ctlr->dev, sizeof(*chan), GFP_KERNEL);
889 	if (!chan)
890 		return ERR_PTR(-ENOMEM);
891 
892 	spin_lock_irqsave(&ctlr->lock, flags);
893 	if (ctlr->channels[chan_num]) {
894 		spin_unlock_irqrestore(&ctlr->lock, flags);
895 		devm_kfree(ctlr->dev, chan);
896 		return ERR_PTR(-EBUSY);
897 	}
898 
899 	chan->ctlr	= ctlr;
900 	chan->state	= CPDMA_STATE_IDLE;
901 	chan->chan_num	= chan_num;
902 	chan->handler	= handler;
903 	chan->rate	= 0;
904 	chan->weight	= 0;
905 
906 	if (is_rx_chan(chan)) {
907 		chan->hdp	= ctlr->params.rxhdp + offset;
908 		chan->cp	= ctlr->params.rxcp + offset;
909 		chan->rxfree	= ctlr->params.rxfree + offset;
910 		chan->int_set	= CPDMA_RXINTMASKSET;
911 		chan->int_clear	= CPDMA_RXINTMASKCLEAR;
912 		chan->td	= CPDMA_RXTEARDOWN;
913 		chan->dir	= DMA_FROM_DEVICE;
914 	} else {
915 		chan->hdp	= ctlr->params.txhdp + offset;
916 		chan->cp	= ctlr->params.txcp + offset;
917 		chan->int_set	= CPDMA_TXINTMASKSET;
918 		chan->int_clear	= CPDMA_TXINTMASKCLEAR;
919 		chan->td	= CPDMA_TXTEARDOWN;
920 		chan->dir	= DMA_TO_DEVICE;
921 	}
922 	chan->mask = BIT(chan_linear(chan));
923 
924 	spin_lock_init(&chan->lock);
925 
926 	ctlr->channels[chan_num] = chan;
927 	ctlr->chan_num++;
928 
929 	cpdma_chan_split_pool(ctlr);
930 
931 	spin_unlock_irqrestore(&ctlr->lock, flags);
932 	return chan;
933 }
934 
cpdma_chan_get_rx_buf_num(struct cpdma_chan * chan)935 int cpdma_chan_get_rx_buf_num(struct cpdma_chan *chan)
936 {
937 	unsigned long flags;
938 	int desc_num;
939 
940 	spin_lock_irqsave(&chan->lock, flags);
941 	desc_num = chan->desc_num;
942 	spin_unlock_irqrestore(&chan->lock, flags);
943 
944 	return desc_num;
945 }
946 
cpdma_chan_destroy(struct cpdma_chan * chan)947 int cpdma_chan_destroy(struct cpdma_chan *chan)
948 {
949 	struct cpdma_ctlr *ctlr;
950 	unsigned long flags;
951 
952 	if (!chan)
953 		return -EINVAL;
954 	ctlr = chan->ctlr;
955 
956 	spin_lock_irqsave(&ctlr->lock, flags);
957 	if (chan->state != CPDMA_STATE_IDLE)
958 		cpdma_chan_stop(chan);
959 	ctlr->channels[chan->chan_num] = NULL;
960 	ctlr->chan_num--;
961 	devm_kfree(ctlr->dev, chan);
962 	cpdma_chan_split_pool(ctlr);
963 
964 	spin_unlock_irqrestore(&ctlr->lock, flags);
965 	return 0;
966 }
967 
cpdma_chan_get_stats(struct cpdma_chan * chan,struct cpdma_chan_stats * stats)968 int cpdma_chan_get_stats(struct cpdma_chan *chan,
969 			 struct cpdma_chan_stats *stats)
970 {
971 	unsigned long flags;
972 	if (!chan)
973 		return -EINVAL;
974 	spin_lock_irqsave(&chan->lock, flags);
975 	memcpy(stats, &chan->stats, sizeof(*stats));
976 	spin_unlock_irqrestore(&chan->lock, flags);
977 	return 0;
978 }
979 
__cpdma_chan_submit(struct cpdma_chan * chan,struct cpdma_desc __iomem * desc)980 static void __cpdma_chan_submit(struct cpdma_chan *chan,
981 				struct cpdma_desc __iomem *desc)
982 {
983 	struct cpdma_ctlr		*ctlr = chan->ctlr;
984 	struct cpdma_desc __iomem	*prev = chan->tail;
985 	struct cpdma_desc_pool		*pool = ctlr->pool;
986 	dma_addr_t			desc_dma;
987 	u32				mode;
988 
989 	desc_dma = desc_phys(pool, desc);
990 
991 	/* simple case - idle channel */
992 	if (!chan->head) {
993 		chan->stats.head_enqueue++;
994 		chan->head = desc;
995 		chan->tail = desc;
996 		if (chan->state == CPDMA_STATE_ACTIVE)
997 			chan_write(chan, hdp, desc_dma);
998 		return;
999 	}
1000 
1001 	/* first chain the descriptor at the tail of the list */
1002 	desc_write(prev, hw_next, desc_dma);
1003 	chan->tail = desc;
1004 	chan->stats.tail_enqueue++;
1005 
1006 	/* next check if EOQ has been triggered already */
1007 	mode = desc_read(prev, hw_mode);
1008 	if (((mode & (CPDMA_DESC_EOQ | CPDMA_DESC_OWNER)) == CPDMA_DESC_EOQ) &&
1009 	    (chan->state == CPDMA_STATE_ACTIVE)) {
1010 		desc_write(prev, hw_mode, mode & ~CPDMA_DESC_EOQ);
1011 		chan_write(chan, hdp, desc_dma);
1012 		chan->stats.misqueued++;
1013 	}
1014 }
1015 
cpdma_chan_submit_si(struct submit_info * si)1016 static int cpdma_chan_submit_si(struct submit_info *si)
1017 {
1018 	struct cpdma_chan		*chan = si->chan;
1019 	struct cpdma_ctlr		*ctlr = chan->ctlr;
1020 	int				len = si->len;
1021 	struct cpdma_desc __iomem	*desc;
1022 	dma_addr_t			buffer;
1023 	u32				mode;
1024 	int				ret;
1025 
1026 	if (chan->count >= chan->desc_num)	{
1027 		chan->stats.desc_alloc_fail++;
1028 		return -ENOMEM;
1029 	}
1030 
1031 	desc = cpdma_desc_alloc(ctlr->pool);
1032 	if (!desc) {
1033 		chan->stats.desc_alloc_fail++;
1034 		return -ENOMEM;
1035 	}
1036 
1037 	if (len < ctlr->params.min_packet_size) {
1038 		len = ctlr->params.min_packet_size;
1039 		chan->stats.runt_transmit_buff++;
1040 	}
1041 
1042 	mode = CPDMA_DESC_OWNER | CPDMA_DESC_SOP | CPDMA_DESC_EOP;
1043 	cpdma_desc_to_port(chan, mode, si->directed);
1044 
1045 	if (si->data_dma) {
1046 		buffer = si->data_dma;
1047 		dma_sync_single_for_device(ctlr->dev, buffer, len, chan->dir);
1048 	} else {
1049 		buffer = dma_map_single(ctlr->dev, si->data_virt, len, chan->dir);
1050 		ret = dma_mapping_error(ctlr->dev, buffer);
1051 		if (ret) {
1052 			cpdma_desc_free(ctlr->pool, desc, 1);
1053 			return -EINVAL;
1054 		}
1055 	}
1056 
1057 	/* Relaxed IO accessors can be used here as there is read barrier
1058 	 * at the end of write sequence.
1059 	 */
1060 	writel_relaxed(0, &desc->hw_next);
1061 	writel_relaxed(buffer, &desc->hw_buffer);
1062 	writel_relaxed(len, &desc->hw_len);
1063 	writel_relaxed(mode | len, &desc->hw_mode);
1064 	writel_relaxed((uintptr_t)si->token, &desc->sw_token);
1065 	writel_relaxed(buffer, &desc->sw_buffer);
1066 	writel_relaxed(si->data_dma ? len | CPDMA_DMA_EXT_MAP : len,
1067 		       &desc->sw_len);
1068 	desc_read(desc, sw_len);
1069 
1070 	__cpdma_chan_submit(chan, desc);
1071 
1072 	if (chan->state == CPDMA_STATE_ACTIVE && chan->rxfree)
1073 		chan_write(chan, rxfree, 1);
1074 
1075 	chan->count++;
1076 	return 0;
1077 }
1078 
cpdma_chan_idle_submit(struct cpdma_chan * chan,void * token,void * data,int len,int directed)1079 int cpdma_chan_idle_submit(struct cpdma_chan *chan, void *token, void *data,
1080 			   int len, int directed)
1081 {
1082 	struct submit_info si;
1083 	unsigned long flags;
1084 	int ret;
1085 
1086 	si.chan = chan;
1087 	si.token = token;
1088 	si.data_virt = data;
1089 	si.data_dma = 0;
1090 	si.len = len;
1091 	si.directed = directed;
1092 
1093 	spin_lock_irqsave(&chan->lock, flags);
1094 	if (chan->state == CPDMA_STATE_TEARDOWN) {
1095 		spin_unlock_irqrestore(&chan->lock, flags);
1096 		return -EINVAL;
1097 	}
1098 
1099 	ret = cpdma_chan_submit_si(&si);
1100 	spin_unlock_irqrestore(&chan->lock, flags);
1101 	return ret;
1102 }
1103 
cpdma_chan_idle_submit_mapped(struct cpdma_chan * chan,void * token,dma_addr_t data,int len,int directed)1104 int cpdma_chan_idle_submit_mapped(struct cpdma_chan *chan, void *token,
1105 				  dma_addr_t data, int len, int directed)
1106 {
1107 	struct submit_info si;
1108 	unsigned long flags;
1109 	int ret;
1110 
1111 	si.chan = chan;
1112 	si.token = token;
1113 	si.data_virt = NULL;
1114 	si.data_dma = data;
1115 	si.len = len;
1116 	si.directed = directed;
1117 
1118 	spin_lock_irqsave(&chan->lock, flags);
1119 	if (chan->state == CPDMA_STATE_TEARDOWN) {
1120 		spin_unlock_irqrestore(&chan->lock, flags);
1121 		return -EINVAL;
1122 	}
1123 
1124 	ret = cpdma_chan_submit_si(&si);
1125 	spin_unlock_irqrestore(&chan->lock, flags);
1126 	return ret;
1127 }
1128 
cpdma_chan_submit(struct cpdma_chan * chan,void * token,void * data,int len,int directed)1129 int cpdma_chan_submit(struct cpdma_chan *chan, void *token, void *data,
1130 		      int len, int directed)
1131 {
1132 	struct submit_info si;
1133 	unsigned long flags;
1134 	int ret;
1135 
1136 	si.chan = chan;
1137 	si.token = token;
1138 	si.data_virt = data;
1139 	si.data_dma = 0;
1140 	si.len = len;
1141 	si.directed = directed;
1142 
1143 	spin_lock_irqsave(&chan->lock, flags);
1144 	if (chan->state != CPDMA_STATE_ACTIVE) {
1145 		spin_unlock_irqrestore(&chan->lock, flags);
1146 		return -EINVAL;
1147 	}
1148 
1149 	ret = cpdma_chan_submit_si(&si);
1150 	spin_unlock_irqrestore(&chan->lock, flags);
1151 	return ret;
1152 }
1153 
cpdma_chan_submit_mapped(struct cpdma_chan * chan,void * token,dma_addr_t data,int len,int directed)1154 int cpdma_chan_submit_mapped(struct cpdma_chan *chan, void *token,
1155 			     dma_addr_t data, int len, int directed)
1156 {
1157 	struct submit_info si;
1158 	unsigned long flags;
1159 	int ret;
1160 
1161 	si.chan = chan;
1162 	si.token = token;
1163 	si.data_virt = NULL;
1164 	si.data_dma = data;
1165 	si.len = len;
1166 	si.directed = directed;
1167 
1168 	spin_lock_irqsave(&chan->lock, flags);
1169 	if (chan->state != CPDMA_STATE_ACTIVE) {
1170 		spin_unlock_irqrestore(&chan->lock, flags);
1171 		return -EINVAL;
1172 	}
1173 
1174 	ret = cpdma_chan_submit_si(&si);
1175 	spin_unlock_irqrestore(&chan->lock, flags);
1176 	return ret;
1177 }
1178 
cpdma_check_free_tx_desc(struct cpdma_chan * chan)1179 bool cpdma_check_free_tx_desc(struct cpdma_chan *chan)
1180 {
1181 	struct cpdma_ctlr	*ctlr = chan->ctlr;
1182 	struct cpdma_desc_pool	*pool = ctlr->pool;
1183 	bool			free_tx_desc;
1184 	unsigned long		flags;
1185 
1186 	spin_lock_irqsave(&chan->lock, flags);
1187 	free_tx_desc = (chan->count < chan->desc_num) &&
1188 			 gen_pool_avail(pool->gen_pool);
1189 	spin_unlock_irqrestore(&chan->lock, flags);
1190 	return free_tx_desc;
1191 }
1192 
__cpdma_chan_free(struct cpdma_chan * chan,struct cpdma_desc __iomem * desc,int outlen,int status)1193 static void __cpdma_chan_free(struct cpdma_chan *chan,
1194 			      struct cpdma_desc __iomem *desc,
1195 			      int outlen, int status)
1196 {
1197 	struct cpdma_ctlr		*ctlr = chan->ctlr;
1198 	struct cpdma_desc_pool		*pool = ctlr->pool;
1199 	dma_addr_t			buff_dma;
1200 	int				origlen;
1201 	uintptr_t			token;
1202 
1203 	token      = desc_read(desc, sw_token);
1204 	origlen    = desc_read(desc, sw_len);
1205 
1206 	buff_dma   = desc_read(desc, sw_buffer);
1207 	if (origlen & CPDMA_DMA_EXT_MAP) {
1208 		origlen &= ~CPDMA_DMA_EXT_MAP;
1209 		dma_sync_single_for_cpu(ctlr->dev, buff_dma, origlen,
1210 					chan->dir);
1211 	} else {
1212 		dma_unmap_single(ctlr->dev, buff_dma, origlen, chan->dir);
1213 	}
1214 
1215 	cpdma_desc_free(pool, desc, 1);
1216 	(*chan->handler)((void *)token, outlen, status);
1217 }
1218 
__cpdma_chan_process(struct cpdma_chan * chan)1219 static int __cpdma_chan_process(struct cpdma_chan *chan)
1220 {
1221 	struct cpdma_ctlr		*ctlr = chan->ctlr;
1222 	struct cpdma_desc __iomem	*desc;
1223 	int				status, outlen;
1224 	int				cb_status = 0;
1225 	struct cpdma_desc_pool		*pool = ctlr->pool;
1226 	dma_addr_t			desc_dma;
1227 	unsigned long			flags;
1228 
1229 	spin_lock_irqsave(&chan->lock, flags);
1230 
1231 	desc = chan->head;
1232 	if (!desc) {
1233 		chan->stats.empty_dequeue++;
1234 		status = -ENOENT;
1235 		goto unlock_ret;
1236 	}
1237 	desc_dma = desc_phys(pool, desc);
1238 
1239 	status	= desc_read(desc, hw_mode);
1240 	outlen	= status & 0x7ff;
1241 	if (status & CPDMA_DESC_OWNER) {
1242 		chan->stats.busy_dequeue++;
1243 		status = -EBUSY;
1244 		goto unlock_ret;
1245 	}
1246 
1247 	if (status & CPDMA_DESC_PASS_CRC)
1248 		outlen -= CPDMA_DESC_CRC_LEN;
1249 
1250 	status	= status & (CPDMA_DESC_EOQ | CPDMA_DESC_TD_COMPLETE |
1251 			    CPDMA_DESC_PORT_MASK | CPDMA_RX_VLAN_ENCAP);
1252 
1253 	chan->head = desc_from_phys(pool, desc_read(desc, hw_next));
1254 	chan_write(chan, cp, desc_dma);
1255 	chan->count--;
1256 	chan->stats.good_dequeue++;
1257 
1258 	if ((status & CPDMA_DESC_EOQ) && chan->head) {
1259 		chan->stats.requeue++;
1260 		chan_write(chan, hdp, desc_phys(pool, chan->head));
1261 	}
1262 
1263 	spin_unlock_irqrestore(&chan->lock, flags);
1264 	if (unlikely(status & CPDMA_DESC_TD_COMPLETE))
1265 		cb_status = -ENOSYS;
1266 	else
1267 		cb_status = status;
1268 
1269 	__cpdma_chan_free(chan, desc, outlen, cb_status);
1270 	return status;
1271 
1272 unlock_ret:
1273 	spin_unlock_irqrestore(&chan->lock, flags);
1274 	return status;
1275 }
1276 
cpdma_chan_process(struct cpdma_chan * chan,int quota)1277 int cpdma_chan_process(struct cpdma_chan *chan, int quota)
1278 {
1279 	int used = 0, ret = 0;
1280 
1281 	if (chan->state != CPDMA_STATE_ACTIVE)
1282 		return -EINVAL;
1283 
1284 	while (used < quota) {
1285 		ret = __cpdma_chan_process(chan);
1286 		if (ret < 0)
1287 			break;
1288 		used++;
1289 	}
1290 	return used;
1291 }
1292 
cpdma_chan_start(struct cpdma_chan * chan)1293 int cpdma_chan_start(struct cpdma_chan *chan)
1294 {
1295 	struct cpdma_ctlr *ctlr = chan->ctlr;
1296 	unsigned long flags;
1297 	int ret;
1298 
1299 	spin_lock_irqsave(&ctlr->lock, flags);
1300 	ret = cpdma_chan_set_chan_shaper(chan);
1301 	spin_unlock_irqrestore(&ctlr->lock, flags);
1302 	if (ret)
1303 		return ret;
1304 
1305 	ret = cpdma_chan_on(chan);
1306 	if (ret)
1307 		return ret;
1308 
1309 	return 0;
1310 }
1311 
cpdma_chan_stop(struct cpdma_chan * chan)1312 int cpdma_chan_stop(struct cpdma_chan *chan)
1313 {
1314 	struct cpdma_ctlr	*ctlr = chan->ctlr;
1315 	struct cpdma_desc_pool	*pool = ctlr->pool;
1316 	unsigned long		flags;
1317 	int			ret;
1318 	unsigned		timeout;
1319 
1320 	spin_lock_irqsave(&chan->lock, flags);
1321 	if (chan->state == CPDMA_STATE_TEARDOWN) {
1322 		spin_unlock_irqrestore(&chan->lock, flags);
1323 		return -EINVAL;
1324 	}
1325 
1326 	chan->state = CPDMA_STATE_TEARDOWN;
1327 	dma_reg_write(ctlr, chan->int_clear, chan->mask);
1328 
1329 	/* trigger teardown */
1330 	dma_reg_write(ctlr, chan->td, chan_linear(chan));
1331 
1332 	/* wait for teardown complete */
1333 	timeout = 100 * 100; /* 100 ms */
1334 	while (timeout) {
1335 		u32 cp = chan_read(chan, cp);
1336 		if ((cp & CPDMA_TEARDOWN_VALUE) == CPDMA_TEARDOWN_VALUE)
1337 			break;
1338 		udelay(10);
1339 		timeout--;
1340 	}
1341 	WARN_ON(!timeout);
1342 	chan_write(chan, cp, CPDMA_TEARDOWN_VALUE);
1343 
1344 	/* handle completed packets */
1345 	spin_unlock_irqrestore(&chan->lock, flags);
1346 	do {
1347 		ret = __cpdma_chan_process(chan);
1348 		if (ret < 0)
1349 			break;
1350 	} while ((ret & CPDMA_DESC_TD_COMPLETE) == 0);
1351 	spin_lock_irqsave(&chan->lock, flags);
1352 
1353 	/* remaining packets haven't been tx/rx'ed, clean them up */
1354 	while (chan->head) {
1355 		struct cpdma_desc __iomem *desc = chan->head;
1356 		dma_addr_t next_dma;
1357 
1358 		next_dma = desc_read(desc, hw_next);
1359 		chan->head = desc_from_phys(pool, next_dma);
1360 		chan->count--;
1361 		chan->stats.teardown_dequeue++;
1362 
1363 		/* issue callback without locks held */
1364 		spin_unlock_irqrestore(&chan->lock, flags);
1365 		__cpdma_chan_free(chan, desc, 0, -ENOSYS);
1366 		spin_lock_irqsave(&chan->lock, flags);
1367 	}
1368 
1369 	chan->state = CPDMA_STATE_IDLE;
1370 	spin_unlock_irqrestore(&chan->lock, flags);
1371 	return 0;
1372 }
1373 
cpdma_chan_int_ctrl(struct cpdma_chan * chan,bool enable)1374 int cpdma_chan_int_ctrl(struct cpdma_chan *chan, bool enable)
1375 {
1376 	unsigned long flags;
1377 
1378 	spin_lock_irqsave(&chan->lock, flags);
1379 	if (chan->state != CPDMA_STATE_ACTIVE) {
1380 		spin_unlock_irqrestore(&chan->lock, flags);
1381 		return -EINVAL;
1382 	}
1383 
1384 	dma_reg_write(chan->ctlr, enable ? chan->int_set : chan->int_clear,
1385 		      chan->mask);
1386 	spin_unlock_irqrestore(&chan->lock, flags);
1387 
1388 	return 0;
1389 }
1390 
cpdma_control_get(struct cpdma_ctlr * ctlr,int control)1391 int cpdma_control_get(struct cpdma_ctlr *ctlr, int control)
1392 {
1393 	unsigned long flags;
1394 	int ret;
1395 
1396 	spin_lock_irqsave(&ctlr->lock, flags);
1397 	ret = _cpdma_control_get(ctlr, control);
1398 	spin_unlock_irqrestore(&ctlr->lock, flags);
1399 
1400 	return ret;
1401 }
1402 
cpdma_control_set(struct cpdma_ctlr * ctlr,int control,int value)1403 int cpdma_control_set(struct cpdma_ctlr *ctlr, int control, int value)
1404 {
1405 	unsigned long flags;
1406 	int ret;
1407 
1408 	spin_lock_irqsave(&ctlr->lock, flags);
1409 	ret = _cpdma_control_set(ctlr, control, value);
1410 	spin_unlock_irqrestore(&ctlr->lock, flags);
1411 
1412 	return ret;
1413 }
1414 
cpdma_get_num_rx_descs(struct cpdma_ctlr * ctlr)1415 int cpdma_get_num_rx_descs(struct cpdma_ctlr *ctlr)
1416 {
1417 	return ctlr->num_rx_desc;
1418 }
1419 
cpdma_get_num_tx_descs(struct cpdma_ctlr * ctlr)1420 int cpdma_get_num_tx_descs(struct cpdma_ctlr *ctlr)
1421 {
1422 	return ctlr->num_tx_desc;
1423 }
1424 
cpdma_set_num_rx_descs(struct cpdma_ctlr * ctlr,int num_rx_desc)1425 int cpdma_set_num_rx_descs(struct cpdma_ctlr *ctlr, int num_rx_desc)
1426 {
1427 	unsigned long flags;
1428 	int temp, ret;
1429 
1430 	spin_lock_irqsave(&ctlr->lock, flags);
1431 
1432 	temp = ctlr->num_rx_desc;
1433 	ctlr->num_rx_desc = num_rx_desc;
1434 	ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
1435 	ret = cpdma_chan_split_pool(ctlr);
1436 	if (ret) {
1437 		ctlr->num_rx_desc = temp;
1438 		ctlr->num_tx_desc = ctlr->pool->num_desc - ctlr->num_rx_desc;
1439 	}
1440 
1441 	spin_unlock_irqrestore(&ctlr->lock, flags);
1442 
1443 	return ret;
1444 }
1445