1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3 * Copyright © 2006, Intel Corporation.
4 */
5 #ifndef _ADMA_H
6 #define _ADMA_H
7 #include <linux/types.h>
8 #include <linux/io.h>
9 #include <linux/platform_data/dma-iop32x.h>
10
/* Memory copy units: per-channel MMIO register offsets, computed from
 * chan->mmr_base (chan is a struct iop_adma_chan *).
 */
#define DMA_CCR(chan) (chan->mmr_base + 0x0)	/* channel control */
#define DMA_CSR(chan) (chan->mmr_base + 0x4)	/* channel status */
#define DMA_DAR(chan) (chan->mmr_base + 0xc)	/* descriptor address */
#define DMA_NDAR(chan) (chan->mmr_base + 0x10)	/* next descriptor address */
#define DMA_PADR(chan) (chan->mmr_base + 0x14)	/* PCI address */
#define DMA_PUADR(chan) (chan->mmr_base + 0x18)	/* upper PCI address */
#define DMA_LADR(chan) (chan->mmr_base + 0x1c)	/* local address */
#define DMA_BCR(chan) (chan->mmr_base + 0x20)	/* byte count */
#define DMA_DCR(chan) (chan->mmr_base + 0x24)	/* descriptor control */

/* Application accelerator unit */
#define AAU_ACR(chan) (chan->mmr_base + 0x0)	/* accelerator control */
#define AAU_ASR(chan) (chan->mmr_base + 0x4)	/* accelerator status */
#define AAU_ADAR(chan) (chan->mmr_base + 0x8)	/* descriptor address */
#define AAU_ANDAR(chan) (chan->mmr_base + 0xc)	/* next descriptor address */
/* source address registers for the first four sources (src = 0..3) */
#define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2)))
#define AAU_DAR(chan) (chan->mmr_base + 0x20)	/* destination address */
#define AAU_ABCR(chan) (chan->mmr_base + 0x24)	/* byte count */
#define AAU_ADCR(chan) (chan->mmr_base + 0x28)	/* descriptor control */
/* extended source address registers (src_edc >= 4).
 * NOTE(review): this macro references a variable named 'chan' that is
 * NOT a macro parameter -- it only works where 'chan' is in scope at
 * the expansion site.
 */
#define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
/* word indices of EDCR0-2 within iop3xx_desc_aau.src_edc[] */
#define AAU_EDCR0_IDX 8
#define AAU_EDCR1_IDX 17
#define AAU_EDCR2_IDX 26
35
/* AAU descriptor control word -- hardware bit layout, 32 bits total.
 * Do not reorder or resize fields.
 */
struct iop3xx_aau_desc_ctrl {
	unsigned int int_en:1;		/* interrupt on completion */
	unsigned int blk1_cmd_ctrl:3;	/* per-source-block command codes */
	unsigned int blk2_cmd_ctrl:3;
	unsigned int blk3_cmd_ctrl:3;
	unsigned int blk4_cmd_ctrl:3;
	unsigned int blk5_cmd_ctrl:3;
	unsigned int blk6_cmd_ctrl:3;
	unsigned int blk7_cmd_ctrl:3;
	unsigned int blk8_cmd_ctrl:3;
	unsigned int blk_ctrl:2;	/* descriptor extension selector */
	unsigned int dual_xor_en:1;
	unsigned int tx_complete:1;	/* set by hw when the op finished */
	unsigned int zero_result_err:1;	/* zero-sum check failed */
	unsigned int zero_result_en:1;	/* enable zero-sum checking */
	unsigned int dest_write_en:1;	/* write result to dest_addr */
};
53
/* AAU extended descriptor control word (EDCR): command codes for eight
 * additional source blocks.  Hardware bit layout -- do not alter.
 */
struct iop3xx_aau_e_desc_ctrl {
	unsigned int reserved:1;
	unsigned int blk1_cmd_ctrl:3;
	unsigned int blk2_cmd_ctrl:3;
	unsigned int blk3_cmd_ctrl:3;
	unsigned int blk4_cmd_ctrl:3;
	unsigned int blk5_cmd_ctrl:3;
	unsigned int blk6_cmd_ctrl:3;
	unsigned int blk7_cmd_ctrl:3;
	unsigned int blk8_cmd_ctrl:3;
	unsigned int reserved2:7;
};
66
/* DMA channel descriptor control word -- hardware bit layout. */
struct iop3xx_dma_desc_ctrl {
	unsigned int pci_transaction:4;	/* PCI command code for the transfer */
	unsigned int int_en:1;		/* interrupt on completion */
	unsigned int dac_cycle_en:1;	/* dual-address (64-bit PCI) cycles */
	unsigned int mem_to_mem_en:1;	/* local memory to local memory */
	unsigned int crc_data_tx_en:1;
	unsigned int crc_gen_en:1;
	unsigned int crc_seed_dis:1;
	unsigned int reserved:21;
	unsigned int crc_tx_complete:1;
};
78
/* In-memory layout of a DMA channel hardware descriptor.  The anonymous
 * unions overlay the PCI and local-memory views of the same words.
 */
struct iop3xx_desc_dma {
	u32 next_desc;			/* bus address of next chained descriptor */
	union {
		u32 pci_src_addr;
		u32 pci_dest_addr;
		u32 src_addr;
	};
	union {
		u32 upper_pci_src_addr;	/* upper 32 bits for DAC cycles */
		u32 upper_pci_dest_addr;
	};
	union {
		u32 local_pci_src_addr;
		u32 local_pci_dest_addr;
		u32 dest_addr;
	};
	u32 byte_count;			/* transfer length in bytes */
	union {
		u32 desc_ctrl;		/* raw control word */
		struct iop3xx_dma_desc_ctrl desc_ctrl_field;
	};
	u32 crc_addr;
};
102
/* In-memory layout of an AAU hardware descriptor.  The first four
 * sources are inline; further sources and the EDCR control words share
 * the src_edc[] overlay (see AAU_EDCR*_IDX).
 */
struct iop3xx_desc_aau {
	u32 next_desc;		/* bus address of next chained descriptor */
	u32 src[4];		/* source addresses 0-3 */
	u32 dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;	/* raw control word */
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	union {
		u32 src_addr;	/* extra source address ... */
		u32 e_desc_ctrl;	/* ... or an EDCR word */
		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
	} src_edc[31];
};
118
/* Galois-field multiplier word: four 8-bit coefficients packed into
 * one 32-bit register.
 */
struct iop3xx_aau_gfmr {
	unsigned int gfmr1:8;
	unsigned int gfmr2:8;
	unsigned int gfmr3:8;
	unsigned int gfmr4:8;
};
125
/* Layout of a p+q/xor style AAU descriptor with Galois-field multiplier
 * words interleaved.  (pq operations themselves are stubbed out below.)
 */
struct iop3xx_desc_pq_xor {
	u32 next_desc;
	u32 src[3];
	union {
		u32 data_mult1;
		struct iop3xx_aau_gfmr data_mult1_field;
	};
	u32 dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	union {
		u32 src_addr;
		u32 e_desc_ctrl;
		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
		u32 data_multiplier;
		struct iop3xx_aau_gfmr data_mult_field;
		u32 reserved;
	} src_edc_gfmr[19];
};
148
/* Layout of an AAU dual-xor descriptor (see dual_xor_en in the control
 * word): two plain sources plus horizontal/diagonal source and dest.
 */
struct iop3xx_desc_dual_xor {
	u32 next_desc;
	u32 src0_addr;
	u32 src1_addr;
	u32 h_src_addr;
	u32 d_src_addr;
	u32 h_dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	u32 d_dest_addr;
};
163
/* Typed views of an opaque hw_desc pointer: assign .ptr, then read the
 * member matching the owning unit (DMA vs AAU).
 */
union iop3xx_desc {
	struct iop3xx_desc_aau *aau;
	struct iop3xx_desc_dma *dma;
	struct iop3xx_desc_pq_xor *pq_xor;
	struct iop3xx_desc_dual_xor *dual_xor;
	void *ptr;
};
171
/* No support for p+q (RAID6) operations on this hardware: every pq
 * entry point below is a stub that must never be reached at runtime.
 */
static inline int
iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
	BUG();
	return 0;
}

static inline void
iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
		 unsigned long flags)
{
	BUG();
}

static inline void
iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
{
	BUG();
}

static inline void
iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
			 dma_addr_t addr, unsigned char coef)
{
	BUG();
}

static inline int
iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
	BUG();
	return 0;
}

static inline void
iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
			  unsigned long flags)
{
	BUG();
}

static inline void
iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
	BUG();
}

/* pq zero-sum sources are set exactly like pq sources (also a stub) */
#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr

static inline void
iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
			      dma_addr_t *src)
{
	BUG();
}
228
/* Maximum number of source operands for a single xor operation. */
static inline int iop_adma_get_max_xor(void)
{
	enum { IOP3XX_MAX_XOR_SRCS = 32 };

	return IOP3XX_MAX_XOR_SRCS;
}
233
/* pq (RAID6) is unsupported; must never be called. */
static inline int iop_adma_get_max_pq(void)
{
	BUG();
	return 0;
}
239
/* Read the bus address of the descriptor the channel is currently
 * executing, from the unit-specific descriptor address register.
 */
static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
{
	int id = chan->device->id;

	switch (id) {
	case DMA0_ID:
	case DMA1_ID:
		return __raw_readl(DMA_DAR(chan));
	case AAU_ID:
		return __raw_readl(AAU_ADAR(chan));
	default:
		BUG();
	}
	return 0;
}
255
/* Program the unit's next-descriptor address register so the channel
 * continues with the chain at next_desc_addr.
 */
static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
						u32 next_desc_addr)
{
	int id = chan->device->id;

	switch (id) {
	case DMA0_ID:
	case DMA1_ID:
		__raw_writel(next_desc_addr, DMA_NDAR(chan));
		break;
	case AAU_ID:
		__raw_writel(next_desc_addr, AAU_ANDAR(chan));
		break;
	}

}
272
/* channel status register bit 10: channel active */
#define IOP_ADMA_STATUS_BUSY (1 << 10)
/* per-descriptor hardware transfer-size limits */
#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
277
/* Return 1 while the channel is actively processing descriptors. */
static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));
	return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
}
283
/* An operation spanning num_slots slots must begin on a slot index
 * aligned to num_slots.
 */
static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
					int num_slots)
{
	/* num_slots will only ever be 1, 2, 4, or 8 */
	return (desc->idx & (num_slots - 1)) ? 0 : 1;
}
290
/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
{
	/* a memcpy always fits in a single descriptor slot */
	const int slots = 1;

	*slots_per_op = slots;
	return slots;
}
297
/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
{
	/* a memset always fits in a single descriptor slot */
	const int slots = 1;

	*slots_per_op = slots;
	return slots;
}
304
/* Slots needed by one AAU xor operation: the base descriptor covers up
 * to 4 sources, wider operations round src_cnt up to the next 2/4/8
 * slot tier.  Valid range is 1 <= src_cnt <= 32.
 */
static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
					    int *slots_per_op)
{
	int slots;

	if (src_cnt <= 4)
		slots = 1;
	else if (src_cnt <= 8)
		slots = 2;
	else if (src_cnt <= 16)
		slots = 4;
	else
		slots = 8;

	*slots_per_op = slots;
	return slots;
}
321
/* Slot count for a no-op descriptor used purely to raise an interrupt;
 * delegates to the cheapest operation the unit supports.
 */
static inline int
iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return iop_chan_memcpy_slot_count(0, slots_per_op);
	case AAU_ID:
		return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
	default:
		BUG();
	}
	return 0;
}
336
iop_chan_xor_slot_count(size_t len,int src_cnt,int * slots_per_op)337 static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
338 int *slots_per_op)
339 {
340 int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
341
342 if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
343 return slot_cnt;
344
345 len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
346 while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
347 len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
348 slot_cnt += *slots_per_op;
349 }
350
351 slot_cnt += *slots_per_op;
352
353 return slot_cnt;
354 }
355
356 /* zero sum on iop3xx is limited to 1k at a time so it requires multiple
357 * descriptors
358 */
iop_chan_zero_sum_slot_count(size_t len,int src_cnt,int * slots_per_op)359 static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
360 int *slots_per_op)
361 {
362 int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
363
364 if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
365 return slot_cnt;
366
367 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
368 while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
369 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
370 slot_cnt += *slots_per_op;
371 }
372
373 slot_cnt += *slots_per_op;
374
375 return slot_cnt;
376 }
377
/* Read the byte count programmed into a hardware descriptor; field
 * offset differs between the DMA and AAU layouts.
 */
static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return hw_desc.dma->byte_count;
	case AAU_ID:
		return hw_desc.aau->byte_count;
	default:
		BUG();
	}
	return 0;
}
394
/* translate the src_idx to a descriptor word index */
static inline int __desc_idx(int src_idx)
{
	/* Sources 0-3 live in the base descriptor (word index 0 here).
	 * Sources 4+ occupy consecutive src_edc[] words, skipping the
	 * three words reserved for extended control (at offsets 4, 13
	 * and 22 of the post-base sequence).
	 */
	int widx;

	if (src_idx < 4)
		return 0;

	widx = src_idx - 4;
	widx += (widx >= 4) + (widx >= 12) + (widx >= 20);

	return widx;
}
410
/* Read source address src_idx back from a hardware descriptor.  DMA
 * descriptors have a single source; AAU sources 0-3 are inline and
 * higher sources live in src_edc[] (see __desc_idx).
 */
static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan,
					int src_idx)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return hw_desc.dma->src_addr;
	case AAU_ID:
		break;
	default:
		BUG();
	}

	if (src_idx < 4)
		return hw_desc.aau->src[src_idx];
	else
		return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
}
432
/* Store source address src_idx into an AAU descriptor: inline slot for
 * sources 0-3, src_edc[] word (via __desc_idx) beyond that.
 */
static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
					int src_idx, dma_addr_t addr)
{
	if (src_idx < 4)
		hw_desc->src[src_idx] = addr;
	else
		hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
}
441
/* Build the control word for a memory-to-memory copy descriptor and
 * clear the fields a plain memcpy does not use.
 */
static inline void
iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
{
	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_dma_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	u_desc_ctrl.field.mem_to_mem_en = 1;
	u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
	hw_desc->upper_pci_src_addr = 0;
	hw_desc->crc_addr = 0;
}
459
/* Build the AAU control word for a block fill (memset); the fill value
 * is supplied later via iop_desc_set_block_fill_val.
 */
static inline void
iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
	u_desc_ctrl.field.dest_write_en = 1;
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
}
475
/* Build the AAU descriptor control word for an xor over src_cnt source
 * blocks.  Sources beyond 8 are enabled through the extended control
 * words (EDCR2, then EDCR1, then EDCR0); each case initializes its EDCR
 * and clamps src_cnt so the next lower case handles the remainder.
 * Returns the assembled control word (also stored in the descriptor).
 */
static inline u32
iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
		     unsigned long flags)
{
	int i, shift;
	u32 edcr;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	switch (src_cnt) {
	case 25 ... 32:
		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		edcr = 0;
		shift = 1;
		/* enable the xor command for blocks 25..src_cnt */
		for (i = 24; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
		src_cnt = 24;
		/* fall through */
	case 17 ... 24:
		if (!u_desc_ctrl.field.blk_ctrl) {
			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		}
		edcr = 0;
		shift = 1;
		/* enable the xor command for blocks 17..src_cnt */
		for (i = 16; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
		src_cnt = 16;
		/* fall through */
	case 9 ... 16:
		if (!u_desc_ctrl.field.blk_ctrl)
			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
		edcr = 0;
		shift = 1;
		/* enable the xor command for blocks 9..src_cnt */
		for (i = 8; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
		src_cnt = 8;
		/* fall through */
	case 2 ... 8:
		/* blocks 2..src_cnt xor directly in the main control word */
		shift = 1;
		for (i = 0; i < src_cnt; i++) {
			u_desc_ctrl.value |= (1 << shift);
			shift += 3;
		}

		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
	}

	u_desc_ctrl.field.dest_write_en = 1;
	u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;

	return u_desc_ctrl.value;
}
544
/* Thin wrapper: initialize an xor descriptor from an adma slot. */
static inline void
iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
		  unsigned long flags)
{
	iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
}
551
/* Initialize every hardware descriptor of a multi-slot zero-sum
 * operation: each is an xor descriptor with the destination write
 * suppressed and zero-result checking enabled, and subsequent
 * descriptors are chained to their predecessor.
 * Returns the number of operations (descriptors) initialized.
 */
static inline int
iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
		       unsigned long flags)
{
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;
	int i, j;

	hw_desc = desc->hw_desc;

	/* i walks slots, j counts operations */
	for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
		i += slots_per_op, j++) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
		u_desc_ctrl.field.dest_write_en = 0;
		u_desc_ctrl.field.zero_result_en = 1;
		u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
		iter->desc_ctrl = u_desc_ctrl.value;

		/* for the subsequent descriptors preserve the store queue
		 * and chain them together
		 */
		if (i) {
			prev_hw_desc =
				iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
			prev_hw_desc->next_desc =
				/* i << 5: slots appear to be 32 bytes apart
				 * in the bus-address space -- matches
				 * iop_hw_desc_slot_idx; confirm if changed
				 */
				(u32) (desc->async_tx.phys + (i << 5));
		}
	}

	return j;
}
589
/* Initialize a "null xor" descriptor: same EDCR selection as a real
 * xor but with all block commands zeroed and the destination write
 * disabled, so the operation has no memory effect (used for dummy /
 * interrupt descriptors).
 */
static inline void
iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
		       unsigned long flags)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	switch (src_cnt) {
	case 25 ... 32:
		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 17 ... 24:
		if (!u_desc_ctrl.field.blk_ctrl) {
			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		}
		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 9 ... 16:
		if (!u_desc_ctrl.field.blk_ctrl)
			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 1 ... 8:
		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
	}

	u_desc_ctrl.field.dest_write_en = 0;
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
}
627
/* Program a descriptor's byte count; field location depends on the
 * owning unit's descriptor layout.
 */
static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan,
					u32 byte_count)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		hw_desc.dma->byte_count = byte_count;
		break;
	case AAU_ID:
		hw_desc.aau->byte_count = byte_count;
		break;
	default:
		BUG();
	}
}
646
/* Build a no-op descriptor whose only effect is the completion
 * interrupt (the literal 1 passed as flags acts as DMA_PREP_INTERRUPT).
 */
static inline void
iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
			struct iop_adma_chan *chan)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		/* zero-length copy of nothing to nothing */
		iop_desc_init_memcpy(desc, 1);
		hw_desc.dma->byte_count = 0;
		hw_desc.dma->dest_addr = 0;
		hw_desc.dma->src_addr = 0;
		break;
	case AAU_ID:
		/* null xor with the minimum two sources */
		iop_desc_init_null_xor(desc, 2, 1);
		hw_desc.aau->byte_count = 0;
		hw_desc.aau->dest_addr = 0;
		hw_desc.aau->src[0] = 0;
		hw_desc.aau->src[1] = 0;
		break;
	default:
		BUG();
	}
}
672
/* Distribute len over the chained zero-sum descriptors: every
 * descriptor but the last gets the 1k hardware maximum, the last gets
 * the remainder.
 */
static inline void
iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
	int slots_per_op = desc->slots_per_op;
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
	int i = 0;

	if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
		hw_desc->byte_count = len;
	} else {
		do {
			iter = iop_hw_desc_slot_idx(hw_desc, i);
			iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
			len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
			i += slots_per_op;
		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);

		iter = iop_hw_desc_slot_idx(hw_desc, i);
		iter->byte_count = len;
	}
}
694
/* Program the destination address; field location depends on the
 * owning unit's descriptor layout.
 */
static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan,
					dma_addr_t addr)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		hw_desc.dma->dest_addr = addr;
		break;
	case AAU_ID:
		hw_desc.aau->dest_addr = addr;
		break;
	default:
		BUG();
	}
}
713
/* Program the single source address of a DMA memcpy descriptor. */
static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
					dma_addr_t addr)
{
	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
	hw_desc->src_addr = addr;
}
720
/* Set source src_idx on every chained zero-sum descriptor, advancing
 * the address by the 1k per-descriptor limit between descriptors.
 */
static inline void
iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
				dma_addr_t addr)
{

	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	int i;

	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
		i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
	}
}
736
/* Set source src_idx on every chained xor descriptor, advancing the
 * address by the 16M per-descriptor limit between descriptors.
 */
static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
					int src_idx, dma_addr_t addr)
{

	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	int i;

	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
		i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
	}
}
751
/* Chain this descriptor to the one at next_desc_addr. */
static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
					u32 next_desc_addr)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	/* complain if a link is already set -- relinking is unexpected */
	iop_paranoia(hw_desc.dma->next_desc);
	hw_desc.dma->next_desc = next_desc_addr;
}
761
/* Return the bus address of the descriptor chained after this one. */
static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
	return hw_desc.dma->next_desc;
}
768
/* Break the chain: mark this descriptor as the last one. */
static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
	hw_desc.dma->next_desc = 0;
}
775
/* For a memset descriptor the fill pattern is carried in the first
 * source address word (see iop_desc_init_memset).
 */
static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
					u32 val)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	hw_desc->src[0] = val;
}
782
/* Report the zero-sum check outcome recorded in the descriptor control
 * word (nonzero means the xor of the sources was not zero).
 */
static inline enum sum_check_flags
iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;

	/* result is only meaningful after completion with checking on */
	iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
	return desc_ctrl.zero_result_err << SUM_CHECK_P;
}
792
/* Kick the channel to pick up newly appended descriptors by setting
 * bit 1 (chain resume) in the channel control register.
 */
static inline void iop_chan_append(struct iop_adma_chan *chan)
{
	u32 dma_chan_ctrl;

	dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
	dma_chan_ctrl |= 0x2;
	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}
801
/* Snapshot the raw channel status register. */
static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
{
	return __raw_readl(DMA_CSR(chan));
}
806
/* Stop the channel by clearing the enable bit (bit 0) in CCR. */
static inline void iop_chan_disable(struct iop_adma_chan *chan)
{
	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
	dma_chan_ctrl &= ~1;
	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}
813
/* Start the channel by setting the enable bit (bit 0) in CCR. */
static inline void iop_chan_enable(struct iop_adma_chan *chan)
{
	u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));

	dma_chan_ctrl |= 1;
	__raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}
821
/* Acknowledge the end-of-transfer interrupt: write back only CSR
 * bit 9 (presumably write-1-to-clear semantics -- other bits are
 * masked out so they are not re-acknowledged).
 */
static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));
	status &= (1 << 9);
	__raw_writel(status, DMA_CSR(chan));
}
828
/* Acknowledge the end-of-chain interrupt: write back only CSR bit 8
 * (same masked write-back pattern as the eot clear above).
 */
static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));
	status &= (1 << 8);
	__raw_writel(status, DMA_CSR(chan));
}
835
/* Acknowledge the error bits this unit can raise by writing the masked
 * status back to CSR (bits match the iop_is_err_* decoders below).
 */
static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		/* int mabort, pci mabort, pci tabort, split tx */
		status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
		break;
	case AAU_ID:
		/* internal master abort only */
		status &= (1 << 5);
		break;
	default:
		BUG();
	}

	__raw_writel(status, DMA_CSR(chan));
}
854
/* Error-status decoders: given a CSR snapshot, report whether a
 * particular error is flagged.  Conditions the iop3xx units cannot
 * raise simply return 0.
 */
static inline int
iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
{
	/* no internal parity error on iop3xx */
	return 0;
}

static inline int
iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
{
	/* no MCU abort on iop3xx */
	return 0;
}

static inline int
iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
{
	/* no internal target abort on iop3xx */
	return 0;
}

static inline int
iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
{
	/* bit 5: internal master abort (all units) */
	return test_bit(5, &status);
}

static inline int
iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
{
	/* bit 2: PCI target abort -- DMA channels only */
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(2, &status);
	default:
		return 0;
	}
}

static inline int
iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
{
	/* bit 3: PCI master abort -- DMA channels only */
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(3, &status);
	default:
		return 0;
	}
}

static inline int
iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
{
	/* bit 1: split transaction error -- DMA channels only */
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(1, &status);
	default:
		return 0;
	}
}
914 #endif /* _ADMA_H */
915