1 /*
2 * Copyright © 2006, Intel Corporation.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 */
18 #ifndef _ADMA_H
19 #define _ADMA_H
20 #include <linux/types.h>
21 #include <linux/io.h>
22 #include <mach/hardware.h>
23 #include <asm/hardware/iop_adma.h>
24
/* Memory copy units: per-channel register offsets from chan->mmr_base */
#define DMA_CCR(chan) (chan->mmr_base + 0x0)	/* channel control (enable/append bits) */
#define DMA_CSR(chan) (chan->mmr_base + 0x4)	/* channel status (busy, eot/eoc, errors) */
#define DMA_DAR(chan) (chan->mmr_base + 0xc)	/* current descriptor address */
#define DMA_NDAR(chan) (chan->mmr_base + 0x10)	/* next descriptor address */
#define DMA_PADR(chan) (chan->mmr_base + 0x14)
#define DMA_PUADR(chan) (chan->mmr_base + 0x18)
#define DMA_LADR(chan) (chan->mmr_base + 0x1c)
#define DMA_BCR(chan) (chan->mmr_base + 0x20)
#define DMA_DCR(chan) (chan->mmr_base + 0x24)

/* Application accelerator unit (xor engine) register offsets */
#define AAU_ACR(chan) (chan->mmr_base + 0x0)	/* accelerator control */
#define AAU_ASR(chan) (chan->mmr_base + 0x4)	/* accelerator status */
#define AAU_ADAR(chan) (chan->mmr_base + 0x8)	/* current descriptor address */
#define AAU_ANDAR(chan) (chan->mmr_base + 0xc)	/* next descriptor address */
#define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2)))
#define AAU_DAR(chan) (chan->mmr_base + 0x20)
#define AAU_ABCR(chan) (chan->mmr_base + 0x24)
#define AAU_ADCR(chan) (chan->mmr_base + 0x28)
/* NOTE(review): AAU_SAR_EDCR expands to an expression that uses 'chan'
 * even though 'chan' is not a macro parameter; it only compiles where a
 * variable named 'chan' is in scope at the expansion site — confirm
 * intended usage before calling it.
 */
#define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
/* src_edc[] word indices of the extended descriptor control registers */
#define AAU_EDCR0_IDX 8
#define AAU_EDCR1_IDX 17
#define AAU_EDCR2_IDX 26

#define DMA0_ID 0
#define DMA1_ID 1
#define AAU_ID 2
53
/* Bit-field view of the AAU descriptor control word (overlaid on the
 * u32 desc_ctrl via the unions in the descriptor structs below).
 */
struct iop3xx_aau_desc_ctrl {
	unsigned int int_en:1;		/* raise an interrupt on completion */
	unsigned int blk1_cmd_ctrl:3;	/* per-source-block command select */
	unsigned int blk2_cmd_ctrl:3;
	unsigned int blk3_cmd_ctrl:3;
	unsigned int blk4_cmd_ctrl:3;
	unsigned int blk5_cmd_ctrl:3;
	unsigned int blk6_cmd_ctrl:3;
	unsigned int blk7_cmd_ctrl:3;
	unsigned int blk8_cmd_ctrl:3;
	unsigned int blk_ctrl:2;	/* 0x1 mini-desc, 0x2 EDCR0, 0x3 EDCR[2:0] */
	unsigned int dual_xor_en:1;
	unsigned int tx_complete:1;	/* set by hw when the op finished */
	unsigned int zero_result_err:1;	/* zero-sum check result */
	unsigned int zero_result_en:1;	/* enable zero-sum checking */
	unsigned int dest_write_en:1;	/* write the result to dest_addr */
};

/* Bit-field view of an extended (EDCR) control word: same per-block
 * command layout as above, for sources beyond the first eight.
 */
struct iop3xx_aau_e_desc_ctrl {
	unsigned int reserved:1;
	unsigned int blk1_cmd_ctrl:3;
	unsigned int blk2_cmd_ctrl:3;
	unsigned int blk3_cmd_ctrl:3;
	unsigned int blk4_cmd_ctrl:3;
	unsigned int blk5_cmd_ctrl:3;
	unsigned int blk6_cmd_ctrl:3;
	unsigned int blk7_cmd_ctrl:3;
	unsigned int blk8_cmd_ctrl:3;
	unsigned int reserved2:7;
};

/* Bit-field view of the DMA (memory copy unit) descriptor control word */
struct iop3xx_dma_desc_ctrl {
	unsigned int pci_transaction:4;	/* PCI command type (0xe = mem read block) */
	unsigned int int_en:1;		/* raise an interrupt on completion */
	unsigned int dac_cycle_en:1;
	unsigned int mem_to_mem_en:1;	/* local memory-to-memory transfer */
	unsigned int crc_data_tx_en:1;
	unsigned int crc_gen_en:1;
	unsigned int crc_seed_dis:1;
	unsigned int reserved:21;
	unsigned int crc_tx_complete:1;
};
96
/* In-memory hardware descriptor for the DMA (memory copy) units.
 * next_desc chains descriptors by physical address; the unions give the
 * same words PCI-transfer or plain memcpy names.
 */
struct iop3xx_desc_dma {
	u32 next_desc;
	union {
		u32 pci_src_addr;
		u32 pci_dest_addr;
		u32 src_addr;
	};
	union {
		u32 upper_pci_src_addr;
		u32 upper_pci_dest_addr;
	};
	union {
		u32 local_pci_src_addr;
		u32 local_pci_dest_addr;
		u32 dest_addr;
	};
	u32 byte_count;
	union {
		u32 desc_ctrl;			/* raw control word ... */
		struct iop3xx_dma_desc_ctrl desc_ctrl_field;	/* ... or by field */
	};
	u32 crc_addr;
};

/* In-memory hardware descriptor for the AAU (xor engine).  The first
 * four sources live in src[]; additional sources and the extended
 * control words share the src_edc[] array (see __desc_idx()).
 */
struct iop3xx_desc_aau {
	u32 next_desc;
	u32 src[4];
	u32 dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	union {
		u32 src_addr;
		u32 e_desc_ctrl;
		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
	} src_edc[31];
};

/* Four 8-bit galois-field multipliers packed into one word */
struct iop3xx_aau_gfmr {
	unsigned int gfmr1:8;
	unsigned int gfmr2:8;
	unsigned int gfmr3:8;
	unsigned int gfmr4:8;
};
143
/* P+Q-style descriptor layout with galois-field multipliers.  Unused on
 * iop3xx (the pq helpers below BUG()); kept for interface completeness.
 */
struct iop3xx_desc_pq_xor {
	u32 next_desc;
	u32 src[3];
	union {
		u32 data_mult1;
		struct iop3xx_aau_gfmr data_mult1_field;
	};
	u32 dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	union {
		u32 src_addr;
		u32 e_desc_ctrl;
		struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
		u32 data_multiplier;
		struct iop3xx_aau_gfmr data_mult_field;
		u32 reserved;
	} src_edc_gfmr[19];
};

/* Dual-xor descriptor: two source streams plus horizontal/diagonal
 * parity addresses (see the dual_xor_en control bit above).
 */
struct iop3xx_desc_dual_xor {
	u32 next_desc;
	u32 src0_addr;
	u32 src1_addr;
	u32 h_src_addr;
	u32 d_src_addr;
	u32 h_dest_addr;
	u32 byte_count;
	union {
		u32 desc_ctrl;
		struct iop3xx_aau_desc_ctrl desc_ctrl_field;
	};
	u32 d_dest_addr;
};

/* Convenience view of a raw hw_desc pointer as any descriptor type.
 * next_desc is at offset 0 in every layout, so chain manipulation can
 * go through the .dma view regardless of the owning unit.
 */
union iop3xx_desc {
	struct iop3xx_desc_aau *aau;
	struct iop3xx_desc_dma *dma;
	struct iop3xx_desc_pq_xor *pq_xor;
	struct iop3xx_desc_dual_xor *dual_xor;
	void *ptr;
};
189
/* No support for p+q operations */

/*
 * iop3xx has no P+Q (RAID-6) engine: every pq helper below is a stub
 * that BUG()s if invoked.  They exist only to satisfy the common
 * iop-adma driver interface; callers must never select pq ops here.
 */
static inline int
iop_chan_pq_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
	BUG();
	return 0;
}

static inline void
iop_desc_init_pq(struct iop_adma_desc_slot *desc, int src_cnt,
		 unsigned long flags)
{
	BUG();
}

static inline void
iop_desc_set_pq_addr(struct iop_adma_desc_slot *desc, dma_addr_t *addr)
{
	BUG();
}

static inline void
iop_desc_set_pq_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
			 dma_addr_t addr, unsigned char coef)
{
	BUG();
}

static inline int
iop_chan_pq_zero_sum_slot_count(size_t len, int src_cnt, int *slots_per_op)
{
	BUG();
	return 0;
}

static inline void
iop_desc_init_pq_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
			  unsigned long flags)
{
	BUG();
}

static inline void
iop_desc_set_pq_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
	BUG();
}

#define iop_desc_set_pq_zero_sum_src_addr iop_desc_set_pq_src_addr

static inline void
iop_desc_set_pq_zero_sum_addr(struct iop_adma_desc_slot *desc, int pq_idx,
			      dma_addr_t *src)
{
	BUG();
}

/* The AAU handles up to 32 xor sources in one operation (see the
 * slot-count table and the 25..32 case in iop3xx_desc_init_xor()).
 */
static inline int iop_adma_get_max_xor(void)
{
	return 32;
}

/* No pq support, so there is no meaningful maximum */
static inline int iop_adma_get_max_pq(void)
{
	BUG();
	return 0;
}
257
/* Read the hardware's current descriptor address from the register of
 * whichever unit (DMA or AAU) owns this channel.
 */
static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
{
	int id = chan->device->id;

	switch (id) {
	case DMA0_ID:
	case DMA1_ID:
		return __raw_readl(DMA_DAR(chan));
	case AAU_ID:
		return __raw_readl(AAU_ADAR(chan));
	default:
		BUG();
	}
	return 0;
}
273
/* Program the next-descriptor register of the unit that owns @chan.
 * Unknown ids are silently ignored (no BUG here, matching the hw units
 * this header supports).
 */
static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
						u32 next_desc_addr)
{
	int id = chan->device->id;

	if (id == DMA0_ID || id == DMA1_ID)
		__raw_writel(next_desc_addr, DMA_NDAR(chan));
	else if (id == AAU_ID)
		__raw_writel(next_desc_addr, AAU_ANDAR(chan));
}
290
#define IOP_ADMA_STATUS_BUSY (1 << 10)	/* channel-active bit in CSR */
/* Maximum bytes one descriptor can cover per operation type */
#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
295
iop_chan_is_busy(struct iop_adma_chan * chan)296 static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
297 {
298 u32 status = __raw_readl(DMA_CSR(chan));
299 return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
300 }
301
iop_desc_is_aligned(struct iop_adma_desc_slot * desc,int num_slots)302 static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
303 int num_slots)
304 {
305 /* num_slots will only ever be 1, 2, 4, or 8 */
306 return (desc->idx & (num_slots - 1)) ? 0 : 1;
307 }
308
/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
{
	/* A memcpy always occupies exactly one descriptor slot. */
	*slots_per_op = 1;
	return *slots_per_op;
}
315
/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
{
	/* A memset always occupies exactly one descriptor slot. */
	*slots_per_op = 1;
	return *slots_per_op;
}
322
/* Number of descriptor slots needed by an AAU xor of src_cnt sources:
 * 1 slot for up to 4 sources, 2 for up to 8, 4 for up to 16 and 8 for
 * up to 32.  The count is also stored through *slots_per_op.
 */
static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
					    int *slots_per_op)
{
	static const char nslots[32] = {
		1, 1, 1, 1, 2, 2, 2, 2,
		4, 4, 4, 4, 4, 4, 4, 4,
		8, 8, 8, 8, 8, 8, 8, 8,
		8, 8, 8, 8, 8, 8, 8, 8,
	};

	*slots_per_op = nslots[src_cnt - 1];
	return *slots_per_op;
}
339
/* Slot count for a pure interrupt descriptor: modelled as a zero-length
 * memcpy on the DMA units, or a zero-length two-source xor on the AAU.
 */
static inline int
iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return iop_chan_memcpy_slot_count(0, slots_per_op);
	case AAU_ID:
		return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
	default:
		BUG();
	}
	return 0;
}
354
/* Total slots for an xor of len bytes over src_cnt sources.  One
 * operation covers at most IOP_ADMA_XOR_MAX_BYTE_COUNT bytes; longer
 * buffers chain one extra operation (of *slots_per_op slots) per
 * additional max-sized chunk.
 */
static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
					  int *slots_per_op)
{
	int total = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
	size_t remaining = len;

	while (remaining > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
		remaining -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
		total += *slots_per_op;
	}

	return total;
}
373
374 /* zero sum on iop3xx is limited to 1k at a time so it requires multiple
375 * descriptors
376 */
iop_chan_zero_sum_slot_count(size_t len,int src_cnt,int * slots_per_op)377 static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
378 int *slots_per_op)
379 {
380 int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);
381
382 if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
383 return slot_cnt;
384
385 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
386 while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
387 len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
388 slot_cnt += *slots_per_op;
389 }
390
391 slot_cnt += *slots_per_op;
392
393 return slot_cnt;
394 }
395
/* No pq support on iop3xx, so no descriptor is ever a pq descriptor */
static inline int iop_desc_is_pq(struct iop_adma_desc_slot *desc)
{
	return 0;
}

/* Read the destination address field from the hardware descriptor,
 * dispatching on the owning unit's layout.
 */
static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
					 struct iop_adma_chan *chan)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return hw_desc.dma->dest_addr;
	case AAU_ID:
		return hw_desc.aau->dest_addr;
	default:
		BUG();
	}
	return 0;
}


/* q destination only exists for pq operations, which iop3xx lacks */
static inline u32 iop_desc_get_qdest_addr(struct iop_adma_desc_slot *desc,
					  struct iop_adma_chan *chan)
{
	BUG();
	return 0;
}

/* Read the byte count field from the hardware descriptor */
static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
					  struct iop_adma_chan *chan)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return hw_desc.dma->byte_count;
	case AAU_ID:
		return hw_desc.aau->byte_count;
	default:
		BUG();
	}
	return 0;
}
442
/* translate the src_idx to a descriptor word index */
static inline int __desc_idx(int src_idx)
{
	/* Sources 4..31 map into src_edc[]; the mapping is not
	 * contiguous — word indices 4, 13 and 22 are never produced, as
	 * those src_edc[] slots are reserved for other descriptor
	 * content in the extended layouts.  Sources 0-3 (stored in
	 * src[], not src_edc[]) map to 0.
	 */
	static const int src_to_word[32] = {
		0,  0,  0,  0,
		0,  1,  2,  3,
		5,  6,  7,  8,
		9,  10, 11, 12,
		14, 15, 16, 17,
		18, 19, 20, 21,
		23, 24, 25, 26,
		27, 28, 29, 30,
	};

	return src_to_word[src_idx];
}
458
/* Read source address src_idx from the hardware descriptor.  DMA
 * descriptors have a single source; for the AAU the switch deliberately
 * falls out to the indexed lookup below (src[] for the first four
 * sources, src_edc[] via __desc_idx() for the rest).
 */
static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
					struct iop_adma_chan *chan,
					int src_idx)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return hw_desc.dma->src_addr;
	case AAU_ID:
		break;
	default:
		BUG();
	}

	if (src_idx < 4)
		return hw_desc.aau->src[src_idx];
	else
		return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
}
480
/* Store source address src_idx into an AAU descriptor: the first four
 * sources live in src[], later ones in the remapped src_edc[] words.
 */
static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
						int src_idx, dma_addr_t addr)
{
	if (src_idx >= 4)
		hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
	else
		hw_desc->src[src_idx] = addr;
}
489
/* Initialize a DMA descriptor for a local memory-to-memory copy.
 * int_en is taken from the DMA_PREP_INTERRUPT flag; addresses and byte
 * count are filled in later by the set_* helpers.
 */
static inline void
iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
{
	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_dma_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	u_desc_ctrl.field.mem_to_mem_en = 1;
	u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
	hw_desc->upper_pci_src_addr = 0;
	hw_desc->crc_addr = 0;
}
507
/* Initialize an AAU descriptor for a block fill (memset).  The fill
 * value is supplied later via iop_desc_set_block_fill_val().
 */
static inline void
iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
	u_desc_ctrl.field.dest_write_en = 1;
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
}
523
/*
 * Build and store the control word for an AAU xor descriptor with
 * src_cnt sources.  Sources are described three bits at a time starting
 * at bit 1; sources beyond the first eight go into the extended control
 * words (EDCR0..2 at the AAU_EDCR*_IDX slots of src_edc[]).  Each case
 * programs the enable bits for its group of eight sources, clamps
 * src_cnt to the group boundary, and falls through to handle the lower
 * groups.  blk_ctrl records how many extended words the hardware must
 * read (0x1 mini-descriptor, 0x2 EDCR0 only, 0x3 EDCR0..2).
 *
 * Returns the assembled control word so that the zero-sum init can
 * tweak it (dest_write_en/zero_result_en) and store it again.
 */
static inline u32
iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
		     unsigned long flags)
{
	int i, shift;
	u32 edcr;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	switch (src_cnt) {
	case 25 ... 32:
		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		edcr = 0;
		shift = 1;
		/* enable sources 25..src_cnt in EDCR2 */
		for (i = 24; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
		src_cnt = 24;
		/* fall through */
	case 17 ... 24:
		if (!u_desc_ctrl.field.blk_ctrl) {
			/* entered here directly: EDCR2 unused, clear it */
			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		}
		edcr = 0;
		shift = 1;
		/* enable sources 17..src_cnt in EDCR1 */
		for (i = 16; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
		src_cnt = 16;
		/* fall through */
	case 9 ... 16:
		if (!u_desc_ctrl.field.blk_ctrl)
			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
		edcr = 0;
		shift = 1;
		/* enable sources 9..src_cnt in EDCR0 */
		for (i = 8; i < src_cnt; i++) {
			edcr |= (1 << shift);
			shift += 3;
		}
		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
		src_cnt = 8;
		/* fall through */
	case 2 ... 8:
		shift = 1;
		/* sources 1..src_cnt live in the main control word */
		for (i = 0; i < src_cnt; i++) {
			u_desc_ctrl.value |= (1 << shift);
			shift += 3;
		}

		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
	}

	u_desc_ctrl.field.dest_write_en = 1;
	u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;

	return u_desc_ctrl.value;
}
592
/* Thin wrapper: initialize the slot's hardware descriptor for xor */
static inline void
iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
		  unsigned long flags)
{
	iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
}
599
/* return the number of operations */
static inline int
iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
		       unsigned long flags)
{
	/* Zero-sum is an xor whose result is only checked, not written:
	 * reuse the xor control word but clear dest_write_en and set
	 * zero_result_en on every sub-descriptor (one per 1k chunk,
	 * slots_per_op slots apart).
	 */
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;
	int i, j;

	hw_desc = desc->hw_desc;

	for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
		i += slots_per_op, j++) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
		u_desc_ctrl.field.dest_write_en = 0;
		u_desc_ctrl.field.zero_result_en = 1;
		u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
		iter->desc_ctrl = u_desc_ctrl.value;

		/* for the subsequent descriptors preserve the store queue
		 * and chain them together
		 */
		if (i) {
			prev_hw_desc =
				iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
			prev_hw_desc->next_desc =
				/* i << 5: assumes 32-byte descriptor slots —
				 * NOTE(review): confirm against the slot size
				 * used by the allocating driver
				 */
				(u32) (desc->async_tx.phys + (i << 5));
		}
	}

	/* j == number of sub-operations initialized */
	return j;
}
637
/* Initialize an xor descriptor that computes but does not write a
 * result (dest_write_en = 0): used as a no-op/dependency descriptor.
 * The EDCR words for the unused upper source groups are cleared; the
 * structure mirrors iop3xx_desc_init_xor() without per-source enables.
 */
static inline void
iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
		       unsigned long flags)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	union {
		u32 value;
		struct iop3xx_aau_desc_ctrl field;
	} u_desc_ctrl;

	u_desc_ctrl.value = 0;
	switch (src_cnt) {
	case 25 ... 32:
		u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 17 ... 24:
		if (!u_desc_ctrl.field.blk_ctrl) {
			hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
			u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
		}
		hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 9 ... 16:
		if (!u_desc_ctrl.field.blk_ctrl)
			u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
		hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
		/* fall through */
	case 1 ... 8:
		if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
			u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
	}

	u_desc_ctrl.field.dest_write_en = 0;
	u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
	hw_desc->desc_ctrl = u_desc_ctrl.value;
}
675
/* Store the byte count into the hardware descriptor.  The field sits at
 * different offsets in the DMA and AAU layouts, so dispatch on the
 * owning unit.
 */
static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
					   struct iop_adma_chan *chan,
					   u32 byte_count)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
	int id = chan->device->id;

	if (id == DMA0_ID || id == DMA1_ID)
		hw_desc.dma->byte_count = byte_count;
	else if (id == AAU_ID)
		hw_desc.aau->byte_count = byte_count;
	else
		BUG();
}
694
/* Build a descriptor that does no data movement and only raises a
 * completion interrupt: a zero-length memcpy (DMA units) or a null xor
 * of two zero sources (AAU).
 * NOTE(review): the literal 1 passed as flags is meant to set int_en —
 * this assumes DMA_PREP_INTERRUPT is bit 0; confirm if that enum moves.
 */
static inline void
iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
			struct iop_adma_chan *chan)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		iop_desc_init_memcpy(desc, 1);
		hw_desc.dma->byte_count = 0;
		hw_desc.dma->dest_addr = 0;
		hw_desc.dma->src_addr = 0;
		break;
	case AAU_ID:
		iop_desc_init_null_xor(desc, 2, 1);
		hw_desc.aau->byte_count = 0;
		hw_desc.aau->dest_addr = 0;
		hw_desc.aau->src[0] = 0;
		hw_desc.aau->src[1] = 0;
		break;
	default:
		BUG();
	}
}
720
/* Distribute len bytes over the chained zero-sum sub-descriptors: each
 * gets the 1k hardware maximum except the final one, which takes the
 * remainder (sub-descriptors sit slots_per_op slots apart).
 */
static inline void
iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
	int slots_per_op = desc->slots_per_op;
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
	int i = 0;

	if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
		hw_desc->byte_count = len;
	} else {
		do {
			iter = iop_hw_desc_slot_idx(hw_desc, i);
			iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
			len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
			i += slots_per_op;
		} while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);

		/* last sub-descriptor carries the remainder */
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		iter->byte_count = len;
	}
}
742
/* Store the destination address, dispatching on the owning unit's
 * descriptor layout.
 */
static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
					  struct iop_adma_chan *chan,
					  dma_addr_t addr)
{
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		hw_desc.dma->dest_addr = addr;
		break;
	case AAU_ID:
		hw_desc.aau->dest_addr = addr;
		break;
	default:
		BUG();
	}
}

/* DMA descriptors carry a single source address */
static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
						dma_addr_t addr)
{
	struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
	hw_desc->src_addr = addr;
}

/* Set source src_idx on every chained zero-sum sub-descriptor,
 * advancing the address by the 1k per-descriptor maximum each time.
 */
static inline void
iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
			       dma_addr_t addr)
{

	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	int i;

	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
		i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
	}
}

/* Same as above for chained xor sub-descriptors (16M chunks) */
static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
					     int src_idx, dma_addr_t addr)
{

	struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
	int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
	int i;

	for (i = 0; (slot_cnt -= slots_per_op) >= 0;
		i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
		iter = iop_hw_desc_slot_idx(hw_desc, i);
		iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
	}
}
799
/* Chain this descriptor to the one at next_desc_addr (physical).
 * iop_paranoia() is a debug-only check; here it presumably flags an
 * already-linked descriptor being relinked — confirm its semantics in
 * asm/hardware/iop_adma.h.
 */
static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
					  u32 next_desc_addr)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

	iop_paranoia(hw_desc.dma->next_desc);
	hw_desc.dma->next_desc = next_desc_addr;
}

/* Read the chained next-descriptor physical address */
static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
	return hw_desc.dma->next_desc;
}

/* Terminate the descriptor chain at this descriptor */
static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
{
	/* hw_desc->next_desc is the same location for all channels */
	union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
	hw_desc.dma->next_desc = 0;
}

/* The AAU block-fill takes its fill pattern from the first source word
 * (see iop_desc_init_memset()).
 */
static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
					       u32 val)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	hw_desc->src[0] = val;
}
830
/* Report the zero-sum outcome: hardware sets zero_result_err when the
 * xor of the sources was non-zero.  The paranoia check asserts the op
 * completed with zero-sum checking enabled before the bit is trusted.
 */
static inline enum sum_check_flags
iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
{
	struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
	struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;

	iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
	return desc_ctrl.zero_result_err << SUM_CHECK_P;
}
840
iop_chan_append(struct iop_adma_chan * chan)841 static inline void iop_chan_append(struct iop_adma_chan *chan)
842 {
843 u32 dma_chan_ctrl;
844
845 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
846 dma_chan_ctrl |= 0x2;
847 __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
848 }
849
/* Raw read of the channel status register */
static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
{
	return __raw_readl(DMA_CSR(chan));
}
854
/* Clear the channel-enable bit (bit 0) in the control register */
static inline void iop_chan_disable(struct iop_adma_chan *chan)
{
	__raw_writel(__raw_readl(DMA_CCR(chan)) & ~1, DMA_CCR(chan));
}
861
/* Set the channel-enable bit (bit 0) in the control register */
static inline void iop_chan_enable(struct iop_adma_chan *chan)
{
	__raw_writel(__raw_readl(DMA_CCR(chan)) | 1, DMA_CCR(chan));
}
869
/* Acknowledge end-of-transfer: write back only bit 9 of the status
 * register (write-one-to-clear), leaving the other bits untouched.
 */
static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
{
	__raw_writel(__raw_readl(DMA_CSR(chan)) & (1 << 9), DMA_CSR(chan));
}
876
/* Acknowledge end-of-chain: write back only bit 8 of the status
 * register (write-one-to-clear), leaving the other bits untouched.
 */
static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
{
	__raw_writel(__raw_readl(DMA_CSR(chan)) & (1 << 8), DMA_CSR(chan));
}
883
/* Acknowledge the error bits relevant to the owning unit by writing
 * them back to the status register (the DMA units report PCI/split
 * aborts on bits 1-3 in addition to the shared bit 5).
 */
static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
{
	u32 status = __raw_readl(DMA_CSR(chan));

	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
		break;
	case AAU_ID:
		status &= (1 << 5);
		break;
	default:
		BUG();
	}

	__raw_writel(status, DMA_CSR(chan));
}
902
/* Error decode helpers for the interrupt handler.  iop3xx does not
 * report parity, mcu-abort or internal-target-abort conditions, so
 * those predicates are constant 0; the remaining bits match the mask
 * used in iop_adma_device_clear_err_status() above.
 */
static inline int
iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}

static inline int
iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}

static inline int
iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
{
	return 0;
}

/* Bit 5: internal master abort (reported by all units) */
static inline int
iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
{
	return test_bit(5, &status);
}

/* Bit 2: PCI target abort — DMA units only */
static inline int
iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(2, &status);
	default:
		return 0;
	}
}

/* Bit 3: PCI master abort — DMA units only */
static inline int
iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(3, &status);
	default:
		return 0;
	}
}

/* Bit 1: split transaction error — DMA units only */
static inline int
iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
{
	switch (chan->device->id) {
	case DMA0_ID:
	case DMA1_ID:
		return test_bit(1, &status);
	default:
		return 0;
	}
}
962 #endif /* _ADMA_H */
963