/*
 * Copyright © 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 */
#ifndef _ADMA_H
#define _ADMA_H
#include <linux/types.h>
#include <linux/io.h>
#include <mach/hardware.h>
#include <asm/hardware/iop_adma.h>

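/*
 * Register offsets for the two memory-copy (DMA) channels and the
 * application accelerator unit (AAU), relative to each channel's
 * memory-mapped base (chan->mmr_base): a control register (CCR/ACR),
 * a status register (CSR/ASR), the current and next descriptor address
 * registers, and registers that mirror the in-memory descriptor fields
 * defined further down in this file.
 */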
/* Memory copy units */
#define DMA_CCR(chan) (chan->mmr_base + 0x0)
#define DMA_CSR(chan) (chan->mmr_base + 0x4)
#define DMA_DAR(chan) (chan->mmr_base + 0xc)
#define DMA_NDAR(chan) (chan->mmr_base + 0x10)
#define DMA_PADR(chan) (chan->mmr_base + 0x14)
#define DMA_PUADR(chan) (chan->mmr_base + 0x18)
#define DMA_LADR(chan) (chan->mmr_base + 0x1c)
#define DMA_BCR(chan) (chan->mmr_base + 0x20)
#define DMA_DCR(chan) (chan->mmr_base + 0x24)

/* Application accelerator unit */
#define AAU_ACR(chan) (chan->mmr_base + 0x0)
#define AAU_ASR(chan) (chan->mmr_base + 0x4)
#define AAU_ADAR(chan) (chan->mmr_base + 0x8)
#define AAU_ANDAR(chan) (chan->mmr_base + 0xc)
#define AAU_SAR(src, chan) (chan->mmr_base + (0x10 + ((src) << 2)))
#define AAU_DAR(chan) (chan->mmr_base + 0x20)
#define AAU_ABCR(chan) (chan->mmr_base + 0x24)
#define AAU_ADCR(chan) (chan->mmr_base + 0x28)
#define AAU_SAR_EDCR(src_edc) (chan->mmr_base + (0x02c + ((src_edc-4) << 2)))
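/* src_edc[] word indices used for the extended descriptor control words
 * (EDCR0-2) when an operation has more than eight sources; see
 * iop3xx_desc_init_xor() below.
 */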
#define AAU_EDCR0_IDX 8
#define AAU_EDCR1_IDX 17
#define AAU_EDCR2_IDX 26

#define DMA0_ID 0
#define DMA1_ID 1
#define AAU_ID 2

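/* Bit-field views of the 32-bit descriptor control words; each struct
 * overlays the corresponding desc_ctrl/e_desc_ctrl word in the hardware
 * descriptors defined further down.
 */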
struct iop3xx_aau_desc_ctrl {
        unsigned int int_en:1;
        unsigned int blk1_cmd_ctrl:3;
        unsigned int blk2_cmd_ctrl:3;
        unsigned int blk3_cmd_ctrl:3;
        unsigned int blk4_cmd_ctrl:3;
        unsigned int blk5_cmd_ctrl:3;
        unsigned int blk6_cmd_ctrl:3;
        unsigned int blk7_cmd_ctrl:3;
        unsigned int blk8_cmd_ctrl:3;
        unsigned int blk_ctrl:2;
        unsigned int dual_xor_en:1;
        unsigned int tx_complete:1;
        unsigned int zero_result_err:1;
        unsigned int zero_result_en:1;
        unsigned int dest_write_en:1;
};

struct iop3xx_aau_e_desc_ctrl {
        unsigned int reserved:1;
        unsigned int blk1_cmd_ctrl:3;
        unsigned int blk2_cmd_ctrl:3;
        unsigned int blk3_cmd_ctrl:3;
        unsigned int blk4_cmd_ctrl:3;
        unsigned int blk5_cmd_ctrl:3;
        unsigned int blk6_cmd_ctrl:3;
        unsigned int blk7_cmd_ctrl:3;
        unsigned int blk8_cmd_ctrl:3;
        unsigned int reserved2:7;
};

struct iop3xx_dma_desc_ctrl {
        unsigned int pci_transaction:4;
        unsigned int int_en:1;
        unsigned int dac_cycle_en:1;
        unsigned int mem_to_mem_en:1;
        unsigned int crc_data_tx_en:1;
        unsigned int crc_gen_en:1;
        unsigned int crc_seed_dis:1;
        unsigned int reserved:21;
        unsigned int crc_tx_complete:1;
};

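/* In-memory chain descriptor for the DMA channels: next descriptor
 * pointer (0 terminates the chain), PCI and/or local addresses, byte
 * count, control word and CRC address.
 */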
struct iop3xx_desc_dma {
        u32 next_desc;
        union {
                u32 pci_src_addr;
                u32 pci_dest_addr;
                u32 src_addr;
        };
        union {
                u32 upper_pci_src_addr;
                u32 upper_pci_dest_addr;
        };
        union {
                u32 local_pci_src_addr;
                u32 local_pci_dest_addr;
                u32 dest_addr;
        };
        u32 byte_count;
        union {
                u32 desc_ctrl;
                struct iop3xx_dma_desc_ctrl desc_ctrl_field;
        };
        u32 crc_addr;
};

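/* In-memory chain descriptor for the AAU.  The first four source
 * addresses live in src[]; additional sources and the extended
 * descriptor control words share the trailing src_edc[] array, which is
 * indexed via __desc_idx() and the AAU_EDCR*_IDX constants below.
 */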
struct iop3xx_desc_aau {
        u32 next_desc;
        u32 src[4];
        u32 dest_addr;
        u32 byte_count;
        union {
                u32 desc_ctrl;
                struct iop3xx_aau_desc_ctrl desc_ctrl_field;
        };
        union {
                u32 src_addr;
                u32 e_desc_ctrl;
                struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
        } src_edc[31];
};

struct iop3xx_aau_gfmr {
        unsigned int gfmr1:8;
        unsigned int gfmr2:8;
        unsigned int gfmr3:8;
        unsigned int gfmr4:8;
};

struct iop3xx_desc_pq_xor {
        u32 next_desc;
        u32 src[3];
        union {
                u32 data_mult1;
                struct iop3xx_aau_gfmr data_mult1_field;
        };
        u32 dest_addr;
        u32 byte_count;
        union {
                u32 desc_ctrl;
                struct iop3xx_aau_desc_ctrl desc_ctrl_field;
        };
        union {
                u32 src_addr;
                u32 e_desc_ctrl;
                struct iop3xx_aau_e_desc_ctrl e_desc_ctrl_field;
                u32 data_multiplier;
                struct iop3xx_aau_gfmr data_mult_field;
                u32 reserved;
        } src_edc_gfmr[19];
};

struct iop3xx_desc_dual_xor {
        u32 next_desc;
        u32 src0_addr;
        u32 src1_addr;
        u32 h_src_addr;
        u32 d_src_addr;
        u32 h_dest_addr;
        u32 byte_count;
        union {
                u32 desc_ctrl;
                struct iop3xx_aau_desc_ctrl desc_ctrl_field;
        };
        u32 d_dest_addr;
};

union iop3xx_desc {
        struct iop3xx_desc_aau *aau;
        struct iop3xx_desc_dma *dma;
        struct iop3xx_desc_pq_xor *pq_xor;
        struct iop3xx_desc_dual_xor *dual_xor;
        void *ptr;
};

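/* The AAU can combine up to 32 source blocks in one (extended) descriptor. */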
static inline int iop_adma_get_max_xor(void)
{
        return 32;
}

static inline u32 iop_chan_get_current_descriptor(struct iop_adma_chan *chan)
{
        int id = chan->device->id;

        switch (id) {
        case DMA0_ID:
        case DMA1_ID:
                return __raw_readl(DMA_DAR(chan));
        case AAU_ID:
                return __raw_readl(AAU_ADAR(chan));
        default:
                BUG();
        }
        return 0;
}

static inline void iop_chan_set_next_descriptor(struct iop_adma_chan *chan,
                                                u32 next_desc_addr)
{
        int id = chan->device->id;

        switch (id) {
        case DMA0_ID:
        case DMA1_ID:
                __raw_writel(next_desc_addr, DMA_NDAR(chan));
                break;
        case AAU_ID:
                __raw_writel(next_desc_addr, AAU_ANDAR(chan));
                break;
        }
}

#define IOP_ADMA_STATUS_BUSY (1 << 10)
#define IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1024)
#define IOP_ADMA_XOR_MAX_BYTE_COUNT (16 * 1024 * 1024)
#define IOP_ADMA_MAX_BYTE_COUNT (16 * 1024 * 1024)
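/*
 * Per-descriptor limits: zero-sum (result-check) descriptors cover at
 * most 1 KB, plain DMA and XOR descriptors at most 16 MB.  Longer
 * requests are split across several descriptors by the *_slot_count()
 * helpers below.
 */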

static inline int iop_chan_is_busy(struct iop_adma_chan *chan)
{
        u32 status = __raw_readl(DMA_CSR(chan));
        return (status & IOP_ADMA_STATUS_BUSY) ? 1 : 0;
}

static inline int iop_desc_is_aligned(struct iop_adma_desc_slot *desc,
                                      int num_slots)
{
        /* num_slots will only ever be 1, 2, 4, or 8 */
        return (desc->idx & (num_slots - 1)) ? 0 : 1;
}

/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memcpy_slot_count(size_t len, int *slots_per_op)
{
        *slots_per_op = 1;
        return 1;
}

/* to do: support large (i.e. > hw max) buffer sizes */
static inline int iop_chan_memset_slot_count(size_t len, int *slots_per_op)
{
        *slots_per_op = 1;
        return 1;
}

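/* Each ADMA slot holds one 32-byte basic descriptor (note the "i << 5"
 * arithmetic in iop_desc_init_zero_sum()).  Operations with more than
 * four sources need additional contiguous slots for the extra source
 * addresses and EDCR words, hence the table: up to 4 sources fit in one
 * slot, 5-8 in two, 9-16 in four and 17-32 in eight.
 */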
static inline int iop3xx_aau_xor_slot_count(size_t len, int src_cnt,
                                            int *slots_per_op)
{
        static const char slot_count_table[] = {
                1, 1, 1, 1, /* 01 - 04 */
                2, 2, 2, 2, /* 05 - 08 */
                4, 4, 4, 4, /* 09 - 12 */
                4, 4, 4, 4, /* 13 - 16 */
                8, 8, 8, 8, /* 17 - 20 */
                8, 8, 8, 8, /* 21 - 24 */
                8, 8, 8, 8, /* 25 - 28 */
                8, 8, 8, 8, /* 29 - 32 */
        };
        *slots_per_op = slot_count_table[src_cnt - 1];
        return *slots_per_op;
}

static inline int
iop_chan_interrupt_slot_count(int *slots_per_op, struct iop_adma_chan *chan)
{
        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return iop_chan_memcpy_slot_count(0, slots_per_op);
        case AAU_ID:
                return iop3xx_aau_xor_slot_count(0, 2, slots_per_op);
        default:
                BUG();
        }
        return 0;
}

static inline int iop_chan_xor_slot_count(size_t len, int src_cnt,
                                          int *slots_per_op)
{
        int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);

        if (len <= IOP_ADMA_XOR_MAX_BYTE_COUNT)
                return slot_cnt;

        len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
        while (len > IOP_ADMA_XOR_MAX_BYTE_COUNT) {
                len -= IOP_ADMA_XOR_MAX_BYTE_COUNT;
                slot_cnt += *slots_per_op;
        }

        if (len)
                slot_cnt += *slots_per_op;

        return slot_cnt;
}
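/* Example: a 40 MB XOR of 5 sources is split into three operations
 * (16 MB + 16 MB + 8 MB) of two slots each, so the function returns 6.
 */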

/* zero sum on iop3xx is limited to 1k at a time so it requires multiple
 * descriptors
 */
static inline int iop_chan_zero_sum_slot_count(size_t len, int src_cnt,
                                               int *slots_per_op)
{
        int slot_cnt = iop3xx_aau_xor_slot_count(len, src_cnt, slots_per_op);

        if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT)
                return slot_cnt;

        len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
        while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
                len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
                slot_cnt += *slots_per_op;
        }

        if (len)
                slot_cnt += *slots_per_op;

        return slot_cnt;
}

static inline u32 iop_desc_get_dest_addr(struct iop_adma_desc_slot *desc,
                                         struct iop_adma_chan *chan)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return hw_desc.dma->dest_addr;
        case AAU_ID:
                return hw_desc.aau->dest_addr;
        default:
                BUG();
        }
        return 0;
}

static inline u32 iop_desc_get_byte_count(struct iop_adma_desc_slot *desc,
                                          struct iop_adma_chan *chan)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return hw_desc.dma->byte_count;
        case AAU_ID:
                return hw_desc.aau->byte_count;
        default:
                BUG();
        }
        return 0;
}

/* translate the src_idx to a descriptor word index */
static inline int __desc_idx(int src_idx)
{
        static const int desc_idx_table[] = { 0, 0, 0, 0,
                                              0, 1, 2, 3,
                                              5, 6, 7, 8,
                                              9, 10, 11, 12,
                                              14, 15, 16, 17,
                                              18, 19, 20, 21,
                                              23, 24, 25, 26,
                                              27, 28, 29, 30,
        };

        return desc_idx_table[src_idx];
}

static inline u32 iop_desc_get_src_addr(struct iop_adma_desc_slot *desc,
                                        struct iop_adma_chan *chan,
                                        int src_idx)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return hw_desc.dma->src_addr;
        case AAU_ID:
                break;
        default:
                BUG();
        }

        if (src_idx < 4)
                return hw_desc.aau->src[src_idx];
        else
                return hw_desc.aau->src_edc[__desc_idx(src_idx)].src_addr;
}

static inline void iop3xx_aau_desc_set_src_addr(struct iop3xx_desc_aau *hw_desc,
                                                int src_idx, dma_addr_t addr)
{
        if (src_idx < 4)
                hw_desc->src[src_idx] = addr;
        else
                hw_desc->src_edc[__desc_idx(src_idx)].src_addr = addr;
}

static inline void
iop_desc_init_memcpy(struct iop_adma_desc_slot *desc, unsigned long flags)
{
        struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
        union {
                u32 value;
                struct iop3xx_dma_desc_ctrl field;
        } u_desc_ctrl;

        u_desc_ctrl.value = 0;
        u_desc_ctrl.field.mem_to_mem_en = 1;
        u_desc_ctrl.field.pci_transaction = 0xe; /* memory read block */
        u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
        hw_desc->desc_ctrl = u_desc_ctrl.value;
        hw_desc->upper_pci_src_addr = 0;
        hw_desc->crc_addr = 0;
}

static inline void
iop_desc_init_memset(struct iop_adma_desc_slot *desc, unsigned long flags)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
        union {
                u32 value;
                struct iop3xx_aau_desc_ctrl field;
        } u_desc_ctrl;

        u_desc_ctrl.value = 0;
        u_desc_ctrl.field.blk1_cmd_ctrl = 0x2; /* memory block fill */
        u_desc_ctrl.field.dest_write_en = 1;
        u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
        hw_desc->desc_ctrl = u_desc_ctrl.value;
}

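/*
 * Program an XOR descriptor: block 1 keeps the "direct fill" command
 * (0x7), which loads the store queue, every further source block gets
 * command 0x1 (XOR) in its blk*_cmd_ctrl field, and for more than eight
 * sources the same command pattern is replicated into the extended
 * descriptor control words (EDCR0-2) selected by blk_ctrl.
 */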
static inline u32
iop3xx_desc_init_xor(struct iop3xx_desc_aau *hw_desc, int src_cnt,
                     unsigned long flags)
{
        int i, shift;
        u32 edcr;
        union {
                u32 value;
                struct iop3xx_aau_desc_ctrl field;
        } u_desc_ctrl;

        u_desc_ctrl.value = 0;
        switch (src_cnt) {
        case 25 ... 32:
                u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
                edcr = 0;
                shift = 1;
                for (i = 24; i < src_cnt; i++) {
                        edcr |= (1 << shift);
                        shift += 3;
                }
                hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = edcr;
                src_cnt = 24;
                /* fall through */
        case 17 ... 24:
                if (!u_desc_ctrl.field.blk_ctrl) {
                        hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
                        u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
                }
                edcr = 0;
                shift = 1;
                for (i = 16; i < src_cnt; i++) {
                        edcr |= (1 << shift);
                        shift += 3;
                }
                hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = edcr;
                src_cnt = 16;
                /* fall through */
        case 9 ... 16:
                if (!u_desc_ctrl.field.blk_ctrl)
                        u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
                edcr = 0;
                shift = 1;
                for (i = 8; i < src_cnt; i++) {
                        edcr |= (1 << shift);
                        shift += 3;
                }
                hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = edcr;
                src_cnt = 8;
                /* fall through */
        case 2 ... 8:
                shift = 1;
                for (i = 0; i < src_cnt; i++) {
                        u_desc_ctrl.value |= (1 << shift);
                        shift += 3;
                }

                if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
                        u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
        }

        u_desc_ctrl.field.dest_write_en = 1;
        u_desc_ctrl.field.blk1_cmd_ctrl = 0x7; /* direct fill */
        u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
        hw_desc->desc_ctrl = u_desc_ctrl.value;

        return u_desc_ctrl.value;
}

static inline void
iop_desc_init_xor(struct iop_adma_desc_slot *desc, int src_cnt,
                  unsigned long flags)
{
        iop3xx_desc_init_xor(desc->hw_desc, src_cnt, flags);
}

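/* A zero-sum (parity check) is built as a chain of XOR descriptors with
 * dest_write_en cleared and zero_result_en set, one per
 * IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT (1 KB) chunk.
 */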
/* return the number of operations */
static inline int
iop_desc_init_zero_sum(struct iop_adma_desc_slot *desc, int src_cnt,
                       unsigned long flags)
{
        int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
        struct iop3xx_desc_aau *hw_desc, *prev_hw_desc, *iter;
        union {
                u32 value;
                struct iop3xx_aau_desc_ctrl field;
        } u_desc_ctrl;
        int i, j;

        hw_desc = desc->hw_desc;

        for (i = 0, j = 0; (slot_cnt -= slots_per_op) >= 0;
             i += slots_per_op, j++) {
                iter = iop_hw_desc_slot_idx(hw_desc, i);
                u_desc_ctrl.value = iop3xx_desc_init_xor(iter, src_cnt, flags);
                u_desc_ctrl.field.dest_write_en = 0;
                u_desc_ctrl.field.zero_result_en = 1;
                u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
                iter->desc_ctrl = u_desc_ctrl.value;

                /* for the subsequent descriptors preserve the store queue
                 * and chain them together
                 */
                if (i) {
                        prev_hw_desc =
                                iop_hw_desc_slot_idx(hw_desc, i - slots_per_op);
                        prev_hw_desc->next_desc =
                                (u32) (desc->async_tx.phys + (i << 5));
                }
        }

        return j;
}

static inline void
iop_desc_init_null_xor(struct iop_adma_desc_slot *desc, int src_cnt,
                       unsigned long flags)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
        union {
                u32 value;
                struct iop3xx_aau_desc_ctrl field;
        } u_desc_ctrl;

        u_desc_ctrl.value = 0;
        switch (src_cnt) {
        case 25 ... 32:
                u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
                hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
                /* fall through */
        case 17 ... 24:
                if (!u_desc_ctrl.field.blk_ctrl) {
                        hw_desc->src_edc[AAU_EDCR2_IDX].e_desc_ctrl = 0;
                        u_desc_ctrl.field.blk_ctrl = 0x3; /* use EDCR[2:0] */
                }
                hw_desc->src_edc[AAU_EDCR1_IDX].e_desc_ctrl = 0;
                /* fall through */
        case 9 ... 16:
                if (!u_desc_ctrl.field.blk_ctrl)
                        u_desc_ctrl.field.blk_ctrl = 0x2; /* use EDCR0 */
                hw_desc->src_edc[AAU_EDCR0_IDX].e_desc_ctrl = 0;
                /* fall through */
        case 1 ... 8:
                if (!u_desc_ctrl.field.blk_ctrl && src_cnt > 4)
                        u_desc_ctrl.field.blk_ctrl = 0x1; /* use mini-desc */
        }

        u_desc_ctrl.field.dest_write_en = 0;
        u_desc_ctrl.field.int_en = flags & DMA_PREP_INTERRUPT;
        hw_desc->desc_ctrl = u_desc_ctrl.value;
}

static inline void iop_desc_set_byte_count(struct iop_adma_desc_slot *desc,
                                           struct iop_adma_chan *chan,
                                           u32 byte_count)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                hw_desc.dma->byte_count = byte_count;
                break;
        case AAU_ID:
                hw_desc.aau->byte_count = byte_count;
                break;
        default:
                BUG();
        }
}

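/* A pure interrupt descriptor is a zero-length no-op (a null memcpy on
 * the DMA channels, a null XOR on the AAU) whose only effect is the
 * completion interrupt.
 */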
static inline void
iop_desc_init_interrupt(struct iop_adma_desc_slot *desc,
                        struct iop_adma_chan *chan)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                iop_desc_init_memcpy(desc, 1);
                hw_desc.dma->byte_count = 0;
                hw_desc.dma->dest_addr = 0;
                hw_desc.dma->src_addr = 0;
                break;
        case AAU_ID:
                iop_desc_init_null_xor(desc, 2, 1);
                hw_desc.aau->byte_count = 0;
                hw_desc.aau->dest_addr = 0;
                hw_desc.aau->src[0] = 0;
                hw_desc.aau->src[1] = 0;
                break;
        default:
                BUG();
        }
}

static inline void
iop_desc_set_zero_sum_byte_count(struct iop_adma_desc_slot *desc, u32 len)
{
        int slots_per_op = desc->slots_per_op;
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
        int i = 0;

        if (len <= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
                hw_desc->byte_count = len;
        } else {
                do {
                        iter = iop_hw_desc_slot_idx(hw_desc, i);
                        iter->byte_count = IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
                        len -= IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT;
                        i += slots_per_op;
                } while (len > IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT);

                if (len) {
                        iter = iop_hw_desc_slot_idx(hw_desc, i);
                        iter->byte_count = len;
                }
        }
}

static inline void iop_desc_set_dest_addr(struct iop_adma_desc_slot *desc,
                                          struct iop_adma_chan *chan,
                                          dma_addr_t addr)
{
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                hw_desc.dma->dest_addr = addr;
                break;
        case AAU_ID:
                hw_desc.aau->dest_addr = addr;
                break;
        default:
                BUG();
        }
}

static inline void iop_desc_set_memcpy_src_addr(struct iop_adma_desc_slot *desc,
                                                dma_addr_t addr)
{
        struct iop3xx_desc_dma *hw_desc = desc->hw_desc;
        hw_desc->src_addr = addr;
}

static inline void
iop_desc_set_zero_sum_src_addr(struct iop_adma_desc_slot *desc, int src_idx,
                               dma_addr_t addr)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
        int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
        int i;

        for (i = 0; (slot_cnt -= slots_per_op) >= 0;
             i += slots_per_op, addr += IOP_ADMA_ZERO_SUM_MAX_BYTE_COUNT) {
                iter = iop_hw_desc_slot_idx(hw_desc, i);
                iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
        }
}

static inline void iop_desc_set_xor_src_addr(struct iop_adma_desc_slot *desc,
                                             int src_idx, dma_addr_t addr)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc, *iter;
        int slot_cnt = desc->slot_cnt, slots_per_op = desc->slots_per_op;
        int i;

        for (i = 0; (slot_cnt -= slots_per_op) >= 0;
             i += slots_per_op, addr += IOP_ADMA_XOR_MAX_BYTE_COUNT) {
                iter = iop_hw_desc_slot_idx(hw_desc, i);
                iop3xx_aau_desc_set_src_addr(iter, src_idx, addr);
        }
}

static inline void iop_desc_set_next_desc(struct iop_adma_desc_slot *desc,
                                          u32 next_desc_addr)
{
        /* hw_desc->next_desc is the same location for all channels */
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };

        iop_paranoia(hw_desc.dma->next_desc);
        hw_desc.dma->next_desc = next_desc_addr;
}

static inline u32 iop_desc_get_next_desc(struct iop_adma_desc_slot *desc)
{
        /* hw_desc->next_desc is the same location for all channels */
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
        return hw_desc.dma->next_desc;
}

static inline void iop_desc_clear_next_desc(struct iop_adma_desc_slot *desc)
{
        /* hw_desc->next_desc is the same location for all channels */
        union iop3xx_desc hw_desc = { .ptr = desc->hw_desc, };
        hw_desc.dma->next_desc = 0;
}

static inline void iop_desc_set_block_fill_val(struct iop_adma_desc_slot *desc,
                                               u32 val)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
        hw_desc->src[0] = val;
}

static inline int iop_desc_get_zero_result(struct iop_adma_desc_slot *desc)
{
        struct iop3xx_desc_aau *hw_desc = desc->hw_desc;
        struct iop3xx_aau_desc_ctrl desc_ctrl = hw_desc->desc_ctrl_field;

        iop_paranoia(!(desc_ctrl.tx_complete && desc_ctrl.zero_result_en));
        return desc_ctrl.zero_result_err;
}

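/* Channel control register usage: bit 0 is the channel-enable bit
 * (iop_chan_enable()/iop_chan_disable()), while bit 1, set by
 * iop_chan_append(), is the chain-resume bit that makes a running
 * channel re-fetch the next descriptor address.
 */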
static inline void iop_chan_append(struct iop_adma_chan *chan)
{
        u32 dma_chan_ctrl;

        dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
        dma_chan_ctrl |= 0x2;
        __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

static inline u32 iop_chan_get_status(struct iop_adma_chan *chan)
{
        return __raw_readl(DMA_CSR(chan));
}

static inline void iop_chan_disable(struct iop_adma_chan *chan)
{
        u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));
        dma_chan_ctrl &= ~1;
        __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

static inline void iop_chan_enable(struct iop_adma_chan *chan)
{
        u32 dma_chan_ctrl = __raw_readl(DMA_CCR(chan));

        dma_chan_ctrl |= 1;
        __raw_writel(dma_chan_ctrl, DMA_CCR(chan));
}

static inline void iop_adma_device_clear_eot_status(struct iop_adma_chan *chan)
{
        u32 status = __raw_readl(DMA_CSR(chan));
        status &= (1 << 9);
        __raw_writel(status, DMA_CSR(chan));
}

static inline void iop_adma_device_clear_eoc_status(struct iop_adma_chan *chan)
{
        u32 status = __raw_readl(DMA_CSR(chan));
        status &= (1 << 8);
        __raw_writel(status, DMA_CSR(chan));
}

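/* The error bits differ per engine: the DMA channels report a
 * split-transaction error (bit 1), PCI target abort (bit 2), PCI master
 * abort (bit 3) and internal bus master abort (bit 5); the AAU only
 * reports the internal master abort.  The iop_is_err_*() helpers below
 * test the same bits.
 */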
static inline void iop_adma_device_clear_err_status(struct iop_adma_chan *chan)
{
        u32 status = __raw_readl(DMA_CSR(chan));

        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                status &= (1 << 5) | (1 << 3) | (1 << 2) | (1 << 1);
                break;
        case AAU_ID:
                status &= (1 << 5);
                break;
        default:
                BUG();
        }

        __raw_writel(status, DMA_CSR(chan));
}

static inline int
iop_is_err_int_parity(unsigned long status, struct iop_adma_chan *chan)
{
        return 0;
}

static inline int
iop_is_err_mcu_abort(unsigned long status, struct iop_adma_chan *chan)
{
        return 0;
}

static inline int
iop_is_err_int_tabort(unsigned long status, struct iop_adma_chan *chan)
{
        return 0;
}

static inline int
iop_is_err_int_mabort(unsigned long status, struct iop_adma_chan *chan)
{
        return test_bit(5, &status);
}

static inline int
iop_is_err_pci_tabort(unsigned long status, struct iop_adma_chan *chan)
{
        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return test_bit(2, &status);
        default:
                return 0;
        }
}

static inline int
iop_is_err_pci_mabort(unsigned long status, struct iop_adma_chan *chan)
{
        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return test_bit(3, &status);
        default:
                return 0;
        }
}

static inline int
iop_is_err_split_tx(unsigned long status, struct iop_adma_chan *chan)
{
        switch (chan->device->id) {
        case DMA0_ID:
        case DMA1_ID:
                return test_bit(1, &status);
        default:
                return 0;
        }
}
#endif /* _ADMA_H */