/*
 * Copyright (C) 2007-2010 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author:
 *   Zhang Wei <wei.zhang@freescale.com>, Jul 2007
 *   Ebony Zhu <ebony.zhu@freescale.com>, May 2007
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 */
#ifndef __DMA_FSLDMA_H
#define __DMA_FSLDMA_H

#include <linux/device.h>
#include <linux/dmapool.h>
#include <linux/dmaengine.h>

/* Define data structures needed by the Freescale
 * MPC8540 and MPC8349 DMA controllers.
 */
#define FSL_DMA_MR_CS		0x00000001
#define FSL_DMA_MR_CC		0x00000002
#define FSL_DMA_MR_CA		0x00000008
#define FSL_DMA_MR_EIE		0x00000040
#define FSL_DMA_MR_XFE		0x00000020
#define FSL_DMA_MR_EOLNIE	0x00000100
#define FSL_DMA_MR_EOLSIE	0x00000080
#define FSL_DMA_MR_EOSIE	0x00000200
#define FSL_DMA_MR_CDSM		0x00000010
#define FSL_DMA_MR_CTM		0x00000004
#define FSL_DMA_MR_EMP_EN	0x00200000
#define FSL_DMA_MR_EMS_EN	0x00040000
#define FSL_DMA_MR_DAHE		0x00002000
#define FSL_DMA_MR_SAHE		0x00001000

/*
 * Bandwidth/pause control determines how many bytes a given
 * channel is allowed to transfer before the DMA engine pauses
 * the current channel and switches to the next channel
 */
#define FSL_DMA_MR_BWC		0x0A000000
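
/*
 * Illustrative sketch (not part of the original header): the driver is
 * expected to fold FSL_DMA_MR_BWC into the mode register with a
 * read-modify-write through the endian-aware accessors defined below,
 * for example:
 *
 *	u32 mode = DMA_IN(chan, &chan->regs->mr, 32);
 *	mode |= FSL_DMA_MR_BWC;
 *	DMA_OUT(chan, &chan->regs->mr, mode, 32);
 *
 * Exactly where this happens is up to the .c implementation.
 */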

/* Special MR definitions for the MPC8349 */
#define FSL_DMA_MR_EOTIE	0x00000080
#define FSL_DMA_MR_PRC_RM	0x00000800

#define FSL_DMA_SR_CH		0x00000020
#define FSL_DMA_SR_PE		0x00000010
#define FSL_DMA_SR_CB		0x00000004
#define FSL_DMA_SR_TE		0x00000080
#define FSL_DMA_SR_EOSI		0x00000002
#define FSL_DMA_SR_EOLSI	0x00000001
#define FSL_DMA_SR_EOCDI	0x00000001
#define FSL_DMA_SR_EOLNI	0x00000008

#define FSL_DMA_SATR_SBPATMU			0x20000000
#define FSL_DMA_SATR_STRANSINT_RIO		0x00c00000
#define FSL_DMA_SATR_SREADTYPE_SNOOP_READ	0x00050000
#define FSL_DMA_SATR_SREADTYPE_BP_IORH		0x00020000
#define FSL_DMA_SATR_SREADTYPE_BP_NREAD		0x00040000
#define FSL_DMA_SATR_SREADTYPE_BP_MREAD		0x00070000

#define FSL_DMA_DATR_DBPATMU			0x20000000
#define FSL_DMA_DATR_DTRANSINT_RIO		0x00c00000
#define FSL_DMA_DATR_DWRITETYPE_SNOOP_WRITE	0x00050000
#define FSL_DMA_DATR_DWRITETYPE_BP_FLUSH	0x00010000

#define FSL_DMA_EOL		((u64)0x1)
#define FSL_DMA_SNEN		((u64)0x10)
#define FSL_DMA_EOSIE		0x8
#define FSL_DMA_NLDA_MASK	(~(u64)0x1f)

#define FSL_DMA_BCR_MAX_CNT	0x03ffffffu

#define FSL_DMA_DGSR_TE		0x80
#define FSL_DMA_DGSR_CH		0x20
#define FSL_DMA_DGSR_PE		0x10
#define FSL_DMA_DGSR_EOLNI	0x08
#define FSL_DMA_DGSR_CB		0x04
#define FSL_DMA_DGSR_EOSI	0x02
#define FSL_DMA_DGSR_EOLSI	0x01

#define FSL_DMA_BUSWIDTHS	(BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \
				BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \
				BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \
				BIT(DMA_SLAVE_BUSWIDTH_8_BYTES))
typedef u64 __bitwise v64;
typedef u32 __bitwise v32;

/* Hardware link descriptor, in the layout consumed by the DMA engine */
struct fsl_dma_ld_hw {
	v64 src_addr;
	v64 dst_addr;
	v64 next_ln_addr;
	v32 count;
	v32 reserve;
} __attribute__((aligned(32)));
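
/*
 * Illustrative sketch (not from the original header): link descriptors are
 * chained through next_ln_addr and the chain is terminated with FSL_DMA_EOL,
 * with all fields stored in controller endianness via CPU_TO_DMA():
 *
 *	prev->hw.next_ln_addr = CPU_TO_DMA(chan, snoop | new_phys, 64);
 *	new->hw.next_ln_addr  = CPU_TO_DMA(chan, FSL_DMA_EOL, 64);
 *
 * "prev", "new", "snoop" and "new_phys" are hypothetical names standing in
 * for whatever the .c implementation actually uses.
 */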

/* Software descriptor wrapping one hardware link descriptor */
struct fsl_desc_sw {
	struct fsl_dma_ld_hw hw;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
} __attribute__((aligned(32)));
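
/*
 * Illustrative sketch (not from the original header): descriptors are meant
 * to be carved out of the channel's dma_pool so the embedded fsl_dma_ld_hw
 * keeps its 32-byte alignment, roughly:
 *
 *	struct fsl_desc_sw *desc;
 *	dma_addr_t phys;
 *
 *	desc = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
 *	if (desc) {
 *		INIT_LIST_HEAD(&desc->tx_list);
 *		dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
 *		desc->async_tx.phys = phys;
 *	}
 *
 * "chan" stands for the owning struct fsldma_chan; the real allocation path
 * lives in the .c implementation.
 */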

struct fsldma_chan_regs {
	u32 mr;		/* 0x00 - Mode Register */
	u32 sr;		/* 0x04 - Status Register */
	u64 cdar;	/* 0x08 - Current descriptor address register */
	u64 sar;	/* 0x10 - Source Address Register */
	u64 dar;	/* 0x18 - Destination Address Register */
	u32 bcr;	/* 0x20 - Byte Count Register */
	u64 ndar;	/* 0x24 - Next Descriptor Address Register */
};

struct fsldma_chan;
#define FSL_DMA_MAX_CHANS_PER_DEVICE 8

struct fsldma_device {
	void __iomem *regs;	/* DGSR register base */
	struct device *dev;
	struct dma_device common;
	struct fsldma_chan *chan[FSL_DMA_MAX_CHANS_PER_DEVICE];
	u32 feature;		/* Same feature flags as the DMA channels */
	int irq;		/* Per-controller IRQ */
};

/* Define macros for fsldma_chan->feature property */
#define FSL_DMA_LITTLE_ENDIAN	0x00000000
#define FSL_DMA_BIG_ENDIAN	0x00000001

#define FSL_DMA_IP_MASK		0x00000ff0
#define FSL_DMA_IP_85XX		0x00000010
#define FSL_DMA_IP_83XX		0x00000020

#define FSL_DMA_CHAN_PAUSE_EXT	0x00001000
#define FSL_DMA_CHAN_START_EXT	0x00002000

#ifdef CONFIG_PM
struct fsldma_chan_regs_save {
	u32 mr;
};

enum fsldma_pm_state {
	RUNNING = 0,
	SUSPENDED,
};
#endif

struct fsldma_chan {
	char name[8];			/* Channel name */
	struct fsldma_chan_regs __iomem *regs;
	spinlock_t desc_lock;		/* Descriptor operation lock */
	/*
	 * Descriptors which are queued to run, but have not yet been
	 * submitted to the hardware for execution
	 */
	struct list_head ld_pending;
	/*
	 * Descriptors which are currently being executed by the hardware
	 */
	struct list_head ld_running;
	/*
	 * Descriptors which have finished execution by the hardware. These
	 * descriptors have already had their cleanup actions run. They are
	 * waiting for the ACK bit to be set by the async_tx API.
	 */
	struct list_head ld_completed;	/* Link descriptors queue */
	struct dma_chan common;		/* DMA common channel */
	struct dma_pool *desc_pool;	/* Descriptors pool */
	struct device *dev;		/* Channel device */
	int irq;			/* Channel IRQ */
	int id;				/* Raw id of this channel */
	struct tasklet_struct tasklet;
	u32 feature;
	bool idle;			/* DMA controller is idle */
#ifdef CONFIG_PM
	struct fsldma_chan_regs_save regs_save;
	enum fsldma_pm_state pm_state;
#endif

	void (*toggle_ext_pause)(struct fsldma_chan *fsl_chan, int enable);
	void (*toggle_ext_start)(struct fsldma_chan *fsl_chan, int enable);
	void (*set_src_loop_size)(struct fsldma_chan *fsl_chan, int size);
	void (*set_dst_loop_size)(struct fsldma_chan *fsl_chan, int size);
	void (*set_request_count)(struct fsldma_chan *fsl_chan, int size);
};

#define to_fsl_chan(chan) container_of(chan, struct fsldma_chan, common)
#define to_fsl_desc(lh) container_of(lh, struct fsl_desc_sw, node)
#define tx_to_fsl_desc(tx) container_of(tx, struct fsl_desc_sw, async_tx)
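
/*
 * Illustrative sketch (not from the original header): dmaengine callbacks
 * receive the embedded generic objects and convert back to the driver
 * structures with the helpers above, e.g.:
 *
 *	static int hypothetical_alloc(struct dma_chan *dchan)
 *	{
 *		struct fsldma_chan *chan = to_fsl_chan(dchan);
 *		...
 *	}
 *
 * "hypothetical_alloc" is a made-up name; the real callbacks live in the
 * .c implementation.
 */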

#ifndef __powerpc64__
static u64 in_be64(const u64 __iomem *addr)
{
	return ((u64)in_be32((u32 __iomem *)addr) << 32) |
		(in_be32((u32 __iomem *)addr + 1));
}

static void out_be64(u64 __iomem *addr, u64 val)
{
	out_be32((u32 __iomem *)addr, val >> 32);
	out_be32((u32 __iomem *)addr + 1, (u32)val);
}

/* There are no asm instructions for 64-bit reverse loads and stores */
static u64 in_le64(const u64 __iomem *addr)
{
	return ((u64)in_le32((u32 __iomem *)addr + 1) << 32) |
		(in_le32((u32 __iomem *)addr));
}

static void out_le64(u64 __iomem *addr, u64 val)
{
	out_le32((u32 __iomem *)addr + 1, val >> 32);
	out_le32((u32 __iomem *)addr, (u32)val);
}
#endif

#define DMA_IN(fsl_chan, addr, width)					\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			in_be##width(addr) : in_le##width(addr))
#define DMA_OUT(fsl_chan, addr, val, width)				\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			out_be##width(addr, val) : out_le##width(addr, val))

#define DMA_TO_CPU(fsl_chan, d, width)					\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			be##width##_to_cpu((__force __be##width)(v##width)d) : \
			le##width##_to_cpu((__force __le##width)(v##width)d))
#define CPU_TO_DMA(fsl_chan, c, width)					\
		(((fsl_chan)->feature & FSL_DMA_BIG_ENDIAN) ?		\
			(__force v##width)cpu_to_be##width(c) :		\
			(__force v##width)cpu_to_le##width(c))
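
/*
 * Illustrative sketch (not from the original header): register and
 * descriptor accesses go through these macros so the same driver code works
 * on both big- and little-endian controllers, e.g.:
 *
 *	u32 sr = DMA_IN(chan, &chan->regs->sr, 32);
 *	DMA_OUT(chan, &chan->regs->mr, mode, 32);
 *	desc->hw.count = CPU_TO_DMA(chan, len, 32);
 *
 * "chan", "mode", "desc" and "len" are hypothetical locals used only for
 * illustration.
 */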

#endif /* __DMA_FSLDMA_H */