#ifndef __ASM_AVR32_IO_H
#define __ASM_AVR32_IO_H

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

#include <asm/addrspace.h>
#include <asm/byteorder.h>

#include <mach/io.h>
/*
 * virt_to_phys - translate a kernel virtual address to a physical one.
 *
 * Only valid when the address lies in the P1 (cached) or P2 (uncached)
 * segment; PHYSADDR() masks off the segment bits.
 */
static __inline__ unsigned long virt_to_phys(volatile void *address)
{
	return PHYSADDR(address);
}
18 
/*
 * phys_to_virt - translate a physical address to a kernel virtual
 * address in the P1 (cached) segment.
 */
static __inline__ void *phys_to_virt(unsigned long address)
{
	return (void *)P1SEGADDR(address);
}
23 
/*
 * Conversions between physical addresses and the cached (P1) or
 * uncached (P2) kernel segments.
 */
#define cached_to_phys(addr)	((unsigned long)PHYSADDR(addr))
#define uncached_to_phys(addr)	((unsigned long)PHYSADDR(addr))
#define phys_to_cached(addr)	((void *)P1SEGADDR(addr))
#define phys_to_uncached(addr)	((void *)P2SEGADDR(addr))
28 
29 /*
30  * Generic IO read/write.  These perform native-endian accesses.  Note
31  * that some architectures will want to re-define __raw_{read,write}w.
32  */
33 extern void __raw_writesb(void __iomem *addr, const void *data, int bytelen);
34 extern void __raw_writesw(void __iomem *addr, const void *data, int wordlen);
35 extern void __raw_writesl(void __iomem *addr, const void *data, int longlen);
36 
37 extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
38 extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
39 extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
40 
/*
 * Single native-endian MMIO stores.  No byte swapping and no barriers;
 * the __force cast only strips the sparse __iomem annotation.
 */
static inline void __raw_writeb(u8 v, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = v;
}
static inline void __raw_writew(u16 v, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = v;
}
static inline void __raw_writel(u32 v, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = v;
}
53 
__raw_readb(const volatile void __iomem * addr)54 static inline u8 __raw_readb(const volatile void __iomem *addr)
55 {
56 	return *(const volatile u8 __force *)addr;
57 }
__raw_readw(const volatile void __iomem * addr)58 static inline u16 __raw_readw(const volatile void __iomem *addr)
59 {
60 	return *(const volatile u16 __force *)addr;
61 }
__raw_readl(const volatile void __iomem * addr)62 static inline u32 __raw_readl(const volatile void __iomem *addr)
63 {
64 	return *(const volatile u32 __force *)addr;
65 }
66 
/* Convert I/O port address to virtual address */
#ifndef __io
# define __io(p)	((void *)phys_to_uncached(p))
#endif

/*
 * Not really sure about the best way to slow down I/O on
 * AVR32. Defining it as a no-op until we have an actual test case.
 */
#define SLOW_DOWN_IO	do { } while (0)
77 
/*
 * Expands to a pfx##write##bwl / pfx##read##bwl accessor pair for MMIO
 * of the given width.  Addresses go through __swizzle_addr_* and values
 * through *ioswab* (both mach-provided) before the access.
 */
#define __BUILD_MEMORY_SINGLE(pfx, bwl, type)				\
static inline void							\
pfx##write##bwl(type val, volatile void __iomem *addr)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr));	\
	__val = pfx##ioswab##bwl(__addr, val);				\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
}									\
									\
static inline type pfx##read##bwl(const volatile void __iomem *addr)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = (void *)__swizzle_addr_##bwl((unsigned long)(addr));	\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	return pfx##ioswab##bwl(__addr, __val);				\
}
105 
/*
 * Expands to a pfx##out##bwl##p / pfx##in##bwl##p accessor pair for
 * port I/O of the given width.  Ports are mapped to virtual addresses
 * via __io(); 'slow' is an optional statement (e.g. SLOW_DOWN_IO)
 * executed after the access for the *_p variants.
 */
#define __BUILD_IOPORT_SINGLE(pfx, bwl, type, p, slow)			\
static inline void pfx##out##bwl##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __io(__swizzle_addr_##bwl(port));			\
	__val = pfx##ioswab##bwl(__addr, val);				\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	*__addr = __val;						\
	slow;								\
}									\
									\
static inline type pfx##in##bwl##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __io(__swizzle_addr_##bwl(port));			\
									\
	BUILD_BUG_ON(sizeof(type) > sizeof(unsigned long));		\
									\
	__val = *__addr;						\
	slow;								\
									\
	return pfx##ioswab##bwl(__addr, __val);				\
}
135 
/*
 * Instantiate each accessor twice: once with no prefix (the public
 * readb/writeb/inb/outb names) and once with the __mem_ prefix used by
 * the string helpers below.
 */
#define __BUILD_MEMORY_PFX(bus, bwl, type)				\
	__BUILD_MEMORY_SINGLE(bus, bwl, type)

#define BUILDIO_MEM(bwl, type)						\
	__BUILD_MEMORY_PFX(, bwl, type)					\
	__BUILD_MEMORY_PFX(__mem_, bwl, type)

#define __BUILD_IOPORT_PFX(bus, bwl, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwl, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwl, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwl, type)					\
	__BUILD_IOPORT_PFX(, bwl, type)					\
	__BUILD_IOPORT_PFX(__mem_, bwl, type)
150 
BUILDIO_MEM(b,u8)151 BUILDIO_MEM(b, u8)
152 BUILDIO_MEM(w, u16)
153 BUILDIO_MEM(l, u32)
154 
155 BUILDIO_IOPORT(b, u8)
156 BUILDIO_IOPORT(w, u16)
157 BUILDIO_IOPORT(l, u32)
158 
/* The relaxed variants are no weaker than the ordinary accessors here. */
#define readb_relaxed			readb
#define readw_relaxed			readw
#define readl_relaxed			readl

/* Big-endian accessors: raw native accesses, no byte swapping. */
#define readb_be			__raw_readb
#define readw_be			__raw_readw
#define readl_be			__raw_readl

#define writeb_be			__raw_writeb
#define writew_be			__raw_writew
#define writel_be			__raw_writel
170 
/*
 * Expands to writes##bwl / reads##bwl: transfer 'count' items between
 * a buffer and a fixed MMIO address, one element at a time.
 */
#define __BUILD_MEMORY_STRING(bwl, type)				\
static inline void writes##bwl(volatile void __iomem *addr,		\
			       const void *data, unsigned int count)	\
{									\
	const type *__data = data;					\
									\
	while (count--)							\
		__mem_write##bwl(*__data++, addr);			\
}									\
									\
static inline void reads##bwl(const volatile void __iomem *addr,	\
			      void *data, unsigned int count)		\
{									\
	type *__data = data;						\
									\
	while (count--)							\
		*__data++ = __mem_read##bwl(addr);			\
}
189 
/*
 * Expands to outs##bwl / ins##bwl: transfer 'count' items between a
 * buffer and a fixed I/O port, one element at a time.
 */
#define __BUILD_IOPORT_STRING(bwl, type)				\
static inline void outs##bwl(unsigned long port, const void *data,	\
			     unsigned int count)			\
{									\
	const type *__data = data;					\
									\
	while (count--)							\
		__mem_out##bwl(*__data++, port);			\
}									\
									\
static inline void ins##bwl(unsigned long port, void *data,		\
			   unsigned int count)				\
{									\
	type *__data = data;						\
									\
	while (count--)							\
		*__data++ = __mem_in##bwl(port);			\
}
208 
209 #define BUILDSTRING(bwl, type)						\
210 	__BUILD_MEMORY_STRING(bwl, type)				\
211 	__BUILD_IOPORT_STRING(bwl, type)
212 
213 BUILDSTRING(b, u8)
214 BUILDSTRING(w, u16)
215 BUILDSTRING(l, u32)
216 
/*
 * io{read,write}{8,16,32} macros in both le (for PCI style consumers) and native be
 */
#ifndef ioread8

#define ioread8(p)		((unsigned int)readb(p))

#define ioread16(p)		((unsigned int)readw(p))
#define ioread16be(p)		((unsigned int)__raw_readw(p))

#define ioread32(p)		((unsigned int)readl(p))
#define ioread32be(p)		((unsigned int)__raw_readl(p))

#define iowrite8(v,p)		writeb(v, p)

#define iowrite16(v,p)		writew(v, p)
#define iowrite16be(v,p)	__raw_writew(v, p)

#define iowrite32(v,p)		writel(v, p)
#define iowrite32be(v,p)	__raw_writel(v, p)

#define ioread8_rep(p,d,c)	readsb(p,d,c)
#define ioread16_rep(p,d,c)	readsw(p,d,c)
#define ioread32_rep(p,d,c)	readsl(p,d,c)

#define iowrite8_rep(p,s,c)	writesb(p,s,c)
#define iowrite16_rep(p,s,c)	writesw(p,s,c)
#define iowrite32_rep(p,s,c)	writesl(p,s,c)

#endif
247 
248 static inline void memcpy_fromio(void * to, const volatile void __iomem *from,
249 				 unsigned long count)
250 {
251 	memcpy(to, (const void __force *)from, count);
252 }
253 
/* Copy 'count' bytes from normal memory to MMIO space. */
static inline void memcpy_toio(volatile void __iomem *to, const void *from,
			       unsigned long count)
{
	memcpy((void __force *)to, from, count);
}
259 
/* Fill 'count' bytes of MMIO space with 'val'. */
static inline void memset_io(volatile void __iomem *addr, unsigned char val,
			     unsigned long count)
{
	memset((void __force *)addr, val, count);
}
265 
266 #define mmiowb()
267 
268 #define IO_SPACE_LIMIT	0xffffffff
269 
270 extern void __iomem *__ioremap(unsigned long offset, size_t size,
271 			       unsigned long flags);
272 extern void __iounmap(void __iomem *addr);
273 
274 /*
275  * ioremap	-   map bus memory into CPU space
276  * @offset	bus address of the memory
277  * @size	size of the resource to map
278  *
279  * ioremap performs a platform specific sequence of operations to make
280  * bus memory CPU accessible via the readb/.../writel functions and
281  * the other mmio helpers. The returned address is not guaranteed to
282  * be usable directly as a virtual address.
283  */
284 #define ioremap(offset, size)			\
285 	__ioremap((offset), (size), 0)
286 
287 #define ioremap_nocache(offset, size)		\
288 	__ioremap((offset), (size), 0)
289 
290 #define iounmap(addr)				\
291 	__iounmap(addr)
292 
293 #define cached(addr) P1SEGADDR(addr)
294 #define uncached(addr) P2SEGADDR(addr)
295 
296 #define virt_to_bus virt_to_phys
297 #define bus_to_virt phys_to_virt
298 #define page_to_bus page_to_phys
299 #define bus_to_page phys_to_page
300 
301 /*
302  * Create a virtual mapping cookie for an IO port range.  There exists
303  * no such thing as port-based I/O on AVR32, so a regular ioremap()
304  * should do what we need.
305  */
306 #define ioport_map(port, nr)	ioremap(port, nr)
307 #define ioport_unmap(port)	iounmap(port)
308 
309 /*
310  * Convert a physical pointer to a virtual kernel pointer for /dev/mem
311  * access
312  */
313 #define xlate_dev_mem_ptr(p)    __va(p)
314 
315 /*
316  * Convert a virtual cached pointer to an uncached pointer
317  */
318 #define xlate_dev_kmem_ptr(p)   p
319 
320 #endif /* __ASM_AVR32_IO_H */
321