#ifndef __ASM_SH_IO_H
#define __ASM_SH_IO_H

/*
 * Convention:
 *    read{b,w,l,q}/write{b,w,l,q} are for PCI,
 *    while in{b,w,l}/out{b,w,l} are for ISA
 *
 * In addition we have 'pausing' versions: in{b,w,l}_p/out{b,w,l}_p
 * and 'string' versions: ins{b,w,l}/outs{b,w,l}
 *
 * While read{b,w,l,q} and write{b,w,l,q} contain memory barriers
 * automatically, there are also __raw versions, which do not.
 */
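/*
 * Illustrative sketch of the convention above. This is not part of the
 * header; it is a hypothetical, compiled-out example of how a driver
 * might pick between the MMIO and port accessor families.
 */
#if 0
static void example_accessor_convention(void __iomem *mmio_regs)
{
	u32 status;

	/* Memory-mapped (e.g. PCI) registers: barrier-full accessors */
	writel(0x1, mmio_regs + 0x00);
	status = readl(mmio_regs + 0x04);
	(void)status;

	/* Legacy ISA-style port I/O, including a 'pausing' variant */
	outb(0x80, 0x70);
	(void)inb_p(0x71);
}
#endif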
#include <linux/errno.h>
#include <asm/cache.h>
#include <asm/addrspace.h>
#include <asm/machvec.h>
#include <asm/pgtable.h>
#include <asm-generic/iomap.h>

#ifdef __KERNEL__
#define __IO_PREFIX     generic
#include <asm/io_generic.h>
#include <asm/io_trapped.h>
#include <mach/mangle-port.h>

#define __raw_writeb(v,a)	(__chk_io_ptr(a), *(volatile u8  __force *)(a) = (v))
#define __raw_writew(v,a)	(__chk_io_ptr(a), *(volatile u16 __force *)(a) = (v))
#define __raw_writel(v,a)	(__chk_io_ptr(a), *(volatile u32 __force *)(a) = (v))
#define __raw_writeq(v,a)	(__chk_io_ptr(a), *(volatile u64 __force *)(a) = (v))

#define __raw_readb(a)		(__chk_io_ptr(a), *(volatile u8  __force *)(a))
#define __raw_readw(a)		(__chk_io_ptr(a), *(volatile u16 __force *)(a))
#define __raw_readl(a)		(__chk_io_ptr(a), *(volatile u32 __force *)(a))
#define __raw_readq(a)		(__chk_io_ptr(a), *(volatile u64 __force *)(a))

#define readb_relaxed(c)	({ u8  __v = ioswabb(__raw_readb(c)); __v; })
#define readw_relaxed(c)	({ u16 __v = ioswabw(__raw_readw(c)); __v; })
#define readl_relaxed(c)	({ u32 __v = ioswabl(__raw_readl(c)); __v; })
#define readq_relaxed(c)	({ u64 __v = ioswabq(__raw_readq(c)); __v; })

#define writeb_relaxed(v,c)	((void)__raw_writeb((__force  u8)ioswabb(v),c))
#define writew_relaxed(v,c)	((void)__raw_writew((__force u16)ioswabw(v),c))
#define writel_relaxed(v,c)	((void)__raw_writel((__force u32)ioswabl(v),c))
#define writeq_relaxed(v,c)	((void)__raw_writeq((__force u64)ioswabq(v),c))

#define readb(a)		({ u8  r_ = readb_relaxed(a); rmb(); r_; })
#define readw(a)		({ u16 r_ = readw_relaxed(a); rmb(); r_; })
#define readl(a)		({ u32 r_ = readl_relaxed(a); rmb(); r_; })
#define readq(a)		({ u64 r_ = readq_relaxed(a); rmb(); r_; })

#define writeb(v,a)		({ wmb(); writeb_relaxed((v),(a)); })
#define writew(v,a)		({ wmb(); writew_relaxed((v),(a)); })
#define writel(v,a)		({ wmb(); writel_relaxed((v),(a)); })
#define writeq(v,a)		({ wmb(); writeq_relaxed((v),(a)); })
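
/*
 * Compiled-out sketch (an illustration, not kernel API documentation):
 * readb/w/l/q() append an rmb() after the load and writeb/w/l/q()
 * prepend a wmb() before the store, so the ordered forms are the safe
 * default around DMA; the _relaxed forms skip the barrier when no
 * ordering against normal memory is required, e.g. status polling.
 */
#if 0
static void example_ordered_vs_relaxed(void __iomem *regs, u32 *desc)
{
	desc[0] = 0x1;			/* fill a DMA descriptor ... */
	writel(0x1, regs + 0x10);	/* ... wmb() orders it before the doorbell */

	/* pure register polling needs no ordering against memory */
	while (!(readl_relaxed(regs + 0x14) & 0x1))
		cpu_relax();
}
#endif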

#define readsb(p,d,l)		__raw_readsb(p,d,l)
#define readsw(p,d,l)		__raw_readsw(p,d,l)
#define readsl(p,d,l)		__raw_readsl(p,d,l)

#define writesb(p,d,l)		__raw_writesb(p,d,l)
#define writesw(p,d,l)		__raw_writesw(p,d,l)
#define writesl(p,d,l)		__raw_writesl(p,d,l)

#define __BUILD_UNCACHED_IO(bwlq, type)					\
static inline type read##bwlq##_uncached(unsigned long addr)		\
{									\
	type ret;							\
	jump_to_uncached();						\
	ret = __raw_read##bwlq(addr);					\
	back_to_cached();						\
	return ret;							\
}									\
									\
static inline void write##bwlq##_uncached(type v, unsigned long addr)	\
{									\
	jump_to_uncached();						\
	__raw_write##bwlq(v, addr);					\
	back_to_cached();						\
}

__BUILD_UNCACHED_IO(b, u8)
__BUILD_UNCACHED_IO(w, u16)
__BUILD_UNCACHED_IO(l, u32)
__BUILD_UNCACHED_IO(q, u64)
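
/*
 * Compiled-out sketch: __BUILD_UNCACHED_IO() above generates
 * read{b,w,l,q}_uncached()/write{b,w,l,q}_uncached(), which hop to the
 * uncached shadow mapping around a single raw access. The register
 * address below is hypothetical.
 */
#if 0
static u32 example_read_uncached(unsigned long cache_ctrl_reg)
{
	/* raw 32-bit load performed while executing uncached */
	return readl_uncached(cache_ctrl_reg);
}
#endif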

#define __BUILD_MEMORY_STRING(pfx, bwlq, type)				\
									\
static inline void							\
pfx##writes##bwlq(volatile void __iomem *mem, const void *addr,		\
		  unsigned int count)					\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		__raw_write##bwlq(*__addr, mem);			\
		__addr++;						\
	}								\
}									\
									\
static inline void pfx##reads##bwlq(volatile void __iomem *mem,		\
				    void *addr, unsigned int count)	\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = __raw_read##bwlq(mem);			\
		__addr++;						\
	}								\
}

__BUILD_MEMORY_STRING(__raw_, b, u8)
__BUILD_MEMORY_STRING(__raw_, w, u16)

#ifdef CONFIG_SUPERH32
void __raw_writesl(void __iomem *addr, const void *data, int longlen);
void __raw_readsl(const void __iomem *addr, void *data, int longlen);
#else
__BUILD_MEMORY_STRING(__raw_, l, u32)
#endif

__BUILD_MEMORY_STRING(__raw_, q, u64)
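
/*
 * Compiled-out sketch: the string forms generated above issue 'count'
 * back-to-back raw accesses against one FIFO register, with no byte
 * swapping and no barriers. The device below is hypothetical.
 */
#if 0
static void example_fifo_drain(void __iomem *fifo, u16 *buf, unsigned int n)
{
	__raw_readsw(fifo, buf, n);	/* n 16-bit loads into buf[0..n-1] */
	__raw_writesw(fifo, buf, n);	/* and n 16-bit stores back out */
}
#endif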

#ifdef CONFIG_HAS_IOPORT

/*
 * Slowdown I/O port space accesses for antique hardware.
 */
#undef CONF_SLOWDOWN_IO

/*
 * On SuperH I/O ports are memory mapped, so we access them using normal
 * load/store instructions. sh_io_port_base is the virtual address to
 * which all ports are being mapped.
 */
extern const unsigned long sh_io_port_base;

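/*
 * Note: sh_io_port_base is declared const so readers can treat it as
 * fixed after early init; the cast in __set_io_port_base() below
 * deliberately strips the qualifier for the one-time assignment at
 * board setup (this assumes the definition resides in a writable
 * section).
 */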
static inline void __set_io_port_base(unsigned long pbase)
{
	*(unsigned long *)&sh_io_port_base = pbase;
	barrier();
}

#ifdef CONFIG_GENERIC_IOMAP
#define __ioport_map ioport_map
#else
extern void __iomem *__ioport_map(unsigned long addr, unsigned int size);
#endif

#ifdef CONF_SLOWDOWN_IO
#define SLOW_DOWN_IO __raw_readw(sh_io_port_base)
#else
#define SLOW_DOWN_IO
#endif

#define __BUILD_IOPORT_SINGLE(pfx, bwlq, type, p, slow)			\
									\
static inline void pfx##out##bwlq##p(type val, unsigned long port)	\
{									\
	volatile type *__addr;						\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	*__addr = val;							\
	slow;								\
}									\
									\
static inline type pfx##in##bwlq##p(unsigned long port)			\
{									\
	volatile type *__addr;						\
	type __val;							\
									\
	__addr = __ioport_map(port, sizeof(type));			\
	__val = *__addr;						\
	slow;								\
									\
	return __val;							\
}

#define __BUILD_IOPORT_PFX(bus, bwlq, type)				\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, ,)			\
	__BUILD_IOPORT_SINGLE(bus, bwlq, type, _p, SLOW_DOWN_IO)

#define BUILDIO_IOPORT(bwlq, type)					\
	__BUILD_IOPORT_PFX(, bwlq, type)

BUILDIO_IOPORT(b, u8)
BUILDIO_IOPORT(w, u16)
BUILDIO_IOPORT(l, u32)
BUILDIO_IOPORT(q, u64)
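
/*
 * Compiled-out sketch: the BUILDIO_IOPORT() expansions above generate
 * in{b,w,l,q}()/out{b,w,l,q}() and the _p 'pausing' variants, each of
 * which maps the port through __ioport_map() and performs a plain
 * load/store, since SuperH ports are memory mapped. The port number is
 * hypothetical.
 */
#if 0
static u8 example_port_io(unsigned long port)
{
	outb_p(0x0a, port);	/* store, then optional SLOW_DOWN_IO read */
	return inb(port);	/* plain load through the port mapping */
}
#endif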

#define __BUILD_IOPORT_STRING(bwlq, type)				\
									\
static inline void outs##bwlq(unsigned long port, const void *addr,	\
			      unsigned int count)			\
{									\
	const volatile type *__addr = addr;				\
									\
	while (count--) {						\
		out##bwlq(*__addr, port);				\
		__addr++;						\
	}								\
}									\
									\
static inline void ins##bwlq(unsigned long port, void *addr,		\
			     unsigned int count)			\
{									\
	volatile type *__addr = addr;					\
									\
	while (count--) {						\
		*__addr = in##bwlq(port);				\
		__addr++;						\
	}								\
}

__BUILD_IOPORT_STRING(b, u8)
__BUILD_IOPORT_STRING(w, u16)
__BUILD_IOPORT_STRING(l, u32)
__BUILD_IOPORT_STRING(q, u64)
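
/*
 * Compiled-out sketch: the string port forms simply loop the scalar
 * accessors, e.g. for an ATA-style 16-bit data port (hypothetical
 * numbers below).
 */
#if 0
static void example_port_string(unsigned long data_port, u16 *buf)
{
	insw(data_port, buf, 256);	/* 256 x 16-bit port reads into buf */
	outsw(data_port, buf, 256);	/* and the same count back out */
}
#endif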

#endif	/* CONFIG_HAS_IOPORT */

#define IO_SPACE_LIMIT 0xffffffff

/* synco on SH-4A, otherwise a nop */
#define mmiowb()		wmb()

/* We really want to try and get these to memcpy etc */
void memcpy_fromio(void *, const volatile void __iomem *, unsigned long);
void memcpy_toio(volatile void __iomem *, const void *, unsigned long);
void memset_io(volatile void __iomem *, int, unsigned long);

/* Quad-word real-mode I/O, don't ask.. */
unsigned long long peek_real_address_q(unsigned long long addr);
unsigned long long poke_real_address_q(unsigned long long addr,
				       unsigned long long val);

#if !defined(CONFIG_MMU)
#define virt_to_phys(address)	((unsigned long)(address))
#define phys_to_virt(address)	((void *)(address))
#else
#define virt_to_phys(address)	(__pa(address))
#define phys_to_virt(address)	(__va(address))
#endif

/*
 * On 32-bit SH, we traditionally have the whole physical address space
 * mapped at all times (as MIPS does), so "ioremap()" and "iounmap()" do
 * not need to do anything but place the address in the proper segment.
 * This is true for P1 and P2 addresses, as well as some P3 ones.
 * However, most of the P3 addresses and newer cores using extended
 * addressing need to map through page tables, so the ioremap()
 * implementation becomes a bit more complicated.
 *
 * See arch/sh/mm/ioremap.c for additional notes on this.
 *
 * We cheat a bit and always return uncachable areas until we've fixed
 * the drivers to handle caching properly.
 *
 * On the SH-5 the concept of segmentation in the 1:1 PXSEG sense simply
 * doesn't exist, so everything must go through page tables.
 */
#ifdef CONFIG_MMU
void __iomem *__ioremap_caller(phys_addr_t offset, unsigned long size,
			       pgprot_t prot, void *caller);
void __iounmap(void __iomem *addr);

static inline void __iomem *
__ioremap(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	return __ioremap_caller(offset, size, prot, __builtin_return_address(0));
}

static inline void __iomem *
__ioremap_29bit(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
#ifdef CONFIG_29BIT
	phys_addr_t last_addr = offset + size - 1;

	/*
	 * For P1 and P2 space this is trivial, as everything is already
	 * mapped. Uncached accesses for P1 addresses are done through P2.
	 * In the P3 case or for addresses outside of the 29-bit space,
	 * mapping must be done by the PMB or by using page tables.
	 */
	if (likely(PXSEG(offset) < P3SEG && PXSEG(last_addr) < P3SEG)) {
		u64 flags = pgprot_val(prot);

		/*
		 * Anything using the legacy PTEA space attributes needs
		 * to be kicked down to page table mappings.
		 */
		if (unlikely(flags & _PAGE_PCC_MASK))
			return NULL;
		if (unlikely(flags & _PAGE_CACHABLE))
			return (void __iomem *)P1SEGADDR(offset);

		return (void __iomem *)P2SEGADDR(offset);
	}

	/* P4 addresses above the store queues are always mapped. */
	if (unlikely(offset >= P3_ADDR_MAX))
		return (void __iomem *)P4SEGADDR(offset);
#endif

	return NULL;
}

static inline void __iomem *
__ioremap_mode(phys_addr_t offset, unsigned long size, pgprot_t prot)
{
	void __iomem *ret;

	ret = __ioremap_trapped(offset, size);
	if (ret)
		return ret;

	ret = __ioremap_29bit(offset, size, prot);
	if (ret)
		return ret;

	return __ioremap(offset, size, prot);
}
#else
#define __ioremap(offset, size, prot)		((void __iomem *)(offset))
#define __ioremap_mode(offset, size, prot)	((void __iomem *)(offset))
#define __iounmap(addr)				do { } while (0)
#endif /* CONFIG_MMU */

static inline void __iomem *ioremap(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL_NOCACHE);
}

static inline void __iomem *
ioremap_cache(phys_addr_t offset, unsigned long size)
{
	return __ioremap_mode(offset, size, PAGE_KERNEL);
}

#ifdef CONFIG_HAVE_IOREMAP_PROT
static inline void __iomem *
ioremap_prot(phys_addr_t offset, unsigned long size, unsigned long flags)
{
	return __ioremap_mode(offset, size, __pgprot(flags));
}
#endif

#ifdef CONFIG_IOREMAP_FIXED
extern void __iomem *ioremap_fixed(phys_addr_t, unsigned long, pgprot_t);
extern int iounmap_fixed(void __iomem *);
extern void ioremap_fixed_init(void);
#else
static inline void __iomem *
ioremap_fixed(phys_addr_t phys_addr, unsigned long size, pgprot_t prot)
{
	BUG();
	return NULL;
}

static inline void ioremap_fixed_init(void) { }
static inline int iounmap_fixed(void __iomem *addr) { return -EINVAL; }
#endif

#define ioremap_nocache	ioremap
#define iounmap		__iounmap

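/*
 * Compiled-out sketch of the typical lifetime of an MMIO window using
 * the ioremap()/iounmap() pair defined above (the physical address and
 * size are whatever the hypothetical device provides).
 */
#if 0
static int example_ioremap_usage(phys_addr_t phys, unsigned long size)
{
	void __iomem *regs = ioremap(phys, size);	/* uncached mapping */

	if (!regs)
		return -ENOMEM;

	writel(0x1, regs);	/* access the device through the window */
	iounmap(regs);		/* tear down (a no-op without CONFIG_MMU) */
	return 0;
}
#endif
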
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
#define xlate_dev_mem_ptr(p)	__va(p)

/*
 * Convert a virtual cached pointer to an uncached pointer
 */
#define xlate_dev_kmem_ptr(p)	p

#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
int valid_phys_addr_range(unsigned long addr, size_t size);
int valid_mmap_phys_addr_range(unsigned long pfn, size_t size);

#endif /* __KERNEL__ */

#endif /* __ASM_SH_IO_H */