/* SPDX-License-Identifier: GPL-2.0-or-later */
/* Generic I/O port emulation.
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 */
#ifndef __ASM_GENERIC_IO_H
#define __ASM_GENERIC_IO_H

#include <asm/page.h> /* I/O is all done through memory accesses */
#include <linux/string.h> /* for memset() and memcpy() */
#include <linux/types.h>

#ifdef CONFIG_GENERIC_IOMAP
#include <asm-generic/iomap.h>
#endif

#include <asm/mmiowb.h>
#include <asm-generic/pci_iomap.h>

#ifndef __io_br
#define __io_br()	barrier()
#endif

/* prevent prefetching of coherent DMA data ahead of a dma-complete */
#ifndef __io_ar
#ifdef rmb
#define __io_ar(v)	rmb()
#else
#define __io_ar(v)	barrier()
#endif
#endif

/* flush writes to coherent DMA data before possibly triggering a DMA read */
#ifndef __io_bw
#ifdef wmb
#define __io_bw()	wmb()
#else
#define __io_bw()	barrier()
#endif
#endif

/* serialize device access against a spin_unlock, usually handled there. */
#ifndef __io_aw
#define __io_aw()	mmiowb_set_pending()
#endif

#ifndef __io_pbw
#define __io_pbw()	__io_bw()
#endif

#ifndef __io_paw
#define __io_paw()	__io_aw()
#endif

#ifndef __io_pbr
#define __io_pbr()	__io_br()
#endif

#ifndef __io_par
#define __io_par(v)	__io_ar(v)
#endif
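
/*
 * Illustrative sketch (not from the original header): the barriers above are
 * what give readl()/writel() their ordering guarantees against coherent DMA
 * buffers. A completion path might rely on them like this, where "regs",
 * "desc", IRQ_STATUS and DMA_DONE are hypothetical driver names:
 *
 *	if (readl(regs + IRQ_STATUS) & DMA_DONE) {
 *		// __io_ar() inside readl() keeps this descriptor load from
 *		// being speculated ahead of the status read.
 *		status = desc->status;
 *	}
 */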

#if IS_ENABLED(CONFIG_TRACE_MMIO_ACCESS) && !(defined(__DISABLE_TRACE_MMIO__))
#include <linux/tracepoint-defs.h>

DECLARE_TRACEPOINT(rwmmio_write);
DECLARE_TRACEPOINT(rwmmio_post_write);
DECLARE_TRACEPOINT(rwmmio_read);
DECLARE_TRACEPOINT(rwmmio_post_read);

void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
		    unsigned long caller_addr);
void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
			 unsigned long caller_addr);
void log_read_mmio(u8 width, const volatile void __iomem *addr,
		   unsigned long caller_addr);
void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
			unsigned long caller_addr);

#else

static inline void log_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
				  unsigned long caller_addr) {}
static inline void log_post_write_mmio(u64 val, u8 width, volatile void __iomem *addr,
				       unsigned long caller_addr) {}
static inline void log_read_mmio(u8 width, const volatile void __iomem *addr,
				 unsigned long caller_addr) {}
static inline void log_post_read_mmio(u64 val, u8 width, const volatile void __iomem *addr,
				      unsigned long caller_addr) {}

#endif /* CONFIG_TRACE_MMIO_ACCESS */

/*
 * __raw_{read,write}{b,w,l,q}() access memory in native endianness.
 *
 * On some architectures memory mapped IO needs to be accessed differently.
 * On the simple architectures, we just read/write the memory location
 * directly.
 */
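
/*
 * Usage sketch (illustrative only): __raw_readl()/__raw_writel() do no byte
 * swapping and insert no barriers, so they are only safe where the caller
 * handles ordering itself, e.g. filling a device SRAM whose layout matches
 * CPU endianness. "sram", "buf" and "n" are hypothetical names:
 *
 *	for (i = 0; i < n; i++)
 *		__raw_writel(buf[i], sram + 4 * i);
 *	wmb();	// the caller supplies the ordering that __raw_*() omits
 */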

#ifndef __raw_readb
#define __raw_readb __raw_readb
static inline u8 __raw_readb(const volatile void __iomem *addr)
{
	return *(const volatile u8 __force *)addr;
}
#endif

#ifndef __raw_readw
#define __raw_readw __raw_readw
static inline u16 __raw_readw(const volatile void __iomem *addr)
{
	return *(const volatile u16 __force *)addr;
}
#endif

#ifndef __raw_readl
#define __raw_readl __raw_readl
static inline u32 __raw_readl(const volatile void __iomem *addr)
{
	return *(const volatile u32 __force *)addr;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_readq
#define __raw_readq __raw_readq
static inline u64 __raw_readq(const volatile void __iomem *addr)
{
	return *(const volatile u64 __force *)addr;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef __raw_writeb
#define __raw_writeb __raw_writeb
static inline void __raw_writeb(u8 value, volatile void __iomem *addr)
{
	*(volatile u8 __force *)addr = value;
}
#endif

#ifndef __raw_writew
#define __raw_writew __raw_writew
static inline void __raw_writew(u16 value, volatile void __iomem *addr)
{
	*(volatile u16 __force *)addr = value;
}
#endif

#ifndef __raw_writel
#define __raw_writel __raw_writel
static inline void __raw_writel(u32 value, volatile void __iomem *addr)
{
	*(volatile u32 __force *)addr = value;
}
#endif

#ifdef CONFIG_64BIT
#ifndef __raw_writeq
#define __raw_writeq __raw_writeq
static inline void __raw_writeq(u64 value, volatile void __iomem *addr)
{
	*(volatile u64 __force *)addr = value;
}
#endif
#endif /* CONFIG_64BIT */

/*
 * {read,write}{b,w,l,q}() access little endian memory and return result in
 * native endianness.
 */
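
/*
 * Typical driver usage (sketch; "regs", CTRL_REG and CTRL_ENABLE are made-up
 * names): read a little-endian 32-bit register, set a bit, write it back.
 * The implementations below supply the barriers and byte swapping:
 *
 *	u32 ctrl = readl(regs + CTRL_REG);
 *
 *	writel(ctrl | CTRL_ENABLE, regs + CTRL_REG);
 */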

#ifndef readb
#define readb readb
static inline u8 readb(const volatile void __iomem *addr)
{
	u8 val;

	log_read_mmio(8, addr, _THIS_IP_);
	__io_br();
	val = __raw_readb(addr);
	__io_ar(val);
	log_post_read_mmio(val, 8, addr, _THIS_IP_);
	return val;
}
#endif

#ifndef readw
#define readw readw
static inline u16 readw(const volatile void __iomem *addr)
{
	u16 val;

	log_read_mmio(16, addr, _THIS_IP_);
	__io_br();
	val = __le16_to_cpu((__le16 __force)__raw_readw(addr));
	__io_ar(val);
	log_post_read_mmio(val, 16, addr, _THIS_IP_);
	return val;
}
#endif

#ifndef readl
#define readl readl
static inline u32 readl(const volatile void __iomem *addr)
{
	u32 val;

	log_read_mmio(32, addr, _THIS_IP_);
	__io_br();
	val = __le32_to_cpu((__le32 __force)__raw_readl(addr));
	__io_ar(val);
	log_post_read_mmio(val, 32, addr, _THIS_IP_);
	return val;
}
#endif

#ifdef CONFIG_64BIT
#ifndef readq
#define readq readq
static inline u64 readq(const volatile void __iomem *addr)
{
	u64 val;

	log_read_mmio(64, addr, _THIS_IP_);
	__io_br();
	val = __le64_to_cpu((__le64 __force)__raw_readq(addr));
	__io_ar(val);
	log_post_read_mmio(val, 64, addr, _THIS_IP_);
	return val;
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writeb
#define writeb writeb
static inline void writeb(u8 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 8, addr, _THIS_IP_);
	__io_bw();
	__raw_writeb(value, addr);
	__io_aw();
	log_post_write_mmio(value, 8, addr, _THIS_IP_);
}
#endif

#ifndef writew
#define writew writew
static inline void writew(u16 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 16, addr, _THIS_IP_);
	__io_bw();
	__raw_writew((u16 __force)cpu_to_le16(value), addr);
	__io_aw();
	log_post_write_mmio(value, 16, addr, _THIS_IP_);
}
#endif

#ifndef writel
#define writel writel
static inline void writel(u32 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 32, addr, _THIS_IP_);
	__io_bw();
	__raw_writel((u32 __force)__cpu_to_le32(value), addr);
	__io_aw();
	log_post_write_mmio(value, 32, addr, _THIS_IP_);
}
#endif

#ifdef CONFIG_64BIT
#ifndef writeq
#define writeq writeq
static inline void writeq(u64 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 64, addr, _THIS_IP_);
	__io_bw();
	__raw_writeq((u64 __force)__cpu_to_le64(value), addr);
	__io_aw();
	log_post_write_mmio(value, 64, addr, _THIS_IP_);
}
#endif
#endif /* CONFIG_64BIT */

/*
 * {read,write}{b,w,l,q}_relaxed() are like the regular versions, but
 * are not guaranteed to provide ordering against spinlocks or memory
 * accesses.
 */
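
/*
 * Sketch (hypothetical register names): the _relaxed() forms suit hot paths
 * such as interrupt handlers, where the access need not be ordered against
 * normal memory, e.g. re-reading a sequence register until it is stable:
 *
 *	do {
 *		seq = readl_relaxed(regs + SEQ_REG);
 *	} while (seq != readl_relaxed(regs + SEQ_REG));
 */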
#ifndef readb_relaxed
#define readb_relaxed readb_relaxed
static inline u8 readb_relaxed(const volatile void __iomem *addr)
{
	u8 val;

	log_read_mmio(8, addr, _THIS_IP_);
	val = __raw_readb(addr);
	log_post_read_mmio(val, 8, addr, _THIS_IP_);
	return val;
}
#endif

#ifndef readw_relaxed
#define readw_relaxed readw_relaxed
static inline u16 readw_relaxed(const volatile void __iomem *addr)
{
	u16 val;

	log_read_mmio(16, addr, _THIS_IP_);
	val = __le16_to_cpu(__raw_readw(addr));
	log_post_read_mmio(val, 16, addr, _THIS_IP_);
	return val;
}
#endif

#ifndef readl_relaxed
#define readl_relaxed readl_relaxed
static inline u32 readl_relaxed(const volatile void __iomem *addr)
{
	u32 val;

	log_read_mmio(32, addr, _THIS_IP_);
	val = __le32_to_cpu(__raw_readl(addr));
	log_post_read_mmio(val, 32, addr, _THIS_IP_);
	return val;
}
#endif

#if defined(readq) && !defined(readq_relaxed)
#define readq_relaxed readq_relaxed
static inline u64 readq_relaxed(const volatile void __iomem *addr)
{
	u64 val;

	log_read_mmio(64, addr, _THIS_IP_);
	val = __le64_to_cpu(__raw_readq(addr));
	log_post_read_mmio(val, 64, addr, _THIS_IP_);
	return val;
}
#endif

#ifndef writeb_relaxed
#define writeb_relaxed writeb_relaxed
static inline void writeb_relaxed(u8 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 8, addr, _THIS_IP_);
	__raw_writeb(value, addr);
	log_post_write_mmio(value, 8, addr, _THIS_IP_);
}
#endif

#ifndef writew_relaxed
#define writew_relaxed writew_relaxed
static inline void writew_relaxed(u16 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 16, addr, _THIS_IP_);
	__raw_writew(cpu_to_le16(value), addr);
	log_post_write_mmio(value, 16, addr, _THIS_IP_);
}
#endif

#ifndef writel_relaxed
#define writel_relaxed writel_relaxed
static inline void writel_relaxed(u32 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 32, addr, _THIS_IP_);
	__raw_writel(__cpu_to_le32(value), addr);
	log_post_write_mmio(value, 32, addr, _THIS_IP_);
}
#endif

#if defined(writeq) && !defined(writeq_relaxed)
#define writeq_relaxed writeq_relaxed
static inline void writeq_relaxed(u64 value, volatile void __iomem *addr)
{
	log_write_mmio(value, 64, addr, _THIS_IP_);
	__raw_writeq(__cpu_to_le64(value), addr);
	log_post_write_mmio(value, 64, addr, _THIS_IP_);
}
#endif

/*
 * {read,write}s{b,w,l,q}() repeatedly access the same memory address in
 * native endianness in 8-, 16-, 32- or 64-bit chunks (@count times).
 */
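
/*
 * Sketch (illustrative FIFO register): readsl() drains a 32-bit data FIFO
 * into a buffer without advancing the MMIO address. FIFO_DATA and
 * FIFO_WORDS are hypothetical names:
 *
 *	u32 buf[FIFO_WORDS];
 *
 *	readsl(regs + FIFO_DATA, buf, FIFO_WORDS);
 */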
#ifndef readsb
#define readsb readsb
static inline void readsb(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u8 *buf = buffer;

		do {
			u8 x = __raw_readb(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsw
#define readsw readsw
static inline void readsw(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u16 *buf = buffer;

		do {
			u16 x = __raw_readw(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifndef readsl
#define readsl readsl
static inline void readsl(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u32 *buf = buffer;

		do {
			u32 x = __raw_readl(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef readsq
#define readsq readsq
static inline void readsq(const volatile void __iomem *addr, void *buffer,
			  unsigned int count)
{
	if (count) {
		u64 *buf = buffer;

		do {
			u64 x = __raw_readq(addr);
			*buf++ = x;
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef writesb
#define writesb writesb
static inline void writesb(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u8 *buf = buffer;

		do {
			__raw_writeb(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesw
#define writesw writesw
static inline void writesw(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u16 *buf = buffer;

		do {
			__raw_writew(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifndef writesl
#define writesl writesl
static inline void writesl(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u32 *buf = buffer;

		do {
			__raw_writel(*buf++, addr);
		} while (--count);
	}
}
#endif

#ifdef CONFIG_64BIT
#ifndef writesq
#define writesq writesq
static inline void writesq(volatile void __iomem *addr, const void *buffer,
			   unsigned int count)
{
	if (count) {
		const u64 *buf = buffer;

		do {
			__raw_writeq(*buf++, addr);
		} while (--count);
	}
}
#endif
#endif /* CONFIG_64BIT */

#ifndef PCI_IOBASE
#define PCI_IOBASE ((void __iomem *)0)
#endif

#ifndef IO_SPACE_LIMIT
#define IO_SPACE_LIMIT 0xffff
#endif

/*
 * {in,out}{b,w,l}() access little endian I/O. {in,out}{b,w,l}_p() can be
 * implemented on hardware that needs an additional delay for I/O accesses to
 * take effect.
 */
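
/*
 * Sketch (classic PC-style example): port I/O takes small integer port
 * numbers rather than pointers, e.g. reading a legacy RTC register. The
 * RTC_* names are illustrative, not definitions from this header:
 *
 *	outb(RTC_SECONDS, RTC_INDEX_PORT);	// select the register
 *	sec = inb(RTC_DATA_PORT);		// read its value
 */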

#if !defined(inb) && !defined(_inb)
#define _inb _inb
static inline u8 _inb(unsigned long addr)
{
	u8 val;

	__io_pbr();
	val = __raw_readb(PCI_IOBASE + addr);
	__io_par(val);
	return val;
}
#endif

#if !defined(inw) && !defined(_inw)
#define _inw _inw
static inline u16 _inw(unsigned long addr)
{
	u16 val;

	__io_pbr();
	val = __le16_to_cpu((__le16 __force)__raw_readw(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#if !defined(inl) && !defined(_inl)
#define _inl _inl
static inline u32 _inl(unsigned long addr)
{
	u32 val;

	__io_pbr();
	val = __le32_to_cpu((__le32 __force)__raw_readl(PCI_IOBASE + addr));
	__io_par(val);
	return val;
}
#endif

#if !defined(outb) && !defined(_outb)
#define _outb _outb
static inline void _outb(u8 value, unsigned long addr)
{
	__io_pbw();
	__raw_writeb(value, PCI_IOBASE + addr);
	__io_paw();
}
#endif

#if !defined(outw) && !defined(_outw)
#define _outw _outw
static inline void _outw(u16 value, unsigned long addr)
{
	__io_pbw();
	__raw_writew((u16 __force)cpu_to_le16(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

#if !defined(outl) && !defined(_outl)
#define _outl _outl
static inline void _outl(u32 value, unsigned long addr)
{
	__io_pbw();
	__raw_writel((u32 __force)cpu_to_le32(value), PCI_IOBASE + addr);
	__io_paw();
}
#endif

#include <linux/logic_pio.h>

#ifndef inb
#define inb _inb
#endif

#ifndef inw
#define inw _inw
#endif

#ifndef inl
#define inl _inl
#endif

#ifndef outb
#define outb _outb
#endif

#ifndef outw
#define outw _outw
#endif

#ifndef outl
#define outl _outl
#endif

#ifndef inb_p
#define inb_p inb_p
static inline u8 inb_p(unsigned long addr)
{
	return inb(addr);
}
#endif

#ifndef inw_p
#define inw_p inw_p
static inline u16 inw_p(unsigned long addr)
{
	return inw(addr);
}
#endif

#ifndef inl_p
#define inl_p inl_p
static inline u32 inl_p(unsigned long addr)
{
	return inl(addr);
}
#endif

#ifndef outb_p
#define outb_p outb_p
static inline void outb_p(u8 value, unsigned long addr)
{
	outb(value, addr);
}
#endif

#ifndef outw_p
#define outw_p outw_p
static inline void outw_p(u16 value, unsigned long addr)
{
	outw(value, addr);
}
#endif

#ifndef outl_p
#define outl_p outl_p
static inline void outl_p(u32 value, unsigned long addr)
{
	outl(value, addr);
}
#endif

/*
 * {in,out}s{b,w,l}{,_p}() are variants of the above that repeatedly access a
 * single I/O port multiple times.
 */
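
/*
 * Sketch (hypothetical legacy ATA-style transfer): insw() reads a block of
 * 16-bit words from a single port into memory, as PIO-mode disk drivers do.
 * ATA_DATA_PORT is an illustrative name:
 *
 *	u16 sector[256];
 *
 *	insw(ATA_DATA_PORT, sector, ARRAY_SIZE(sector));
 */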

#ifndef insb
#define insb insb
static inline void insb(unsigned long addr, void *buffer, unsigned int count)
{
	readsb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insw
#define insw insw
static inline void insw(unsigned long addr, void *buffer, unsigned int count)
{
	readsw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insl
#define insl insl
static inline void insl(unsigned long addr, void *buffer, unsigned int count)
{
	readsl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsb
#define outsb outsb
static inline void outsb(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesb(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsw
#define outsw outsw
static inline void outsw(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesw(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef outsl
#define outsl outsl
static inline void outsl(unsigned long addr, const void *buffer,
			 unsigned int count)
{
	writesl(PCI_IOBASE + addr, buffer, count);
}
#endif

#ifndef insb_p
#define insb_p insb_p
static inline void insb_p(unsigned long addr, void *buffer, unsigned int count)
{
	insb(addr, buffer, count);
}
#endif

#ifndef insw_p
#define insw_p insw_p
static inline void insw_p(unsigned long addr, void *buffer, unsigned int count)
{
	insw(addr, buffer, count);
}
#endif

#ifndef insl_p
#define insl_p insl_p
static inline void insl_p(unsigned long addr, void *buffer, unsigned int count)
{
	insl(addr, buffer, count);
}
#endif

#ifndef outsb_p
#define outsb_p outsb_p
static inline void outsb_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsb(addr, buffer, count);
}
#endif

#ifndef outsw_p
#define outsw_p outsw_p
static inline void outsw_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsw(addr, buffer, count);
}
#endif

#ifndef outsl_p
#define outsl_p outsl_p
static inline void outsl_p(unsigned long addr, const void *buffer,
			   unsigned int count)
{
	outsl(addr, buffer, count);
}
#endif

#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioread8
#define ioread8 ioread8
static inline u8 ioread8(const volatile void __iomem *addr)
{
	return readb(addr);
}
#endif

#ifndef ioread16
#define ioread16 ioread16
static inline u16 ioread16(const volatile void __iomem *addr)
{
	return readw(addr);
}
#endif

#ifndef ioread32
#define ioread32 ioread32
static inline u32 ioread32(const volatile void __iomem *addr)
{
	return readl(addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64
#define ioread64 ioread64
static inline u64 ioread64(const volatile void __iomem *addr)
{
	return readq(addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8
#define iowrite8 iowrite8
static inline void iowrite8(u8 value, volatile void __iomem *addr)
{
	writeb(value, addr);
}
#endif

#ifndef iowrite16
#define iowrite16 iowrite16
static inline void iowrite16(u16 value, volatile void __iomem *addr)
{
	writew(value, addr);
}
#endif

#ifndef iowrite32
#define iowrite32 iowrite32
static inline void iowrite32(u32 value, volatile void __iomem *addr)
{
	writel(value, addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64
#define iowrite64 iowrite64
static inline void iowrite64(u64 value, volatile void __iomem *addr)
{
	writeq(value, addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread16be
#define ioread16be ioread16be
static inline u16 ioread16be(const volatile void __iomem *addr)
{
	return swab16(readw(addr));
}
#endif

#ifndef ioread32be
#define ioread32be ioread32be
static inline u32 ioread32be(const volatile void __iomem *addr)
{
	return swab32(readl(addr));
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64be
#define ioread64be ioread64be
static inline u64 ioread64be(const volatile void __iomem *addr)
{
	return swab64(readq(addr));
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite16be
#define iowrite16be iowrite16be
static inline void iowrite16be(u16 value, volatile void __iomem *addr)
{
	writew(swab16(value), addr);
}
#endif

#ifndef iowrite32be
#define iowrite32be iowrite32be
static inline void iowrite32be(u32 value, volatile void __iomem *addr)
{
	writel(swab32(value), addr);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64be
#define iowrite64be iowrite64be
static inline void iowrite64be(u64 value, volatile void __iomem *addr)
{
	writeq(swab64(value), addr);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef ioread8_rep
#define ioread8_rep ioread8_rep
static inline void ioread8_rep(const volatile void __iomem *addr, void *buffer,
			       unsigned int count)
{
	readsb(addr, buffer, count);
}
#endif

#ifndef ioread16_rep
#define ioread16_rep ioread16_rep
static inline void ioread16_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsw(addr, buffer, count);
}
#endif

#ifndef ioread32_rep
#define ioread32_rep ioread32_rep
static inline void ioread32_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef ioread64_rep
#define ioread64_rep ioread64_rep
static inline void ioread64_rep(const volatile void __iomem *addr,
				void *buffer, unsigned int count)
{
	readsq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */

#ifndef iowrite8_rep
#define iowrite8_rep iowrite8_rep
static inline void iowrite8_rep(volatile void __iomem *addr,
				const void *buffer,
				unsigned int count)
{
	writesb(addr, buffer, count);
}
#endif

#ifndef iowrite16_rep
#define iowrite16_rep iowrite16_rep
static inline void iowrite16_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesw(addr, buffer, count);
}
#endif

#ifndef iowrite32_rep
#define iowrite32_rep iowrite32_rep
static inline void iowrite32_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesl(addr, buffer, count);
}
#endif

#ifdef CONFIG_64BIT
#ifndef iowrite64_rep
#define iowrite64_rep iowrite64_rep
static inline void iowrite64_rep(volatile void __iomem *addr,
				 const void *buffer,
				 unsigned int count)
{
	writesq(addr, buffer, count);
}
#endif
#endif /* CONFIG_64BIT */
#endif /* CONFIG_GENERIC_IOMAP */

#ifdef __KERNEL__

#include <linux/vmalloc.h>
#define __io_virt(x) ((void __force *)(x))

/*
 * Change virtual addresses to physical addresses and vice versa.
 * These are pretty trivial.
 */
#ifndef virt_to_phys
#define virt_to_phys virt_to_phys
static inline unsigned long virt_to_phys(volatile void *address)
{
	return __pa((unsigned long)address);
}
#endif

#ifndef phys_to_virt
#define phys_to_virt phys_to_virt
static inline void *phys_to_virt(unsigned long address)
{
	return __va(address);
}
#endif
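
/*
 * Usage note (sketch, not from the original header): these helpers are only
 * meaningful for addresses in the kernel's linear mapping, e.g. memory from
 * kmalloc(); they must not be applied to vmalloc() or ioremap() addresses:
 *
 *	void *buf = kmalloc(SZ_4K, GFP_KERNEL);
 *	unsigned long pa = virt_to_phys(buf);	// OK: linear-mapped memory
 */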

/**
 * DOC: ioremap() and ioremap_*() variants
 *
 * Architectures with an MMU are expected to provide ioremap() and iounmap()
 * themselves or rely on GENERIC_IOREMAP. For NOMMU architectures we provide
 * a default no-op implementation that expects that the physical addresses
 * used for MMIO are already marked as uncached, and can be used as kernel
 * virtual addresses.
 *
 * ioremap_wc() and ioremap_wt() can provide more relaxed caching attributes
 * for specific drivers if the architecture chooses to implement them. If they
 * are not implemented we fall back to plain ioremap(). Conversely, ioremap_np()
 * can provide stricter, non-posted write semantics if the architecture
 * implements them.
 */
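
/*
 * Typical probe-time usage (sketch; error paths abbreviated and "res" is a
 * hypothetical struct resource pointer):
 *
 *	void __iomem *regs = ioremap(res->start, resource_size(res));
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	...
 *	iounmap(regs);
 */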
#ifndef CONFIG_MMU
#ifndef ioremap
#define ioremap ioremap
static inline void __iomem *ioremap(phys_addr_t offset, size_t size)
{
	return (void __iomem *)(unsigned long)offset;
}
#endif

#ifndef iounmap
#define iounmap iounmap
static inline void iounmap(volatile void __iomem *addr)
{
}
#endif
#elif defined(CONFIG_GENERIC_IOREMAP)
#include <linux/pgtable.h>

void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot);
void iounmap(volatile void __iomem *addr);

static inline void __iomem *ioremap(phys_addr_t addr, size_t size)
{
	/* _PAGE_IOREMAP needs to be supplied by the architecture */
	return ioremap_prot(addr, size, _PAGE_IOREMAP);
}
#endif /* !CONFIG_MMU || CONFIG_GENERIC_IOREMAP */

#ifndef ioremap_wc
#define ioremap_wc ioremap
#endif

#ifndef ioremap_wt
#define ioremap_wt ioremap
#endif

/*
 * ioremap_uc is special in that we do require an explicit architecture
 * implementation. In general you do not want to use this function in a
 * driver and use plain ioremap, which is uncached by default. Similarly
 * architectures should not implement it unless they have a very good
 * reason.
 */
#ifndef ioremap_uc
#define ioremap_uc ioremap_uc
static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif

/*
 * ioremap_np needs an explicit architecture implementation, as it
 * requests stronger semantics than regular ioremap(). Portable drivers
 * should instead use one of the higher-level abstractions, like
 * devm_ioremap_resource(), to choose the correct variant for any given
 * device and bus. Portable drivers with a good reason to want non-posted
 * write semantics should always provide an ioremap() fallback in case
 * ioremap_np() is not available.
 */
#ifndef ioremap_np
#define ioremap_np ioremap_np
static inline void __iomem *ioremap_np(phys_addr_t offset, size_t size)
{
	return NULL;
}
#endif
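
/*
 * The fallback pattern suggested above, as a sketch ("res" and "size" are
 * hypothetical): try non-posted semantics first, then fall back to plain
 * ioremap() on architectures where ioremap_np() returns NULL:
 *
 *	regs = ioremap_np(res->start, size);
 *	if (!regs)
 *		regs = ioremap(res->start, size);
 */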

#ifdef CONFIG_HAS_IOPORT_MAP
#ifndef CONFIG_GENERIC_IOMAP
#ifndef ioport_map
#define ioport_map ioport_map
static inline void __iomem *ioport_map(unsigned long port, unsigned int nr)
{
	port &= IO_SPACE_LIMIT;
	return (port > MMIO_UPPER_LIMIT) ? NULL : PCI_IOBASE + port;
}
#define ARCH_HAS_GENERIC_IOPORT_MAP
#endif

#ifndef ioport_unmap
#define ioport_unmap ioport_unmap
static inline void ioport_unmap(void __iomem *p)
{
}
#endif
#else /* CONFIG_GENERIC_IOMAP */
extern void __iomem *ioport_map(unsigned long port, unsigned int nr);
extern void ioport_unmap(void __iomem *p);
#endif /* CONFIG_GENERIC_IOMAP */
#endif /* CONFIG_HAS_IOPORT_MAP */

#ifndef CONFIG_GENERIC_IOMAP
#ifndef pci_iounmap
#define ARCH_WANTS_GENERIC_PCI_IOUNMAP
#endif
#endif

#ifndef xlate_dev_mem_ptr
#define xlate_dev_mem_ptr xlate_dev_mem_ptr
static inline void *xlate_dev_mem_ptr(phys_addr_t addr)
{
	return __va(addr);
}
#endif

#ifndef unxlate_dev_mem_ptr
#define unxlate_dev_mem_ptr unxlate_dev_mem_ptr
static inline void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
}
#endif

#ifdef CONFIG_VIRT_TO_BUS
#ifndef virt_to_bus
static inline unsigned long virt_to_bus(void *address)
{
	return (unsigned long)address;
}

static inline void *bus_to_virt(unsigned long address)
{
	return (void *)address;
}
#endif
#endif

#ifndef memset_io
#define memset_io memset_io
/**
 * memset_io - Set a range of I/O memory to a constant value
 * @addr: The beginning of the I/O-memory range to set
 * @value: The value to set the memory to
 * @size: The number of bytes to set
 *
 * Set a range of I/O memory to a given value.
 */
static inline void memset_io(volatile void __iomem *addr, int value,
			     size_t size)
{
	memset(__io_virt(addr), value, size);
}
#endif

#ifndef memcpy_fromio
#define memcpy_fromio memcpy_fromio
/**
 * memcpy_fromio - Copy a block of data from I/O memory
 * @buffer: The (RAM) destination for the copy
 * @addr: The (I/O memory) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data from I/O memory.
 */
static inline void memcpy_fromio(void *buffer,
				 const volatile void __iomem *addr,
				 size_t size)
{
	memcpy(buffer, __io_virt(addr), size);
}
#endif

#ifndef memcpy_toio
#define memcpy_toio memcpy_toio
/**
 * memcpy_toio - Copy a block of data into I/O memory
 * @addr: The (I/O memory) destination for the copy
 * @buffer: The (RAM) source for the data
 * @size: The number of bytes to copy
 *
 * Copy a block of data to I/O memory.
 */
static inline void memcpy_toio(volatile void __iomem *addr, const void *buffer,
			       size_t size)
{
	memcpy(__io_virt(addr), buffer, size);
}
#endif
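
/*
 * Combined usage sketch ("win", MBOX_SIZE, "cmd", "reply" and REPLY_OFF are
 * hypothetical): clear a device mailbox window, copy a command in, and read
 * the reply back out:
 *
 *	memset_io(win, 0, MBOX_SIZE);
 *	memcpy_toio(win, &cmd, sizeof(cmd));
 *	...
 *	memcpy_fromio(&reply, win + REPLY_OFF, sizeof(reply));
 */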

extern int devmem_is_allowed(unsigned long pfn);

#endif /* __KERNEL__ */

#endif /* __ASM_GENERIC_IO_H */