• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright © 2008 Keith Packard <keithp@keithp.com>
4  */
5 
6 #ifndef _LINUX_IO_MAPPING_H
7 #define _LINUX_IO_MAPPING_H
8 
9 #include <linux/types.h>
10 #include <linux/slab.h>
11 #include <linux/bug.h>
12 #include <linux/io.h>
13 #include <linux/pgtable.h>
14 #include <asm/page.h>
15 
16 /*
17  * The io_mapping mechanism provides an abstraction for mapping
18  * individual pages from an io device to the CPU in an efficient fashion.
19  *
20  * See Documentation/driver-api/io-mapping.rst
21  */
22 
23 struct io_mapping {
24 	resource_size_t base;
25 	unsigned long size;
26 	pgprot_t prot;
27 	void __iomem *iomem;
28 };
29 
30 #ifdef CONFIG_HAVE_ATOMIC_IOMAP
31 
32 #include <linux/pfn.h>
33 #include <asm/iomap.h>
34 /*
35  * For small address space machines, mapping large objects
36  * into the kernel virtual space isn't practical. Where
37  * available, use fixmap support to dynamically map pages
38  * of the object at run time.
39  */
40 
41 static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping * iomap,resource_size_t base,unsigned long size)42 io_mapping_init_wc(struct io_mapping *iomap,
43 		   resource_size_t base,
44 		   unsigned long size)
45 {
46 	pgprot_t prot;
47 
48 	if (iomap_create_wc(base, size, &prot))
49 		return NULL;
50 
51 	iomap->base = base;
52 	iomap->size = size;
53 	iomap->prot = prot;
54 	return iomap;
55 }
56 
57 static inline void
io_mapping_fini(struct io_mapping * mapping)58 io_mapping_fini(struct io_mapping *mapping)
59 {
60 	iomap_free(mapping->base, mapping->size);
61 }
62 
63 /* Atomic map/unmap */
64 static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping * mapping,unsigned long offset)65 io_mapping_map_atomic_wc(struct io_mapping *mapping,
66 			 unsigned long offset)
67 {
68 	resource_size_t phys_addr;
69 
70 	BUG_ON(offset >= mapping->size);
71 	phys_addr = mapping->base + offset;
72 	return iomap_atomic_prot_pfn(PHYS_PFN(phys_addr), mapping->prot);
73 }
74 
75 static inline void
io_mapping_unmap_atomic(void __iomem * vaddr)76 io_mapping_unmap_atomic(void __iomem *vaddr)
77 {
78 	iounmap_atomic(vaddr);
79 }
80 
81 static inline void __iomem *
io_mapping_map_wc(struct io_mapping * mapping,unsigned long offset,unsigned long size)82 io_mapping_map_wc(struct io_mapping *mapping,
83 		  unsigned long offset,
84 		  unsigned long size)
85 {
86 	resource_size_t phys_addr;
87 
88 	BUG_ON(offset >= mapping->size);
89 	phys_addr = mapping->base + offset;
90 
91 	return ioremap_wc(phys_addr, size);
92 }
93 
94 static inline void
io_mapping_unmap(void __iomem * vaddr)95 io_mapping_unmap(void __iomem *vaddr)
96 {
97 	iounmap(vaddr);
98 }
99 
100 #else
101 
102 #include <linux/uaccess.h>
103 
104 /* Create the io_mapping object */
105 static inline struct io_mapping *
io_mapping_init_wc(struct io_mapping * iomap,resource_size_t base,unsigned long size)106 io_mapping_init_wc(struct io_mapping *iomap,
107 		   resource_size_t base,
108 		   unsigned long size)
109 {
110 	iomap->iomem = ioremap_wc(base, size);
111 	if (!iomap->iomem)
112 		return NULL;
113 
114 	iomap->base = base;
115 	iomap->size = size;
116 #if defined(pgprot_noncached_wc) /* archs can't agree on a name ... */
117 	iomap->prot = pgprot_noncached_wc(PAGE_KERNEL);
118 #elif defined(pgprot_writecombine)
119 	iomap->prot = pgprot_writecombine(PAGE_KERNEL);
120 #else
121 	iomap->prot = pgprot_noncached(PAGE_KERNEL);
122 #endif
123 
124 	return iomap;
125 }
126 
127 static inline void
io_mapping_fini(struct io_mapping * mapping)128 io_mapping_fini(struct io_mapping *mapping)
129 {
130 	iounmap(mapping->iomem);
131 }
132 
133 /* Non-atomic map/unmap */
134 static inline void __iomem *
io_mapping_map_wc(struct io_mapping * mapping,unsigned long offset,unsigned long size)135 io_mapping_map_wc(struct io_mapping *mapping,
136 		  unsigned long offset,
137 		  unsigned long size)
138 {
139 	return mapping->iomem + offset;
140 }
141 
142 static inline void
io_mapping_unmap(void __iomem * vaddr)143 io_mapping_unmap(void __iomem *vaddr)
144 {
145 }
146 
147 /* Atomic map/unmap */
148 static inline void __iomem *
io_mapping_map_atomic_wc(struct io_mapping * mapping,unsigned long offset)149 io_mapping_map_atomic_wc(struct io_mapping *mapping,
150 			 unsigned long offset)
151 {
152 	preempt_disable();
153 	pagefault_disable();
154 	return io_mapping_map_wc(mapping, offset, PAGE_SIZE);
155 }
156 
157 static inline void
io_mapping_unmap_atomic(void __iomem * vaddr)158 io_mapping_unmap_atomic(void __iomem *vaddr)
159 {
160 	io_mapping_unmap(vaddr);
161 	pagefault_enable();
162 	preempt_enable();
163 }
164 
165 #endif /* HAVE_ATOMIC_IOMAP */
166 
167 static inline struct io_mapping *
io_mapping_create_wc(resource_size_t base,unsigned long size)168 io_mapping_create_wc(resource_size_t base,
169 		     unsigned long size)
170 {
171 	struct io_mapping *iomap;
172 
173 	iomap = kmalloc(sizeof(*iomap), GFP_KERNEL);
174 	if (!iomap)
175 		return NULL;
176 
177 	if (!io_mapping_init_wc(iomap, base, size)) {
178 		kfree(iomap);
179 		return NULL;
180 	}
181 
182 	return iomap;
183 }
184 
/* Release a mapping obtained from io_mapping_create_wc(). */
static inline void
io_mapping_free(struct io_mapping *iomap)
{
	io_mapping_fini(iomap);
	kfree(iomap);
}
191 
192 #endif /* _LINUX_IO_MAPPING_H */
193