/*
 * Trapped io support
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * Intercept io operations by trapping.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/bitops.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/init.h>
#include <asm/mmu_context.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/io_trapped.h>

#define TRAPPED_PAGES_MAX 16

#ifdef CONFIG_HAS_IOPORT_MAP
LIST_HEAD(trapped_io);
EXPORT_SYMBOL_GPL(trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
LIST_HEAD(trapped_mem);
EXPORT_SYMBOL_GPL(trapped_mem);
#endif
static DEFINE_SPINLOCK(trapped_lock);

static int trapped_io_disable __read_mostly;

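/*
 * Booting with "noiotrap" on the kernel command line disables
 * trapped I/O emulation entirely.
 */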
static int __init trapped_io_setup(char *__unused)
{
	trapped_io_disable = 1;
	return 1;
}
__setup("noiotrap", trapped_io_setup);

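/*
 * Register a trapped I/O region. The page-aligned tiop structure is
 * mapped n times with vmap() using PAGE_NONE protection, so every
 * access to the resulting virtual window faults. The fault handler
 * recovers the descriptor from the page tables (see lookup_tiop())
 * and emulates the access against the real resource.
 *
 * A board would typically register something like this (a sketch;
 * names and field values are illustrative only):
 *
 *	static struct trapped_io cf_trapped_io __page_aligned_data = {
 *		.resource		= cf_resources,
 *		.num_resources		= 2,
 *		.minimum_bus_width	= 16,
 *	};
 *	register_trapped_io(&cf_trapped_io);
 */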
int register_trapped_io(struct trapped_io *tiop)
{
	struct resource *res;
	unsigned long len = 0, flags = 0;
	struct page *pages[TRAPPED_PAGES_MAX];
	int k, n;

	if (unlikely(trapped_io_disable))
		return 0;

	/* structure must be page aligned */
	if ((unsigned long)tiop & (PAGE_SIZE - 1))
		goto bad;

	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len += roundup(resource_size(res), PAGE_SIZE);
		flags |= res->flags;
	}

	/* support IORESOURCE_IO _or_ MEM, not both */
	if (hweight_long(flags) != 1)
		goto bad;

	n = len >> PAGE_SHIFT;

	if (n >= TRAPPED_PAGES_MAX)
		goto bad;

	for (k = 0; k < n; k++)
		pages[k] = virt_to_page(tiop);

	tiop->virt_base = vmap(pages, n, VM_MAP, PAGE_NONE);
	if (!tiop->virt_base)
		goto bad;

	len = 0;
	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		pr_info("trapped io 0x%08lx overrides %s 0x%08lx\n",
			(unsigned long)(tiop->virt_base + len),
			res->flags & IORESOURCE_IO ? "io" : "mmio",
			(unsigned long)res->start);
		len += roundup(resource_size(res), PAGE_SIZE);
	}

	tiop->magic = IO_TRAPPED_MAGIC;
	INIT_LIST_HEAD(&tiop->list);
	spin_lock_irq(&trapped_lock);
#ifdef CONFIG_HAS_IOPORT_MAP
	if (flags & IORESOURCE_IO)
		list_add(&tiop->list, &trapped_io);
#endif
#ifdef CONFIG_HAS_IOMEM
	if (flags & IORESOURCE_MEM)
		list_add(&tiop->list, &trapped_mem);
#endif
	spin_unlock_irq(&trapped_lock);

	return 0;
bad:
	pr_warning("unable to install trapped io filter\n");
	return -1;
}
EXPORT_SYMBOL_GPL(register_trapped_io);

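/*
 * Look up the trapped virtual address covering a physical resource
 * that starts at @offset, searching the given list of registered
 * regions. Used by the ioport/iomem mapping code to redirect mappings
 * into the trapped window. The @size argument is currently unused.
 */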
void __iomem *match_trapped_io_handler(struct list_head *list,
				       unsigned long offset,
				       unsigned long size)
{
	unsigned long voffs;
	struct trapped_io *tiop;
	struct resource *res;
	int k, len;
	unsigned long flags;

	spin_lock_irqsave(&trapped_lock, flags);
	list_for_each_entry(tiop, list, list) {
		voffs = 0;
		for (k = 0; k < tiop->num_resources; k++) {
			res = tiop->resource + k;
			if (res->start == offset) {
				spin_unlock_irqrestore(&trapped_lock, flags);
				return tiop->virt_base + voffs;
			}

			len = resource_size(res);
			voffs += roundup(len, PAGE_SIZE);
		}
	}
	spin_unlock_irqrestore(&trapped_lock, flags);
	return NULL;
}
EXPORT_SYMBOL_GPL(match_trapped_io_handler);

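/*
 * Walk the kernel page tables for a faulting address inside a trapped
 * window. Because register_trapped_io() mapped every page of the
 * window to the page holding the tiop structure itself, the pfn in
 * the PTE converts straight back to the descriptor's kernel address.
 */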
static struct trapped_io *lookup_tiop(unsigned long address)
{
	pgd_t *pgd_k;
	pud_t *pud_k;
	pmd_t *pmd_k;
	pte_t *pte_k;
	pte_t entry;

	pgd_k = swapper_pg_dir + pgd_index(address);
	if (!pgd_present(*pgd_k))
		return NULL;

	pud_k = pud_offset(pgd_k, address);
	if (!pud_present(*pud_k))
		return NULL;

	pmd_k = pmd_offset(pud_k, address);
	if (!pmd_present(*pmd_k))
		return NULL;

	pte_k = pte_offset_kernel(pmd_k, address);
	entry = *pte_k;

	return pfn_to_kaddr(pte_pfn(entry));
}

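/*
 * Translate a faulting virtual address inside a trapped window back
 * to the physical address of the underlying resource. Returns 0 if
 * the address lies beyond all of the tiop's resources.
 */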
static unsigned long lookup_address(struct trapped_io *tiop,
				    unsigned long address)
{
	struct resource *res;
	unsigned long vaddr = (unsigned long)tiop->virt_base;
	unsigned long len;
	int k;

	for (k = 0; k < tiop->num_resources; k++) {
		res = tiop->resource + k;
		len = roundup(resource_size(res), PAGE_SIZE);
		if (address < (vaddr + len))
			return res->start + (address - vaddr);
		vaddr += len;
	}
	return 0;
}

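/*
 * Perform one emulated transfer: read src_len bytes from src_addr and
 * write dst_len bytes to dst_addr, widening or narrowing the value
 * through a 64-bit temporary. Unsupported widths leave tmp at zero.
 */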
static unsigned long long copy_word(unsigned long src_addr, int src_len,
				    unsigned long dst_addr, int dst_len)
{
	unsigned long long tmp = 0;

	switch (src_len) {
	case 1:
		tmp = __raw_readb(src_addr);
		break;
	case 2:
		tmp = __raw_readw(src_addr);
		break;
	case 4:
		tmp = __raw_readl(src_addr);
		break;
	case 8:
		tmp = __raw_readq(src_addr);
		break;
	}

	switch (dst_len) {
	case 1:
		__raw_writeb(tmp, dst_addr);
		break;
	case 2:
		__raw_writew(tmp, dst_addr);
		break;
	case 4:
		__raw_writel(tmp, dst_addr);
		break;
	case 8:
		__raw_writeq(tmp, dst_addr);
		break;
	}

	return tmp;
}

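/*
 * Emulated read from a trapped device address: src is the faulting
 * virtual address, dst the CPU-side buffer. The device-side access is
 * widened to at least the device's minimum bus width. Returns 0 on
 * success, cnt if the address could not be translated.
 */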
static unsigned long from_device(void *dst, const void *src, unsigned long cnt)
{
	struct trapped_io *tiop;
	unsigned long src_addr = (unsigned long)src;
	unsigned long long tmp;

	pr_debug("trapped io read 0x%08lx (%ld)\n", src_addr, cnt);
	tiop = lookup_tiop(src_addr);
	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

	src_addr = lookup_address(tiop, src_addr);
	if (!src_addr)
		return cnt;

	tmp = copy_word(src_addr,
			max_t(unsigned long, cnt,
			      (tiop->minimum_bus_width / 8)),
			(unsigned long)dst, cnt);

	pr_debug("trapped io read 0x%08lx -> 0x%08llx\n", src_addr, tmp);
	return 0;
}

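/*
 * Emulated write to a trapped device address: dst is the faulting
 * virtual address, src the CPU-side value. The device-side access is
 * widened to at least the device's minimum bus width. Returns 0 on
 * success, cnt if the address could not be translated.
 */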
static unsigned long to_device(void *dst, const void *src, unsigned long cnt)
{
	struct trapped_io *tiop;
	unsigned long dst_addr = (unsigned long)dst;
	unsigned long long tmp;

	pr_debug("trapped io write 0x%08lx (%ld)\n", dst_addr, cnt);
	tiop = lookup_tiop(dst_addr);
	WARN_ON(!tiop || (tiop->magic != IO_TRAPPED_MAGIC));

	dst_addr = lookup_address(tiop, dst_addr);
	if (!dst_addr)
		return cnt;

	tmp = copy_word((unsigned long)src, cnt,
			dst_addr, max_t(unsigned long, cnt,
					(tiop->minimum_bus_width / 8)));

	pr_debug("trapped io write 0x%08lx -> 0x%08llx\n", dst_addr, tmp);
	return 0;
}

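/* from/to callbacks handed to the unaligned access emulation code */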
static struct mem_access trapped_io_access = {
	from_device,
	to_device,
};

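/*
 * Called from the fault handler for addresses inside a trapped
 * window. Fetches the faulting instruction and reuses the
 * unaligned-access decoder to emulate it through trapped_io_access.
 * Returns 1 if the access was handled, 0 otherwise.
 */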
int handle_trapped_io(struct pt_regs *regs, unsigned long address)
{
	mm_segment_t oldfs;
	insn_size_t instruction;
	int tmp;

	if (trapped_io_disable)
		return 0;
	if (!lookup_tiop(address))
		return 0;

	WARN_ON(user_mode(regs));

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	if (copy_from_user(&instruction, (void *)(regs->pc),
			   sizeof(instruction))) {
		set_fs(oldfs);
		return 0;
	}

	tmp = handle_unaligned_access(instruction, regs,
				      &trapped_io_access, 1, address);
	set_fs(oldfs);
	return tmp == 0;
}