/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
13 #include <linux/memremap.h>
14 #include <linux/rculist.h>
15 #include <linux/export.h>
16 #include <linux/ioport.h>
17 #include <linux/module.h>
18 #include <linux/types.h>
19 #include <linux/pfn_t.h>
20 #include <linux/acpi.h>
21 #include <linux/io.h>
22 #include <linux/mm.h>
23 #include "nfit_test.h"
24
25 static LIST_HEAD(iomap_head);
26
27 static struct iomap_ops {
28 nfit_test_lookup_fn nfit_test_lookup;
29 nfit_test_evaluate_dsm_fn evaluate_dsm;
30 struct list_head list;
31 } iomap_ops = {
32 .list = LIST_HEAD_INIT(iomap_ops.list),
33 };
34
/*
 * nfit_test_setup() - publish the test module's callbacks.
 * @lookup: resolves a physical address to an emulated test resource
 * @evaluate: handles ACPI _DSM evaluation for emulated devices
 *
 * Once registered on iomap_head, the wrapped kernel entry points below
 * divert accesses that fall inside test-owned ranges.
 */
void nfit_test_setup(nfit_test_lookup_fn lookup,
		nfit_test_evaluate_dsm_fn evaluate)
{
	iomap_ops.evaluate_dsm = evaluate;
	iomap_ops.nfit_test_lookup = lookup;
	list_add_rcu(&iomap_ops.list, &iomap_head);
}
EXPORT_SYMBOL(nfit_test_setup);
43
nfit_test_teardown(void)44 void nfit_test_teardown(void)
45 {
46 list_del_rcu(&iomap_ops.list);
47 synchronize_rcu();
48 }
49 EXPORT_SYMBOL(nfit_test_teardown);
50
/*
 * Resolve @resource through the registered lookup callback, if one is
 * present.  Caller must hold rcu_read_lock().
 */
static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
{
	struct iomap_ops *ops = list_first_or_null_rcu(&iomap_head,
			typeof(*ops), list);

	if (!ops)
		return NULL;
	return ops->nfit_test_lookup(resource);
}
60
/* RCU-protected wrapper around __get_nfit_res(). */
struct nfit_test_resource *get_nfit_res(resource_size_t resource)
{
	struct nfit_test_resource *res;

	rcu_read_lock();
	res = __get_nfit_res(resource);
	rcu_read_unlock();
	return res;
}
EXPORT_SYMBOL(get_nfit_res);
72
__nfit_test_ioremap(resource_size_t offset,unsigned long size,void __iomem * (* fallback_fn)(resource_size_t,unsigned long))73 void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
74 void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
75 {
76 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
77
78 if (nfit_res)
79 return (void __iomem *) nfit_res->buf + offset
80 - nfit_res->res.start;
81 return fallback_fn(offset, size);
82 }
83
__wrap_devm_ioremap_nocache(struct device * dev,resource_size_t offset,unsigned long size)84 void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
85 resource_size_t offset, unsigned long size)
86 {
87 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
88
89 if (nfit_res)
90 return (void __iomem *) nfit_res->buf + offset
91 - nfit_res->res.start;
92 return devm_ioremap_nocache(dev, offset, size);
93 }
94 EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
95
/*
 * devm_memremap() interposer: hand back the test buffer for emulated
 * ranges, otherwise perform a real devm_memremap().
 */
void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (!nfit_res)
		return devm_memremap(dev, offset, size, flags);
	return nfit_res->buf + offset - nfit_res->res.start;
}
EXPORT_SYMBOL(__wrap_devm_memremap);
106
nfit_test_kill(void * _pgmap)107 static void nfit_test_kill(void *_pgmap)
108 {
109 struct dev_pagemap *pgmap = _pgmap;
110
111 pgmap->kill(pgmap->ref);
112 }
113
__wrap_devm_memremap_pages(struct device * dev,struct dev_pagemap * pgmap)114 void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
115 {
116 resource_size_t offset = pgmap->res.start;
117 struct nfit_test_resource *nfit_res = get_nfit_res(offset);
118
119 if (nfit_res) {
120 int rc;
121
122 rc = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
123 if (rc)
124 return ERR_PTR(rc);
125 return nfit_res->buf + offset - nfit_res->res.start;
126 }
127 return devm_memremap_pages(dev, pgmap);
128 }
129 EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
130
__wrap_phys_to_pfn_t(phys_addr_t addr,unsigned long flags)131 pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
132 {
133 struct nfit_test_resource *nfit_res = get_nfit_res(addr);
134
135 if (nfit_res)
136 flags &= ~PFN_MAP;
137 return phys_to_pfn_t(addr, flags);
138 }
139 EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
140
/*
 * memremap() interposer: emulated ranges resolve to the test buffer,
 * everything else is remapped for real.
 */
void *__wrap_memremap(resource_size_t offset, size_t size,
		unsigned long flags)
{
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	if (!nfit_res)
		return memremap(offset, size, flags);
	return nfit_res->buf + offset - nfit_res->res.start;
}
EXPORT_SYMBOL(__wrap_memremap);
151
/*
 * devm_memunmap() interposer: test-buffer "mappings" need no unmap, so
 * only pass real mappings through.
 */
void __wrap_devm_memunmap(struct device *dev, void *addr)
{
	if (get_nfit_res((long) addr))
		return;
	devm_memunmap(dev, addr);
}
EXPORT_SYMBOL(__wrap_devm_memunmap);
161
__wrap_ioremap_nocache(resource_size_t offset,unsigned long size)162 void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
163 {
164 return __nfit_test_ioremap(offset, size, ioremap_nocache);
165 }
166 EXPORT_SYMBOL(__wrap_ioremap_nocache);
167
__wrap_ioremap_wc(resource_size_t offset,unsigned long size)168 void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
169 {
170 return __nfit_test_ioremap(offset, size, ioremap_wc);
171 }
172 EXPORT_SYMBOL(__wrap_ioremap_wc);
173
/*
 * iounmap() interposer: pointers into the test buffer were never really
 * mapped, so silently ignore them.
 */
void __wrap_iounmap(volatile void __iomem *addr)
{
	if (get_nfit_res((long) addr))
		return;
	iounmap(addr);
}
EXPORT_SYMBOL(__wrap_iounmap);
182
/*
 * memunmap() interposer: skip unmapping for pointers that resolve to an
 * emulated resource.
 */
void __wrap_memunmap(void *addr)
{
	if (get_nfit_res((long) addr))
		return;
	memunmap(addr);
}
EXPORT_SYMBOL(__wrap_memunmap);
192
193 static bool nfit_test_release_region(struct device *dev,
194 struct resource *parent, resource_size_t start,
195 resource_size_t n);
196
nfit_devres_release(struct device * dev,void * data)197 static void nfit_devres_release(struct device *dev, void *data)
198 {
199 struct resource *res = *((struct resource **) data);
200
201 WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
202 resource_size(res)));
203 }
204
match(struct device * dev,void * __res,void * match_data)205 static int match(struct device *dev, void *__res, void *match_data)
206 {
207 struct resource *res = *((struct resource **) __res);
208 resource_size_t start = *((resource_size_t *) match_data);
209
210 return res->start == start;
211 }
212
nfit_test_release_region(struct device * dev,struct resource * parent,resource_size_t start,resource_size_t n)213 static bool nfit_test_release_region(struct device *dev,
214 struct resource *parent, resource_size_t start,
215 resource_size_t n)
216 {
217 if (parent == &iomem_resource) {
218 struct nfit_test_resource *nfit_res = get_nfit_res(start);
219
220 if (nfit_res) {
221 struct nfit_test_request *req;
222 struct resource *res = NULL;
223
224 if (dev) {
225 devres_release(dev, nfit_devres_release, match,
226 &start);
227 return true;
228 }
229
230 spin_lock(&nfit_res->lock);
231 list_for_each_entry(req, &nfit_res->requests, list)
232 if (req->res.start == start) {
233 res = &req->res;
234 list_del(&req->list);
235 break;
236 }
237 spin_unlock(&nfit_res->lock);
238
239 WARN(!res || resource_size(res) != n,
240 "%s: start: %llx n: %llx mismatch: %pr\n",
241 __func__, start, n, res);
242 if (res)
243 kfree(req);
244 return true;
245 }
246 }
247 return false;
248 }
249
/*
 * Emulate __request_region()/__devm_request_region() for test-owned
 * ranges.
 * @dev: non-NULL for the devm variant (release is tied to the device)
 * @parent: only &iomem_resource spans are candidates for emulation
 * @start, @n: requested span
 * @name, @flags: recorded on the tracking struct resource
 *
 * Returns the tracking resource on success, NULL on overflow, conflict,
 * or allocation failure.  Non-emulated requests fall through to the real
 * region helpers.
 */
static struct resource *nfit_test_request_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n, const char *name, int flags)
{
	struct nfit_test_resource *nfit_res;

	if (parent == &iomem_resource) {
		nfit_res = get_nfit_res(start);
		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			/* Reject spans running past the emulated resource. */
			if (start + n > nfit_res->res.start
					+ resource_size(&nfit_res->res)) {
				pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
						__func__, start, n,
						&nfit_res->res);
				return NULL;
			}

			/* Refuse double-requests of the same start address. */
			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (start == req->res.start) {
					res = &req->res;
					break;
				}
			spin_unlock(&nfit_res->lock);

			if (res) {
				WARN(1, "%pr already busy\n", res);
				return NULL;
			}

			req = kzalloc(sizeof(*req), GFP_KERNEL);
			if (!req)
				return NULL;
			INIT_LIST_HEAD(&req->list);
			res = &req->res;

			res->start = start;
			res->end = start + n - 1;
			res->name = name;
			res->flags = resource_type(parent);
			res->flags |= IORESOURCE_BUSY | flags;
			spin_lock(&nfit_res->lock);
			list_add(&req->list, &nfit_res->requests);
			spin_unlock(&nfit_res->lock);

			if (dev) {
				struct resource **d;

				d = devres_alloc(nfit_devres_release,
						sizeof(struct resource *),
						GFP_KERNEL);
				if (!d) {
					/*
					 * Unwind the tracking entry so a
					 * failed devm request neither leaks
					 * @req nor leaves the span marked
					 * busy forever.
					 */
					spin_lock(&nfit_res->lock);
					list_del(&req->list);
					spin_unlock(&nfit_res->lock);
					kfree(req);
					return NULL;
				}
				*d = res;
				devres_add(dev, d);
			}

			pr_debug("%s: %pr\n", __func__, res);
			return res;
		}
	}
	if (dev)
		return __devm_request_region(dev, parent, start, n, name);
	return __request_region(parent, start, n, name, flags);
}
318
/* __request_region() interposer (non-devm path). */
struct resource *__wrap___request_region(struct resource *parent,
		resource_size_t start, resource_size_t n, const char *name,
		int flags)
{
	return nfit_test_request_region(NULL, parent, start, n, name, flags);
}
EXPORT_SYMBOL(__wrap___request_region);
326
__wrap_insert_resource(struct resource * parent,struct resource * res)327 int __wrap_insert_resource(struct resource *parent, struct resource *res)
328 {
329 if (get_nfit_res(res->start))
330 return 0;
331 return insert_resource(parent, res);
332 }
333 EXPORT_SYMBOL(__wrap_insert_resource);
334
__wrap_remove_resource(struct resource * res)335 int __wrap_remove_resource(struct resource *res)
336 {
337 if (get_nfit_res(res->start))
338 return 0;
339 return remove_resource(res);
340 }
341 EXPORT_SYMBOL(__wrap_remove_resource);
342
/* __devm_request_region() interposer: requires a device, flags are 0. */
struct resource *__wrap___devm_request_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n, const char *name)
{
	if (!dev)
		return NULL;
	return nfit_test_request_region(dev, parent, start, n, name, 0);
}
EXPORT_SYMBOL(__wrap___devm_request_region);
352
/* __release_region() interposer (non-devm path). */
void __wrap___release_region(struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	/* Only fall through when the span is not test-owned. */
	if (!nfit_test_release_region(NULL, parent, start, n))
		__release_region(parent, start, n);
}
EXPORT_SYMBOL(__wrap___release_region);
360
/* __devm_release_region() interposer. */
void __wrap___devm_release_region(struct device *dev, struct resource *parent,
		resource_size_t start, resource_size_t n)
{
	/* Only fall through when the span is not test-owned. */
	if (!nfit_test_release_region(dev, parent, start, n))
		__devm_release_region(dev, parent, start, n);
}
EXPORT_SYMBOL(__wrap___devm_release_region);
368
__wrap_acpi_evaluate_object(acpi_handle handle,acpi_string path,struct acpi_object_list * p,struct acpi_buffer * buf)369 acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
370 struct acpi_object_list *p, struct acpi_buffer *buf)
371 {
372 struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
373 union acpi_object **obj;
374
375 if (!nfit_res || strcmp(path, "_FIT") || !buf)
376 return acpi_evaluate_object(handle, path, p, buf);
377
378 obj = nfit_res->buf;
379 buf->length = sizeof(union acpi_object);
380 buf->pointer = *obj;
381 return AE_OK;
382 }
383 EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
384
/*
 * acpi_evaluate_dsm() interposer: give the registered test callback
 * first crack at the _DSM; an ERR_PTR result (including the -ENXIO
 * default when no callback is registered) falls back to firmware.
 */
union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
		u64 rev, u64 func, union acpi_object *argv4)
{
	struct iomap_ops *ops;
	union acpi_object *obj = ERR_PTR(-ENXIO);

	rcu_read_lock();
	ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
	if (ops)
		obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
	rcu_read_unlock();

	if (!IS_ERR(obj))
		return obj;
	return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
402
403 MODULE_LICENSE("GPL v2");
404