/*
 * Copyright(c) 2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/device.h>
#include <linux/types.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/memory_hotplug.h>

#ifndef ioremap_cache
/* temporary while we convert existing ioremap_cache users to memremap */
__weak void __iomem *ioremap_cache(resource_size_t offset, unsigned long size)
{
	return ioremap(offset, size);
}
#endif

static void *try_ram_remap(resource_size_t offset, size_t size)
{
	struct page *page = pfn_to_page(offset >> PAGE_SHIFT);

	/* In the simple case just return the existing linear address */
	if (!PageHighMem(page))
		return __va(offset);
	return NULL; /* fallback to ioremap_cache */
}

/**
 * memremap() - remap an iomem_resource as cacheable memory
 * @offset: iomem resource start address
 * @size: size of remap
 * @flags: either MEMREMAP_WB or MEMREMAP_WT
 *
 * memremap() is "ioremap" for cases where it is known that the resource
 * being mapped does not have i/o side effects and the __iomem
 * annotation is not applicable.
 *
 * MEMREMAP_WB - matches the default mapping for "System RAM" on
 * the architecture. This is usually a read-allocate write-back cache.
 * Moreover, if MEMREMAP_WB is specified and the requested remap region is
 * RAM, memremap() will bypass establishing a new mapping and instead return
 * a pointer into the direct map.
 *
 * MEMREMAP_WT - establish a mapping whereby writes either bypass the
 * cache or are written through to memory and never exist in a
 * cache-dirty state with respect to program visibility. Attempts to
 * map "System RAM" with this mapping type will fail.
 */
void *memremap(resource_size_t offset, size_t size, unsigned long flags)
{
	int is_ram = region_intersects(offset, size, "System RAM");
	void *addr = NULL;

	if (is_ram == REGION_MIXED) {
		WARN_ONCE(1, "memremap attempted on mixed range %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	/* Try all mapping types requested until one returns non-NULL */
	if (flags & MEMREMAP_WB) {
		flags &= ~MEMREMAP_WB;
		/*
		 * MEMREMAP_WB is special in that it can be satisfied
		 * from the direct map. Some archs depend on the
		 * capability of memremap() to autodetect cases where
		 * the requested range is potentially in "System RAM"
		 */
		if (is_ram == REGION_INTERSECTS)
			addr = try_ram_remap(offset, size);
		if (!addr)
			addr = ioremap_cache(offset, size);
	}

	/*
	 * If we don't have a mapping yet and more request flags are
	 * pending then we will be attempting to establish a new virtual
	 * address mapping. Enforce that this mapping is not aliasing
	 * "System RAM"
	 */
	if (!addr && is_ram == REGION_INTERSECTS && flags) {
		WARN_ONCE(1, "memremap attempted on ram %pa size: %#lx\n",
				&offset, (unsigned long) size);
		return NULL;
	}

	if (!addr && (flags & MEMREMAP_WT)) {
		flags &= ~MEMREMAP_WT;
		addr = ioremap_wt(offset, size);
	}

	return addr;
}
EXPORT_SYMBOL(memremap);
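
/*
 * Illustration (not part of the original file): a minimal sketch of a
 * memremap()/memunmap() caller, kept under "#if 0". The base/len values
 * and the function name are hypothetical. Passing both flags requests
 * write-back first with write-through as a fallback, matching the
 * "try all mapping types" loop above.
 */
#if 0
static int example_peek(resource_size_t base, size_t len)
{
	void *p = memremap(base, len, MEMREMAP_WB | MEMREMAP_WT);

	if (!p)
		return -ENOMEM;

	/* no __iomem annotation: the mapping behaves like ordinary memory */
	pr_info("first byte: %#x\n", *(unsigned char *) p);

	memunmap(p);
	return 0;
}
#endif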

void memunmap(void *addr)
{
	if (is_vmalloc_addr(addr))
		iounmap((void __iomem *) addr);
}
EXPORT_SYMBOL(memunmap);

static void devm_memremap_release(struct device *dev, void *res)
{
	memunmap(*(void **)res);
}

static int devm_memremap_match(struct device *dev, void *res, void *match_data)
{
	return *(void **)res == match_data;
}

void *devm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	void **ptr, *addr;

	ptr = devres_alloc_node(devm_memremap_release, sizeof(*ptr), GFP_KERNEL,
			dev_to_node(dev));
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	addr = memremap(offset, size, flags);
	if (addr) {
		*ptr = addr;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
		return ERR_PTR(-ENXIO);
	}

	return addr;
}
EXPORT_SYMBOL(devm_memremap);

void devm_memunmap(struct device *dev, void *addr)
{
	WARN_ON(devres_release(dev, devm_memremap_release,
				devm_memremap_match, addr));
}
EXPORT_SYMBOL(devm_memunmap);
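
/*
 * Illustration (not part of the original file): a sketch of the
 * device-managed variant in a hypothetical probe routine. Note the
 * different failure convention: devm_memremap() returns ERR_PTR() rather
 * than NULL, and the mapping is unwound automatically on driver detach,
 * so no explicit devm_memunmap() is needed here.
 */
#if 0
static int example_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void *base;

	if (!res)
		return -ENODEV;

	base = devm_memremap(&pdev->dev, res->start, resource_size(res),
			MEMREMAP_WB);
	if (IS_ERR(base))
		return PTR_ERR(base);

	/* ... use base as ordinary memory ... */
	return 0;
}
#endif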

#ifdef CONFIG_ZONE_DEVICE
struct page_map {
	struct resource res;
};

static void devm_memremap_pages_release(struct device *dev, void *res)
{
	struct page_map *page_map = res;

	/* pages are dead and unused, undo the arch mapping */
	mem_hotplug_begin();
	arch_remove_memory(page_map->res.start, resource_size(&page_map->res));
	mem_hotplug_done();
}

void *devm_memremap_pages(struct device *dev, struct resource *res)
{
	int is_ram = region_intersects(res->start, resource_size(res),
			"System RAM");
	struct page_map *page_map;
	int error, nid;

	if (is_ram != REGION_DISJOINT) {
		WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
				is_ram == REGION_MIXED ? "mixed" : "ram", res);
		return ERR_PTR(-ENXIO);
	}

	page_map = devres_alloc_node(devm_memremap_pages_release,
			sizeof(*page_map), GFP_KERNEL, dev_to_node(dev));
	if (!page_map)
		return ERR_PTR(-ENOMEM);

	memcpy(&page_map->res, res, sizeof(*res));

	nid = dev_to_node(dev);
	if (nid < 0)
		nid = numa_mem_id();

	mem_hotplug_begin();
	error = arch_add_memory(nid, res->start, resource_size(res), true);
	mem_hotplug_done();
	if (error) {
		devres_free(page_map);
		return ERR_PTR(error);
	}

	devres_add(dev, page_map);
	return __va(res->start);
}
EXPORT_SYMBOL_GPL(devm_memremap_pages);
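
/*
 * Illustration (not part of the original file): a sketch of arranging
 * struct page backing for a device memory range. The resource is
 * hypothetical; per the check above it must not intersect "System RAM",
 * otherwise the call fails with -ENXIO.
 */
#if 0
static int example_enable_pages(struct device *dev, struct resource *res)
{
	void *base = devm_memremap_pages(dev, res);

	if (IS_ERR(base))
		return PTR_ERR(base);

	/* the range now has struct page coverage and lives in the direct map */
	return 0;
}
#endif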
#endif /* CONFIG_ZONE_DEVICE */