// SPDX-License-Identifier: GPL-2.0
/*
 * Common EFI memory map functions.
 */

#define pr_fmt(fmt) "efi: " fmt

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/efi.h>
#include <linux/io.h>
#include <asm/early_ioremap.h>
#include <linux/memblock.h>
#include <linux/slab.h>

static phys_addr_t __init __efi_memmap_alloc_early(unsigned long size)
{
	return memblock_phys_alloc(size, SMP_CACHE_BYTES);
}

static phys_addr_t __init __efi_memmap_alloc_late(unsigned long size)
{
	unsigned int order = get_order(size);
	struct page *p = alloc_pages(GFP_KERNEL, order);

	if (!p)
		return 0;

	return PFN_PHYS(page_to_pfn(p));
}

void __init __efi_memmap_free(u64 phys, unsigned long size, unsigned long flags)
{
	if (flags & EFI_MEMMAP_MEMBLOCK) {
		if (slab_is_available())
			memblock_free_late(phys, size);
		else
			memblock_free(phys, size);
	} else if (flags & EFI_MEMMAP_SLAB) {
		struct page *p = pfn_to_page(PHYS_PFN(phys));
		unsigned int order = get_order(size);

		free_pages((unsigned long) page_address(p), order);
	}
}

static void __init efi_memmap_free(void)
{
	__efi_memmap_free(efi.memmap.phys_map,
			efi.memmap.desc_size * efi.memmap.nr_map,
			efi.memmap.flags);
}

/**
 * efi_memmap_alloc - Allocate memory for the EFI memory map
 * @num_entries: Number of entries in the allocated map.
 * @data: EFI memmap installation parameters
 *
 * Depending on whether mm_init() has already been invoked or not,
 * either memblock or "normal" page allocation is used. The physical
 * address of the allocated buffer is returned in @data->phys_map.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_alloc(unsigned int num_entries,
		struct efi_memory_map_data *data)
{
	/* Expect the allocation parameters to be zero-initialized */
	WARN_ON(data->phys_map || data->size);

	data->size = num_entries * efi.memmap.desc_size;
	data->desc_version = efi.memmap.desc_version;
	data->desc_size = efi.memmap.desc_size;
	data->flags &= ~(EFI_MEMMAP_SLAB | EFI_MEMMAP_MEMBLOCK);
	data->flags |= efi.memmap.flags & EFI_MEMMAP_LATE;

	if (slab_is_available()) {
		data->flags |= EFI_MEMMAP_SLAB;
		data->phys_map = __efi_memmap_alloc_late(data->size);
	} else {
		data->flags |= EFI_MEMMAP_MEMBLOCK;
		data->phys_map = __efi_memmap_alloc_early(data->size);
	}

	if (!data->phys_map)
		return -ENOMEM;
	return 0;
}

/**
 * __efi_memmap_init - Common code for mapping the EFI memory map
 * @data: EFI memory map data
 *
 * This function takes care of figuring out which function to use to
 * map the EFI memory map in efi.memmap based on how far into the boot
 * we are.
 *
 * During bootup, EFI_MEMMAP_LATE in data->flags should be clear since
 * we only have access to the early_memremap*() functions as the
 * vmalloc space isn't set up yet. Once the kernel is fully booted we
 * can fall back to the more robust memremap*() API.
 *
 * Returns zero on success, a negative error code on failure.
 */
static int __init __efi_memmap_init(struct efi_memory_map_data *data)
{
	struct efi_memory_map map;
	phys_addr_t phys_map;

	if (efi_enabled(EFI_PARAVIRT))
		return 0;

	phys_map = data->phys_map;

	if (data->flags & EFI_MEMMAP_LATE)
		map.map = memremap(phys_map, data->size, MEMREMAP_WB);
	else
		map.map = early_memremap(phys_map, data->size);

	if (!map.map) {
		pr_err("Could not map the memory map!\n");
		return -ENOMEM;
	}

	/* NOP if data->flags & (EFI_MEMMAP_MEMBLOCK | EFI_MEMMAP_SLAB) == 0 */
	efi_memmap_free();

	map.phys_map = data->phys_map;
	map.nr_map = data->size / data->desc_size;
	map.map_end = map.map + data->size;

	map.desc_version = data->desc_version;
	map.desc_size = data->desc_size;
	map.flags = data->flags;

	set_bit(EFI_MEMMAP, &efi.flags);

	efi.memmap = map;

	return 0;
}

/**
 * efi_memmap_init_early - Map the EFI memory map data structure
 * @data: EFI memory map data
 *
 * Use early_memremap() to map the passed in EFI memory map and assign
 * it to efi.memmap.
 */
int __init efi_memmap_init_early(struct efi_memory_map_data *data)
{
	/* Cannot go backwards */
	WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);

	data->flags = 0;
	return __efi_memmap_init(data);
}
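
/*
 * Illustrative sketch (not part of the upstream file): how early arch
 * code might fill in the boot-provided map parameters and hand them to
 * efi_memmap_init_early(), roughly mirroring what the ARM/RISC-V
 * efi_init() paths do. The function name and parameters here are
 * hypothetical.
 */
static void __init __maybe_unused example_memmap_early_init(phys_addr_t memmap_phys,
							     unsigned long memmap_size,
							     unsigned long desc_size,
							     unsigned int desc_ver)
{
	struct efi_memory_map_data data = {
		.phys_map	= memmap_phys,
		.size		= memmap_size,
		.desc_size	= desc_size,
		.desc_version	= desc_ver,
	};

	if (efi_memmap_init_early(&data) < 0)
		pr_warn("Failed to map the early EFI memory map\n");
}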

void __init efi_memmap_unmap(void)
{
	if (!efi_enabled(EFI_MEMMAP))
		return;

	if (!(efi.memmap.flags & EFI_MEMMAP_LATE)) {
		unsigned long size;

		size = efi.memmap.desc_size * efi.memmap.nr_map;
		early_memunmap(efi.memmap.map, size);
	} else {
		memunmap(efi.memmap.map);
	}

	efi.memmap.map = NULL;
	clear_bit(EFI_MEMMAP, &efi.flags);
}

/**
 * efi_memmap_init_late - Map efi.memmap with memremap()
 * @addr: Physical address of the new EFI memory map
 * @size: Size in bytes of the new EFI memory map
 *
 * Set up a mapping of the EFI memory map using memremap(). This
 * function should only be called once the vmalloc space has been
 * set up and is therefore not suitable for calling during early EFI
 * initialisation, e.g. in efi_init(). Additionally, it expects
 * efi_memmap_init_early() to have already been called.
 *
 * The reason there are two EFI memmap initialisation functions
 * (efi_memmap_init_early() and this late version) is that the
 * early EFI memmap should be explicitly unmapped once EFI
 * initialisation is complete, as the fixmap space used to map the EFI
 * memmap (via early_memremap()) is a scarce resource.
 *
 * This late mapping is intended to persist for the duration of
 * runtime so that things like efi_mem_desc_lookup() and
 * efi_mem_attributes() always work.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_init_late(phys_addr_t addr, unsigned long size)
{
	struct efi_memory_map_data data = {
		.phys_map = addr,
		.size = size,
		.flags = EFI_MEMMAP_LATE,
	};

	/* Did we forget to unmap the early EFI memmap? */
	WARN_ON(efi.memmap.map);

	/* Were we already called? */
	WARN_ON(efi.memmap.flags & EFI_MEMMAP_LATE);

	/*
	 * It makes no sense to allow callers to register different
	 * values for the following fields. Copy them out of the
	 * existing early EFI memmap.
	 */
	data.desc_version = efi.memmap.desc_version;
	data.desc_size = efi.memmap.desc_size;

	return __efi_memmap_init(&data);
}
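
/*
 * Illustrative sketch (not part of the upstream file): once the page
 * allocator and vmalloc are up, arch code typically drops the early
 * fixmap mapping and re-establishes the memmap with the late variant,
 * reusing the physical address recorded during early init. The helper
 * name below is hypothetical.
 */
static int __init __maybe_unused example_memmap_late_init(void)
{
	unsigned long size = efi.memmap.desc_size * efi.memmap.nr_map;
	phys_addr_t phys = efi.memmap.phys_map;

	/* the early fixmap mapping must be torn down first */
	efi_memmap_unmap();

	return efi_memmap_init_late(phys, size);
}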

/**
 * efi_memmap_install - Install a new EFI memory map in efi.memmap
 * @data: map allocation parameters (address, size, flags)
 *
 * Unlike efi_memmap_init_*(), this function does not allow the caller
 * to switch from early to late mappings. It simply uses the existing
 * mapping function and installs the new memmap.
 *
 * Returns zero on success, a negative error code on failure.
 */
int __init efi_memmap_install(struct efi_memory_map_data *data)
{
	efi_memmap_unmap();

	return __efi_memmap_init(data);
}
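
/*
 * Illustrative sketch (not part of the upstream file): the usual way a
 * caller carves a range out of the current memmap and installs the
 * result, loosely modelled on the x86 efi_arch_mem_reserve() flow.
 * The function name "example_mark_range_runtime" and its parameters
 * are hypothetical.
 */
static int __init __maybe_unused example_mark_range_runtime(phys_addr_t addr, u64 size)
{
	struct efi_memory_map_data data = {};
	struct efi_mem_range mr;
	efi_memory_desc_t md;
	int num_entries;
	void *new;

	if (efi_mem_desc_lookup(addr, &md))
		return -EINVAL;

	/* efi_memmap_insert() expects an EFI_PAGE_SIZE-aligned range */
	size += addr % EFI_PAGE_SIZE;
	size = round_up(size, EFI_PAGE_SIZE);
	addr = round_down(addr, EFI_PAGE_SIZE);

	mr.range.start = addr;
	mr.range.end = addr + size - 1;
	mr.attribute = md.attribute | EFI_MEMORY_RUNTIME;

	/* the touched descriptor may split into up to three pieces */
	num_entries = efi_memmap_split_count(&md, &mr.range);
	num_entries += efi.memmap.nr_map;

	if (efi_memmap_alloc(num_entries, &data))
		return -ENOMEM;

	/* build the new map in the freshly allocated buffer */
	new = memremap(data.phys_map, data.size, MEMREMAP_WB);
	if (!new)
		return -ENOMEM;

	efi_memmap_insert(&efi.memmap, new, &mr);
	memunmap(new);

	return efi_memmap_install(&data);
}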

/**
 * efi_memmap_split_count - Count number of additional EFI memmap entries
 * @md: EFI memory descriptor to split
 * @range: Address range (start, end) to split around
 *
 * Returns the number of additional EFI memmap entries required to
 * accommodate @range.
 */
int __init efi_memmap_split_count(efi_memory_desc_t *md, struct range *range)
{
	u64 m_start, m_end;
	u64 start, end;
	int count = 0;

	start = md->phys_addr;
	end = start + (md->num_pages << EFI_PAGE_SHIFT) - 1;

	/* modifying range */
	m_start = range->start;
	m_end = range->end;

	if (m_start <= start) {
		/* split into 2 parts */
		if (start < m_end && m_end < end)
			count++;
	}

	if (start < m_start && m_start < end) {
		/* split into 3 parts */
		if (m_end < end)
			count += 2;
		/* split into 2 parts */
		if (end <= m_end)
			count++;
	}

	return count;
}
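
/*
 * Illustrative sketch (not part of the upstream file): for a range that
 * lies strictly inside a single descriptor, that descriptor splits into
 * head, middle and tail, i.e. two additional entries are needed. The
 * addresses below are arbitrary example values.
 */
static void __init __maybe_unused example_split_count(void)
{
	efi_memory_desc_t md = {
		.phys_addr = 0x100000,
		.num_pages = 16,		/* 16 * EFI_PAGE_SIZE = 64 KiB */
	};
	struct range r = {
		.start = 0x104000,
		.end   = 0x107fff,		/* strictly inside the descriptor */
	};

	/* evaluates to 2: head, middle and tail replace the original entry */
	pr_info("extra entries needed: %d\n", efi_memmap_split_count(&md, &r));
}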

/**
 * efi_memmap_insert - Insert a memory region in an EFI memmap
 * @old_memmap: The existing EFI memory map structure
 * @buf: Address of buffer to store new map
 * @mem: Memory map entry to insert
 *
 * It is suggested that you call efi_memmap_split_count() first
 * to see how large @buf needs to be.
 */
void __init efi_memmap_insert(struct efi_memory_map *old_memmap, void *buf,
			      struct efi_mem_range *mem)
{
	u64 m_start, m_end, m_attr;
	efi_memory_desc_t *md;
	u64 start, end;
	void *old, *new;

	/* modifying range */
	m_start = mem->range.start;
	m_end = mem->range.end;
	m_attr = mem->attribute;

	/*
	 * The EFI memory map deals with regions in EFI_PAGE_SIZE
	 * units. Ensure that the region described by 'mem' is aligned
	 * correctly.
	 */
	if (!IS_ALIGNED(m_start, EFI_PAGE_SIZE) ||
	    !IS_ALIGNED(m_end + 1, EFI_PAGE_SIZE)) {
		WARN_ON(1);
		return;
	}

	for (old = old_memmap->map, new = buf;
	     old < old_memmap->map_end;
	     old += old_memmap->desc_size, new += old_memmap->desc_size) {

		/* copy original EFI memory descriptor */
		memcpy(new, old, old_memmap->desc_size);
		md = new;
		start = md->phys_addr;
		end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1;

		if (m_start <= start && end <= m_end)
			md->attribute |= m_attr;

		if (m_start <= start &&
		    (start < m_end && m_end < end)) {
			/* first part */
			md->attribute |= m_attr;
			md->num_pages = (m_end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
		}

		if ((start < m_start && m_start < end) && m_end < end) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* middle part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->attribute |= m_attr;
			md->phys_addr = m_start;
			md->num_pages = (m_end - m_start + 1) >>
				EFI_PAGE_SHIFT;
			/* last part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_end + 1;
			md->num_pages = (end - m_end) >>
				EFI_PAGE_SHIFT;
		}

		if ((start < m_start && m_start < end) &&
		    (end <= m_end)) {
			/* first part */
			md->num_pages = (m_start - md->phys_addr) >>
				EFI_PAGE_SHIFT;
			/* latter part */
			new += old_memmap->desc_size;
			memcpy(new, old, old_memmap->desc_size);
			md = new;
			md->phys_addr = m_start;
			md->num_pages = (end - md->phys_addr + 1) >>
				EFI_PAGE_SHIFT;
			md->attribute |= m_attr;
		}
	}
}