// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/hugetlb.h>
#include <linux/acpi_iort.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/kvm_host.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

/*
 * If the corresponding config options are enabled, we create both ZONE_DMA
 * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory
 * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4).
 * In that case, ZONE_DMA32 covers the rest of the 32-bit addressable memory,
 * otherwise it is empty.
 *
 * Memory reservation for the crash kernel is either done early or deferred,
 * depending on the DMA memory zone configs (ZONE_DMA):
 *
 * In the absence of ZONE_DMA configs, arm64_dma_phys_limit is initialized
 * here instead of in max_zone_phys(). This allows early reservation of
 * crash kernel memory, which depends on arm64_dma_phys_limit. Reserving
 * memory early for the crash kernel allows linear creation of block
 * mappings (greater than page-granularity) for all the memory bank ranges.
 * In this scheme a comparatively quicker boot is observed.
 *
 * If ZONE_DMA configs are defined, crash kernel memory reservation is
 * delayed until the DMA zone memory range size initialization performed in
 * zone_sizes_init(). The deferral is necessary to steer clear of the DMA
 * zone memory range and avoid overlapping allocations. The crash kernel
 * memory boundaries are therefore not known when mapping all the bank
 * memory ranges, which makes it impossible to exclude the crash kernel
 * range from block mapping creation, so page-granularity mappings are
 * created for the entire memory range. Hence a slightly slower boot is
 * observed.
 *
 * Note: Page-granularity mappings are necessary for the crash kernel memory
 * range in order to shrink its size via the /sys/kernel/kexec_crash_size
 * interface.
 */
#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)
phys_addr_t __ro_after_init arm64_dma_phys_limit;
#else
phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1;
#endif

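/*
 * For illustration only (the sizes below are assumptions, not derived from
 * this file): on a system with both options enabled and 8 GB of DRAM
 * starting at physical address 0, the resulting layout would be
 *
 *	ZONE_DMA	[    0,  4 GB)	32-bit addressable
 *	ZONE_DMA32	empty
 *	ZONE_NORMAL	[ 4 GB,  8 GB)
 *
 * whereas on a platform whose firmware limits DMA to 30 bits (such as the
 * Raspberry Pi 4 mentioned above):
 *
 *	ZONE_DMA	[    0,  1 GB)	30-bit addressable
 *	ZONE_DMA32	[ 1 GB,  4 GB)	rest of the 32-bit range
 *	ZONE_NORMAL	[ 4 GB,   ...)
 */
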
/*
 * Provide a run-time means of disabling ZONE_DMA32 if it is enabled via
 * CONFIG_ZONE_DMA32.
 */
static bool disable_dma32 __ro_after_init;

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by the dump capture
 * kernel when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, arm64_dma_phys_limit,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

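/*
 * Usage sketch (illustrative values, not taken from this file): booting
 * with "crashkernel=512M" lets the kernel pick a suitable 2MB-aligned base
 * below arm64_dma_phys_limit, while "crashkernel=512M@0x60000000" requests
 * the reservation at an explicit base address, which must then pass the
 * memory/reserved/alignment checks above.
 */
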
#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image and is used by a
 * dump capture kernel to access the system memory of the primary kernel.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */

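/*
 * Example /chosen node fragment (illustrative addresses): a kdump-aware
 * kexec tool passes the elf core header location to the capture kernel
 * like so, encoded with the root #address-cells/#size-cells:
 *
 *	chosen {
 *		linux,elfcorehdr = <0x0 0x9fe00000 0x0 0x10000>;
 *	};
 */
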
/*
 * Return the maximum physical address for a zone accessible by the given bits
 * limit. If DRAM starts above 32-bit, expand the zone to the maximum
 * available memory, otherwise cap it at 32-bit.
 */
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
	phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits);
	phys_addr_t phys_start = memblock_start_of_DRAM();

	if (phys_start > U32_MAX)
		zone_mask = PHYS_ADDR_MAX;
	else if (phys_start > zone_mask)
		zone_mask = U32_MAX;

	return min(zone_mask, memblock_end_of_DRAM() - 1) + 1;
}

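/*
 * Worked example (assumed values): with zone_bits == 32, zone_mask is
 * 0xffffffff. If DRAM spans [0x80000000, 0x280000000), phys_start is
 * 0x80000000, which is neither above U32_MAX nor above zone_mask, so the
 * result is min(0xffffffff, 0x27fffffff) + 1 = 0x100000000, i.e. the zone
 * is capped at 4 GB. If DRAM instead started above U32_MAX, the zone would
 * be expanded to cover all available memory.
 */
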
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
	unsigned int __maybe_unused acpi_zone_dma_bits;
	unsigned int __maybe_unused dt_zone_dma_bits;
	phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32);

#ifdef CONFIG_ZONE_DMA
	acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address());
	dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL));
	zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits);
	arm64_dma_phys_limit = max_zone_phys(zone_dma_bits);
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = disable_dma32 ? 0 : PFN_DOWN(dma32_phys_limit);
	if (!arm64_dma_phys_limit)
		arm64_dma_phys_limit = dma32_phys_limit;
#endif
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init(max_zone_pfns);
}

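/*
 * For illustration (assumed firmware description): if the device tree
 * reports a maximum DMA'able CPU address of 0x3fffffff (as on a Raspberry
 * Pi 4), fls64(0x3fffffff) is 30, so zone_dma_bits becomes
 * min3(32, 30, <ACPI value>) = 30 and ZONE_DMA is limited to the first
 * 1 GB of RAM.
 */
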
static int __init early_disable_dma32(char *buf)
{
	if (!buf)
		return -EINVAL;

	if (!strcmp(buf, "on"))
		disable_dma32 = true;

	return 0;
}
early_param("disable_dma32", early_disable_dma32);

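/*
 * Usage example: booting with "disable_dma32=on" on the kernel command
 * line leaves max_zone_pfns[ZONE_DMA32] at zero in zone_sizes_init(), so
 * no ZONE_DMA32 is created even when CONFIG_ZONE_DMA32 is enabled.
 */
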
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = pfn << PAGE_SHIFT;

	if ((addr >> PAGE_SHIFT) != pfn)
		return 0;

#ifdef CONFIG_SPARSEMEM
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;

	if (!valid_section(__pfn_to_section(pfn)))
		return 0;

	/*
	 * ZONE_DEVICE memory does not have memblock entries, so a
	 * memblock_is_map_memory() check for ZONE_DEVICE based addresses
	 * will always fail. Likewise, normal hotplugged memory never has
	 * the MEMBLOCK_NOMAP flag set in its memblock entries. Hence skip
	 * the memblock search for all non-early memory sections, which
	 * covers all of hotplug memory, both normal and ZONE_DEVICE based.
	 */
	if (!early_section(__pfn_to_section(pfn)))
		return pfn_section_valid(__pfn_to_section(pfn), pfn);
#endif
	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);

static phys_addr_t memory_limit = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

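/*
 * Usage example: "mem=2G" on the kernel command line sets memory_limit to
 * 0x80000000; arm64_memblock_init() below then trims the memblock view of
 * RAM to that size (while adding the kernel image region back in).
 */
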
static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}

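/*
 * Example /chosen node fragment (illustrative addresses): a kdump capture
 * kernel is typically confined to its crashkernel reservation like so,
 * which makes fdt_enforce_memory_region() cap usable memory to that range:
 *
 *	chosen {
 *		linux,usable-memory-range = <0x0 0x60000000 0x0 0x20000000>;
 *	};
 */
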
void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = BIT(vabits_actual - 1);

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to place the available physical
	 * memory in the 48-bit addressable part of the linear region, i.e.,
	 * we have to move it upward. Since memstart_addr represents the
	 * physical address of PAGE_OFFSET, we have to *subtract* from it.
	 */
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52))
		memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52);

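	/*
	 * Worked example (assuming _PAGE_OFFSET(va) expands to -(1UL << va),
	 * as defined in asm/memory.h): _PAGE_OFFSET(48) - _PAGE_OFFSET(52)
	 * equals 2^52 - 2^48, so memstart_addr is lowered by 15 * 2^48 and
	 * the linear map addresses derived from it land in the 48-bit
	 * addressable upper part of the 52-bit configured region.
	 */
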
	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if it would otherwise
		 * cause the initrd to become inaccessible via the linear
		 * mapping. Otherwise, this is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 mmfr0 = read_cpuid(ID_AA64MMFR0_EL1);
		int parange = cpuid_feature_extract_unsigned_field(
					mmfr0, ID_AA64MMFR0_PARANGE_SHIFT);
		s64 range = linear_region_size -
			    BIT(id_aa64mmfr0_parange_to_phys_shift(parange));

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the physical memory can
		 * span, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= (s64)ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

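	/*
	 * For illustration (assumed values): with vabits_actual == 48 the
	 * linear region spans 2^47 bytes; if the CPU reports a 40-bit
	 * physical range, "range" is 2^47 - 2^40 bytes of slack. The 16-bit
	 * seed then selects one of the ARM64_MEMSTART_ALIGN-sized slots
	 * within that slack, e.g. a seed of 0x8000 shifts memstart_addr
	 * down by roughly half of the available margin.
	 */
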
	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_text), _end - _text);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	reserve_elfcorehdr();

	if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32))
		reserve_crashkernel();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arm64_numa_init();

	/*
	 * This must be done after arm64_numa_init(), which calls numa_init()
	 * to initialize node_online_map. hugetlb_cma_reserve() uses that map
	 * when allocating the required CMA size across online nodes.
	 */
#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA)
	arm64_hugetlb_cma_reserve();
#endif

	dma_pernuma_cma_reserve();

	kvm_hyp_reserve();

	/*
	 * sparse_init() tries to allocate memory from memblock, so it must be
	 * done after the fixed reservations.
	 */
	sparse_init();
	zone_sizes_init(min, max);

	/*
	 * Reserve the CMA area after arm64_dma_phys_limit was initialised.
	 */
	dma_contiguous_reserve(arm64_dma_phys_limit);

	/*
	 * request_standard_resources() depends on crashkernel's memory being
	 * reserved, so do it here.
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32))
		reserve_crashkernel();

	memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_free(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, end, prev_end = 0;
	int i;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(end, MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

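/*
 * For illustration (assumed layout): given two banks covering PFNs
 * [0x80000, 0xc0000) and [0x100000, 0x140000), the loop above frees the
 * struct page entries for the hole [0xc0000, 0x100000) -- modulo the
 * MAX_ORDER_NR_PAGES and PAGES_PER_SECTION rounding -- since no memory
 * backs those page frames.
 */
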
/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > PFN_DOWN(arm64_dma_phys_limit))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   POISON_FREE_INITMEM, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

void dump_mem_limit(void)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
}