// SPDX-License-Identifier: GPL-2.0
#include <linux/errno.h>
#include <linux/init.h>
#include <asm/sclp.h>
#include <asm/sections.h>
#include <asm/mem_detect.h>
#include <asm/sparsemem.h>
#include "compressed/decompressor.h"
#include "boot.h"

unsigned long __bootdata(max_physmem_end);
struct mem_detect_info __bootdata(mem_detect);

/* up to 256 storage elements, 1020 subincrements each */
#define ENTRIES_EXTENDED_MAX						       \
	(256 * (1020 / 2) * sizeof(struct mem_detect_block))

/*
 * To avoid corrupting old kernel memory during dump, find the lowest
 * possible memory chunk: either right after the kernel end (decompressed
 * kernel) or after the initrd (if it is present and there is no hole
 * between the kernel end and the initrd).
 */
static void *mem_detect_alloc_extended(void)
{
	unsigned long offset = ALIGN(mem_safe_offset(), sizeof(u64));

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE &&
	    INITRD_START < offset + ENTRIES_EXTENDED_MAX)
		offset = ALIGN(INITRD_START + INITRD_SIZE, sizeof(u64));

	return (void *)offset;
}

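/*
 * Return a pointer to the n'th mem_detect block. Entries beyond the
 * MEM_INLINED_ENTRIES built into mem_detect live in the extended array,
 * which is allocated on first use via mem_detect_alloc_extended().
 */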
static struct mem_detect_block *__get_mem_detect_block_ptr(u32 n)
{
	if (n < MEM_INLINED_ENTRIES)
		return &mem_detect.entries[n];
	if (unlikely(!mem_detect.entries_extended))
		mem_detect.entries_extended = mem_detect_alloc_extended();
	return &mem_detect.entries_extended[n - MEM_INLINED_ENTRIES];
}

/*
 * Sequential calls to add_mem_detect_block() with adjacent memory areas
 * are merged together into a single memory block.
 */
void add_mem_detect_block(u64 start, u64 end)
{
	struct mem_detect_block *block;

	if (mem_detect.count) {
		block = __get_mem_detect_block_ptr(mem_detect.count - 1);
		if (block->end == start) {
			block->end = end;
			return;
		}
	}

	block = __get_mem_detect_block_ptr(mem_detect.count);
	block->start = start;
	block->end = end;
	mem_detect.count++;
}

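/*
 * Issue diag 0x260 with subcode 0x10 (storage configuration) while a
 * temporary program check handler is installed, so the call fails
 * gracefully on machines without diag 0x260 support. On success the
 * value left in ry is returned to the caller (which treats it as the
 * number of storage extents written to the buffer at rx); on failure
 * -1 is returned.
 */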
static int __diag260(unsigned long rx1, unsigned long rx2)
{
	register unsigned long _rx1 asm("2") = rx1;
	register unsigned long _rx2 asm("3") = rx2;
	register unsigned long _ry asm("4") = 0x10; /* storage configuration */
	int rc = -1;				    /* fail */
	unsigned long reg1, reg2;
	psw_t old = S390_lowcore.program_new_psw;

	asm volatile(
		"	epsw	%0,%1\n"
		"	st	%0,%[psw_pgm]\n"
		"	st	%1,%[psw_pgm]+4\n"
		"	larl	%0,1f\n"
		"	stg	%0,%[psw_pgm]+8\n"
		"	diag	%[rx],%[ry],0x260\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:\n"
		: "=&d" (reg1), "=&a" (reg2),
		  [psw_pgm] "=Q" (S390_lowcore.program_new_psw),
		  [rc] "+&d" (rc), [ry] "+d" (_ry)
		: [rx] "d" (_rx1), "d" (_rx2)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
	return rc == 0 ? _ry : -1;
}

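/*
 * Query the storage configuration via diag 0x260 and register each
 * returned extent with add_mem_detect_block(). The extent end addresses
 * are inclusive, hence the "+ 1" when converting them to block ends.
 */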
static int diag260(void)
{
	int rc, i;

	struct {
		unsigned long start;
		unsigned long end;
	} storage_extents[8] __aligned(16); /* VM supports up to 8 extents */

	memset(storage_extents, 0, sizeof(storage_extents));
	rc = __diag260((unsigned long)storage_extents, sizeof(storage_extents));
	if (rc == -1)
		return -1;

	for (i = 0; i < min_t(int, rc, ARRAY_SIZE(storage_extents)); i++)
		add_mem_detect_block(storage_extents[i].start, storage_extents[i].end + 1);
	return 0;
}

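/*
 * Probe a physical address with the TPROT instruction. A program check
 * (i.e. the address is not backed by accessible storage) branches to
 * label 1 and leaves rc at -EFAULT; otherwise the condition code of
 * TPROT is returned.
 */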
static int tprot(unsigned long addr)
{
	unsigned long pgm_addr;
	int rc = -EFAULT;
	psw_t old = S390_lowcore.program_new_psw;

	S390_lowcore.program_new_psw.mask = __extract_psw();
	asm volatile(
		"	larl	%[pgm_addr],1f\n"
		"	stg	%[pgm_addr],%[psw_pgm_addr]\n"
		"	tprot	0(%[addr]),0\n"
		"	ipm	%[rc]\n"
		"	srl	%[rc],28\n"
		"1:\n"
		: [pgm_addr] "=&d"(pgm_addr),
		  [psw_pgm_addr] "=Q"(S390_lowcore.program_new_psw.addr),
		  [rc] "+&d"(rc)
		: [addr] "a"(addr)
		: "cc", "memory");
	S390_lowcore.program_new_psw = old;
	return rc;
}

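/*
 * Binary search for the end of memory: probe 1MB-aligned addresses with
 * tprot() and keep the highest accessible one, then register the result
 * as a single memory block starting at 0.
 */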
static void search_mem_end(void)
{
	unsigned long range = 1 << (MAX_PHYSMEM_BITS - 20); /* in 1MB blocks */
	unsigned long offset = 0;
	unsigned long pivot;

	while (range > 1) {
		range >>= 1;
		pivot = offset + range;
		if (!tprot(pivot << 20))
			offset = pivot;
	}

	add_mem_detect_block(0, (offset + 1) << 20);
}

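/*
 * Try the available detection methods in order of preference: SCLP
 * storage info, diag 0x260, the SCLP-reported maximum (as a single
 * block), and finally a tprot() based binary search. Record which
 * method was used in mem_detect.info_source.
 */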
void detect_memory(void)
{
	sclp_early_get_memsize(&max_physmem_end);

	if (!sclp_early_read_storage_info()) {
		mem_detect.info_source = MEM_DETECT_SCLP_STOR_INFO;
		return;
	}

	if (!diag260()) {
		mem_detect.info_source = MEM_DETECT_DIAG260;
		return;
	}

	if (max_physmem_end) {
		add_mem_detect_block(0, max_physmem_end);
		mem_detect.info_source = MEM_DETECT_SCLP_READ_INFO;
		return;
	}

	search_mem_end();
	mem_detect.info_source = MEM_DETECT_BIN_SEARCH;
	max_physmem_end = get_mem_detect_end();
}