// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/setup.c
 *
 * Copyright (C) 1995-2001 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/acpi.h>
#include <linux/export.h>
#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/console.h>
#include <linux/cache.h>
#include <linux/screen_info.h>
#include <linux/init.h>
#include <linux/kexec.h>
#include <linux/root_dev.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/fs.h>
#include <linux/proc_fs.h>
#include <linux/memblock.h>
#include <linux/of_fdt.h>
#include <linux/efi.h>
#include <linux/psci.h>
#include <linux/sched/task.h>
#include <linux/mm.h>

#include <asm/acpi.h>
#include <asm/fixmap.h>
#include <asm/cpu.h>
#include <asm/cputype.h>
#include <asm/daifflags.h>
#include <asm/elf.h>
#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/hypervisor.h>
#include <asm/kasan.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/smp_plat.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>
#include <asm/efi.h>
#include <asm/xen/hypervisor.h>
#include <asm/mmu_context.h>

static int num_standard_resources;
static struct resource *standard_resources;

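/* Physical address of the DTB, recorded from x0 by the early boot code. */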
phys_addr_t __fdt_pointer __initdata;

/*
 * Standard memory resources
 */
static struct resource mem_res[] = {
	{
		.name = "Kernel code",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	},
	{
		.name = "Kernel data",
		.start = 0,
		.end = 0,
		.flags = IORESOURCE_SYSTEM_RAM
	}
};

#define kernel_code mem_res[0]
#define kernel_data mem_res[1]

/*
 * The recorded values of x0 .. x3 upon kernel entry.
 */
u64 __cacheline_aligned boot_args[4];

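/*
 * Record the boot CPU's hardware ID (MPIDR) in logical slot 0 and zero the
 * boot CPU's percpu offset before any percpu data can be touched.
 */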
void __init smp_setup_processor_id(void)
{
	u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
	set_cpu_logical_map(0, mpidr);

	/*
	 * Clear __my_cpu_offset on the boot CPU to avoid a hang caused by
	 * using percpu variables early; for example, lockdep will access
	 * percpu variables inside lock_release().
	 */
	set_my_cpu_offset(0);
	pr_info("Booting Linux on physical CPU 0x%010lx [0x%08x]\n",
		(unsigned long)mpidr, read_cpuid_id());
}

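/*
 * Used by generic code (e.g. the DT core) to match a logical CPU against the
 * physical ID reported by firmware: on arm64 that is the recorded MPIDR.
 */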
bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
{
	return phys_id == cpu_logical_map(cpu);
}

struct mpidr_hash mpidr_hash;
/**
 * smp_build_mpidr_hash - Pre-compute the shifts required at each affinity
 *			  level in order to build a linear index from an
 *			  MPIDR value. The resulting algorithm is a
 *			  collision-free hash carried out through shifting
 *			  and ORing.
 */
static void __init smp_build_mpidr_hash(void)
{
	u32 i, affinity, fs[4], bits[4], ls;
	u64 mask = 0;
	/*
	 * Pre-scan the list of MPIDRs and filter out bits that do not
	 * contribute to affinity levels, i.e. they never toggle.
	 */
	for_each_possible_cpu(i)
		mask |= (cpu_logical_map(i) ^ cpu_logical_map(0));
	pr_debug("mask of set bits %#llx\n", mask);
	/*
	 * Find and stash the last and first bit set at all affinity levels to
	 * check how many bits are required to represent them.
	 */
	for (i = 0; i < 4; i++) {
		affinity = MPIDR_AFFINITY_LEVEL(mask, i);
		/*
		 * Find the MSB and LSB positions to determine how many bits
		 * are required to express the affinity level.
		 */
		ls = fls(affinity);
		fs[i] = affinity ? ffs(affinity) - 1 : 0;
		bits[i] = ls - fs[i];
	}
	/*
	 * An index can be created from the MPIDR_EL1 by isolating the
	 * significant bits at each affinity level and by shifting them in
	 * order to compress the value space to a compact set of indices.
	 * This is equivalent to hashing the MPIDR_EL1 through shifting and
	 * ORing. It is a collision-free hash, though not minimal, since some
	 * levels might contain a number of CPUs that is not an exact power
	 * of 2 and their bit representation might contain holes, e.g.
	 * MPIDR_EL1[7:0] = {0x2, 0x80}.
	 */
	mpidr_hash.shift_aff[0] = MPIDR_LEVEL_SHIFT(0) + fs[0];
	mpidr_hash.shift_aff[1] = MPIDR_LEVEL_SHIFT(1) + fs[1] - bits[0];
	mpidr_hash.shift_aff[2] = MPIDR_LEVEL_SHIFT(2) + fs[2] -
						(bits[1] + bits[0]);
	mpidr_hash.shift_aff[3] = MPIDR_LEVEL_SHIFT(3) +
				  fs[3] - (bits[2] + bits[1] + bits[0]);
	mpidr_hash.mask = mask;
	mpidr_hash.bits = bits[3] + bits[2] + bits[1] + bits[0];
	pr_debug("MPIDR hash: aff0[%u] aff1[%u] aff2[%u] aff3[%u] mask[%#llx] bits[%u]\n",
		mpidr_hash.shift_aff[0],
		mpidr_hash.shift_aff[1],
		mpidr_hash.shift_aff[2],
		mpidr_hash.shift_aff[3],
		mpidr_hash.mask,
		mpidr_hash.bits);
	/*
	 * 4x is an arbitrary value used to warn on a hash table much bigger
	 * than expected on most systems.
	 */
	if (mpidr_hash_size() > 4 * num_possible_cpus())
		pr_warn("Large number of MPIDR hash buckets detected\n");
}

static void *early_fdt_ptr __initdata;

void __init *get_early_fdt_ptr(void)
{
	return early_fdt_ptr;
}

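/*
 * Map the DTB early, before paging_init(); asmlinkage because this is
 * called from the early assembly boot path with the DTB's physical address.
 */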
asmlinkage void __init early_fdt_map(u64 dt_phys)
{
	int fdt_size;

	early_fixmap_init();
	early_fdt_ptr = fixmap_remap_fdt(dt_phys, &fdt_size, PAGE_KERNEL);
}

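/*
 * Map and validate the DTB, reserve its memory so it is not reused, and run
 * the early flattened-DT scan. Boot cannot continue without a valid DTB, so
 * spin forever on failure.
 */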
static void __init setup_machine_fdt(phys_addr_t dt_phys)
{
	int size;
	void *dt_virt = fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL);
	const char *name;

	if (dt_virt)
		memblock_reserve(dt_phys, size);

	if (!dt_virt || !early_init_dt_scan(dt_virt)) {
		pr_crit("\n"
			"Error: invalid device tree blob at physical address %pa (virtual address 0x%p)\n"
			"The dtb must be 8-byte aligned and must not exceed 2 MB in size\n"
			"\nPlease check your bootloader.",
			&dt_phys, dt_virt);

		while (true)
			cpu_relax();
	}

	/* Early fixups are done, map the FDT as read-only now */
	fixmap_remap_fdt(dt_phys, &size, PAGE_KERNEL_RO);

	name = of_flat_dt_get_machine_name();
	if (!name)
		return;

	pr_info("Machine model: %s\n", name);
	dump_stack_set_arch_desc("%s (DT)", name);
}

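/*
 * Populate /proc/iomem: one resource per memblock region ("System RAM", or
 * "reserved" for nomap regions), with the kernel text/data and any crash
 * kernel window nested inside the region that contains them.
 */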
static void __init request_standard_resources(void)
{
	struct memblock_region *region;
	struct resource *res;
	unsigned long i = 0;
	size_t res_size;

	kernel_code.start   = __pa_symbol(_text);
	kernel_code.end     = __pa_symbol(__init_begin - 1);
	kernel_data.start   = __pa_symbol(_sdata);
	kernel_data.end     = __pa_symbol(_end - 1);

	num_standard_resources = memblock.memory.cnt;
	res_size = num_standard_resources * sizeof(*standard_resources);
	standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
	if (!standard_resources)
		panic("%s: Failed to allocate %zu bytes\n", __func__, res_size);

	for_each_mem_region(region) {
		res = &standard_resources[i++];
		if (memblock_is_nomap(region)) {
			res->name  = "reserved";
			res->flags = IORESOURCE_MEM;
		} else {
			res->name  = "System RAM";
			res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		}
		res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
		res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;

		request_resource(&iomem_resource, res);

		if (kernel_code.start >= res->start &&
		    kernel_code.end <= res->end)
			request_resource(res, &kernel_code);
		if (kernel_data.start >= res->start &&
		    kernel_data.end <= res->end)
			request_resource(res, &kernel_data);
#ifdef CONFIG_KEXEC_CORE
		/* Userspace will find "Crash kernel" region in /proc/iomem. */
		if (crashk_res.end && crashk_res.start >= res->start &&
		    crashk_res.end <= res->end)
			request_resource(res, &crashk_res);
#endif
	}
}

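/*
 * Re-express memblock reservations that overlap the standard resources as
 * "reserved" child resources, splitting regions where needed, so that they
 * are visible in /proc/iomem.
 */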
static int __init reserve_memblock_reserved_regions(void)
{
	u64 i, j;

	for (i = 0; i < num_standard_resources; ++i) {
		struct resource *mem = &standard_resources[i];
		phys_addr_t r_start, r_end, mem_size = resource_size(mem);

		if (!memblock_is_region_reserved(mem->start, mem_size))
			continue;

		for_each_reserved_mem_range(j, &r_start, &r_end) {
			resource_size_t start, end;

			start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
			end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);

			if (start > mem->end || end < mem->start)
				continue;

			reserve_region_with_split(mem, start, end, "reserved");
		}
	}

	return 0;
}
arch_initcall(reserve_memblock_reserved_regions);

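/* Map of logical CPU number to MPIDR hardware ID; INVALID_HWID until set. */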
u64 __cpu_logical_map[NR_CPUS] = { [0 ... NR_CPUS-1] = INVALID_HWID };

u64 cpu_logical_map(unsigned int cpu)
{
	return __cpu_logical_map[cpu];
}

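/*
 * Architecture-specific boot-time setup, called once from start_kernel().
 * Ordering here matters: the DTB is parsed before memblock and paging are
 * initialised, and PSCI/CPU enumeration runs only after the firmware tables
 * (DT or ACPI) are available.
 */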
void __init __no_sanitize_address setup_arch(char **cmdline_p)
{
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code   = (unsigned long) _etext;
	init_mm.end_data   = (unsigned long) _edata;
	init_mm.brk	   = (unsigned long) _end;

	*cmdline_p = boot_command_line;

	/*
	 * If we know now that we are going to need KPTI, then use non-global
	 * mappings from the start, avoiding the cost of rewriting
	 * everything later.
	 */
	arm64_use_ng_mappings = kaslr_requires_kpti();

	early_fixmap_init();
	early_ioremap_init();

	setup_machine_fdt(__fdt_pointer);

	/*
	 * Initialise the static keys early as they may be enabled by the
	 * cpufeature code and early parameters.
	 */
	jump_label_init();
	parse_early_param();

	/*
	 * Unmask asynchronous aborts and FIQ after bringing up a possible
	 * earlycon, so that System Errors are reported somewhere we can see
	 * them.
	 */
	local_daif_restore(DAIF_PROCCTX_NOIRQ);

	/*
	 * TTBR0 is only used for the identity mapping at this stage. Make it
	 * point to zero page to avoid speculatively fetching new entries.
	 */
	cpu_uninstall_idmap();

	xen_early_init();
	efi_init();

	if (!efi_enabled(EFI_BOOT) && ((u64)_text % MIN_KIMG_ALIGN) != 0)
	     pr_warn(FW_BUG "Kernel image misaligned at boot, please fix your bootloader!");

	arm64_memblock_init();

	paging_init();

	acpi_table_upgrade();

	/* Parse the ACPI tables for possible boot-time configuration */
	acpi_boot_table_init();

	if (acpi_disabled)
		unflatten_device_tree();

	bootmem_init();

	kasan_init();

	request_standard_resources();

	early_ioremap_reset();

	if (acpi_disabled)
		psci_dt_init();
	else
		psci_acpi_init();

	init_bootcpu_ops();
	smp_init_cpus();
	smp_build_mpidr_hash();

	/* Init percpu seeds for random tags after cpus are set up. */
	kasan_init_sw_tags();

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Make sure init_thread_info.ttbr0 always generates translation
	 * faults in case uaccess_enable() is inadvertently called by the init
	 * thread.
	 */
	init_task.thread_info.ttbr0 = phys_to_ttbr(__pa_symbol(reserved_pg_dir));
#endif

	if (boot_args[1] || boot_args[2] || boot_args[3]) {
		pr_err("WARNING: x1-x3 nonzero in violation of boot protocol:\n"
			"\tx1: %016llx\n\tx2: %016llx\n\tx3: %016llx\n"
			"This indicates a broken bootloader or old kernel\n",
			boot_args[1], boot_args[2], boot_args[3]);
	}
}

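/*
 * A CPU can be hot-unplugged only if CONFIG_HOTPLUG_CPU is enabled and its
 * cpu_operations implement and permit cpu_can_disable().
 */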
static inline bool cpu_can_disable(unsigned int cpu)
{
#ifdef CONFIG_HOTPLUG_CPU
	const struct cpu_operations *ops = get_cpu_ops(cpu);

	if (ops && ops->cpu_can_disable)
		return ops->cpu_can_disable(cpu);
#endif
	return false;
}

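/* Register NUMA nodes and per-CPU devices with the driver core. */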
static int __init topology_init(void)
{
	int i;

	for_each_online_node(i)
		register_one_node(i);

	for_each_possible_cpu(i) {
		struct cpu *cpu = &per_cpu(cpu_data.cpu, i);

		cpu->hotpluggable = cpu_can_disable(i);
		register_cpu(cpu, i);
	}

	return 0;
}
subsys_initcall(topology_init);

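/* Report the KASLR offset, or note that randomization is disabled. */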
static void dump_kernel_offset(void)
{
	const unsigned long offset = kaslr_offset();

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && offset > 0) {
		pr_emerg("Kernel Offset: 0x%lx from 0x%lx\n",
			 offset, KIMAGE_VADDR);
		pr_emerg("PHYS_OFFSET: 0x%llx\n", PHYS_OFFSET);
	} else {
		pr_emerg("Kernel Offset: disabled\n");
	}
}

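/*
 * Panic notifier: dump the kernel offset, CPU features and the memory limit
 * so they appear in panic output.
 */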
static int arm64_panic_block_dump(struct notifier_block *self,
				  unsigned long v, void *p)
{
	dump_kernel_offset();
	dump_cpu_features();
	dump_mem_limit();
	return 0;
}

static struct notifier_block arm64_panic_block = {
	.notifier_call = arm64_panic_block_dump
};

static int __init register_arm64_panic_block(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &arm64_panic_block);
	return 0;
}
device_initcall(register_arm64_panic_block);

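/*
 * Called when a KVM hypervisor interface is detected, to register the
 * guest-side ioremap and memory-sharing hypervisor services.
 */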
void kvm_arm_init_hyp_services(void)
{
	kvm_init_ioremap_services();
	kvm_init_memshare_services();
}
464