// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2005-2017 Andes Technology Corporation

#include <linux/cpu.h>
#include <linux/memblock.h>
#include <linux/seq_file.h>
#include <linux/console.h>
#include <linux/screen_info.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/of_fdt.h>
#include <linux/of_platform.h>
#include <asm/setup.h>
#include <asm/sections.h>
#include <asm/proc-fns.h>
#include <asm/cache_info.h>
#include <asm/elf.h>
#include <asm/fpu.h>
#include <nds32_intrinsic.h>

#define HWCAP_MFUSR_PC		0x000001
#define HWCAP_EXT		0x000002
#define HWCAP_EXT2		0x000004
#define HWCAP_FPU		0x000008
#define HWCAP_AUDIO		0x000010
#define HWCAP_BASE16		0x000020
#define HWCAP_STRING		0x000040
#define HWCAP_REDUCED_REGS	0x000080
#define HWCAP_VIDEO		0x000100
#define HWCAP_ENCRYPT		0x000200
#define HWCAP_EDM		0x000400
#define HWCAP_LMDMA		0x000800
#define HWCAP_PFM		0x001000
#define HWCAP_HSMP		0x002000
#define HWCAP_TRACE		0x004000
#define HWCAP_DIV		0x008000
#define HWCAP_MAC		0x010000
#define HWCAP_L2C		0x020000
#define HWCAP_FPU_DP		0x040000
#define HWCAP_V2		0x080000
#define HWCAP_DX_REGS		0x100000
#define HWCAP_HWPRE		0x200000

unsigned long cpu_id, cpu_rev, cpu_cfgid;
bool has_fpu = false;
char cpu_series;
char *endianness = NULL;

unsigned int __atags_pointer __initdata;
unsigned int elf_hwcap;
EXPORT_SYMBOL(elf_hwcap);

/*
 * The following string table must stay in sync with the HWCAP_xx
 * bitmasks defined above.
 */
static const char *hwcap_str[] = {
	"mfusr_pc",
	"perf1",
	"perf2",
	"fpu",
	"audio",
	"16b",
	"string",
	"reduced_regs",
	"video",
	"encrypt",
	"edm",
	"lmdma",
	"pfm",
	"hsmp",
	"trace",
	"div",
	"mac",
	"l2c",
	"fpu_dp",
	"v2",
	"dx_regs",
	"hw_pre",
	NULL,
};

#ifdef CONFIG_CPU_DCACHE_WRITETHROUGH
#define WRITE_METHOD "write through"
#else
#define WRITE_METHOD "write back"
#endif

struct cache_info L1_cache_info[2];
static void __init dump_cpu_info(int cpu)
{
	int i, p = 0;
	char str[sizeof(hwcap_str) + 16];

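	/* Build a space-separated list of the supported hardware feature strings. */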
	for (i = 0; hwcap_str[i]; i++) {
		if (elf_hwcap & (1 << i)) {
			sprintf(str + p, "%s ", hwcap_str[i]);
			p += strlen(hwcap_str[i]) + 1;
		}
	}

	pr_info("CPU%d Features: %s\n", cpu, str);

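	/* Probe the L1 instruction and data cache geometry and report it. */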
	L1_cache_info[ICACHE].ways = CACHE_WAY(ICACHE);
	L1_cache_info[ICACHE].line_size = CACHE_LINE_SIZE(ICACHE);
	L1_cache_info[ICACHE].sets = CACHE_SET(ICACHE);
	L1_cache_info[ICACHE].size =
	    L1_cache_info[ICACHE].ways * L1_cache_info[ICACHE].line_size *
	    L1_cache_info[ICACHE].sets / 1024;
	pr_info("L1I:%dKB/%dS/%dW/%dB\n", L1_cache_info[ICACHE].size,
		L1_cache_info[ICACHE].sets, L1_cache_info[ICACHE].ways,
		L1_cache_info[ICACHE].line_size);
	L1_cache_info[DCACHE].ways = CACHE_WAY(DCACHE);
	L1_cache_info[DCACHE].line_size = CACHE_LINE_SIZE(DCACHE);
	L1_cache_info[DCACHE].sets = CACHE_SET(DCACHE);
	L1_cache_info[DCACHE].size =
	    L1_cache_info[DCACHE].ways * L1_cache_info[DCACHE].line_size *
	    L1_cache_info[DCACHE].sets / 1024;
	pr_info("L1D:%dKB/%dS/%dW/%dB\n", L1_cache_info[DCACHE].size,
		L1_cache_info[DCACHE].sets, L1_cache_info[DCACHE].ways,
		L1_cache_info[DCACHE].line_size);
	pr_info("L1 D-Cache is %s\n", WRITE_METHOD);
	if (L1_cache_info[DCACHE].size != L1_CACHE_BYTES)
		pr_crit
		    ("The cache line size(%d) of this processor is not the same as L1_CACHE_BYTES(%d).\n",
		     L1_cache_info[DCACHE].size, L1_CACHE_BYTES);
#ifdef CONFIG_CPU_CACHE_ALIASING
	{
		int aliasing_num;
		aliasing_num =
		    L1_cache_info[ICACHE].size * 1024 / PAGE_SIZE /
		    L1_cache_info[ICACHE].ways;
		L1_cache_info[ICACHE].aliasing_num = aliasing_num;
		L1_cache_info[ICACHE].aliasing_mask =
		    (aliasing_num - 1) << PAGE_SHIFT;
		aliasing_num =
		    L1_cache_info[DCACHE].size * 1024 / PAGE_SIZE /
		    L1_cache_info[DCACHE].ways;
		L1_cache_info[DCACHE].aliasing_num = aliasing_num;
		L1_cache_info[DCACHE].aliasing_mask =
		    (aliasing_num - 1) << PAGE_SHIFT;
	}
#endif
#ifdef CONFIG_FPU
	/* Disable fpu and enable when it is used. */
	if (has_fpu)
		disable_fpu();
#endif
}

static void __init setup_cpuinfo(void)
{
	unsigned long tmp = 0, cpu_name;

	cpu_dcache_inval_all();
	cpu_icache_inval_all();
	__nds32__isb();

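	/* Decode the CPU series letter, ID, revision and configuration ID from CPU_VER. */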
	cpu_id = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskCPUID) >> CPU_VER_offCPUID;
	cpu_name = ((cpu_id) & 0xf0) >> 4;
	cpu_series = cpu_name ? cpu_name - 10 + 'A' : 'N';
	cpu_id = cpu_id & 0xf;
	cpu_rev = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskREV) >> CPU_VER_offREV;
	cpu_cfgid = (__nds32__mfsr(NDS32_SR_CPU_VER) & CPU_VER_mskCFGID) >> CPU_VER_offCFGID;

	pr_info("CPU:%c%ld, CPU_VER 0x%08x(id %lu, rev %lu, cfg %lu)\n",
		cpu_series, cpu_id, __nds32__mfsr(NDS32_SR_CPU_VER), cpu_id, cpu_rev, cpu_cfgid);

	elf_hwcap |= HWCAP_MFUSR_PC;

	if (((__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskBASEV) >> MSC_CFG_offBASEV) == 0) {
		if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskDIV)
			elf_hwcap |= HWCAP_DIV;

		if ((__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskMAC)
		    || (cpu_id == 12 && cpu_rev < 4))
			elf_hwcap |= HWCAP_MAC;
	} else {
		elf_hwcap |= HWCAP_V2;
		elf_hwcap |= HWCAP_DIV;
		elf_hwcap |= HWCAP_MAC;
	}

	if (cpu_cfgid & 0x0001)
		elf_hwcap |= HWCAP_EXT;

	if (cpu_cfgid & 0x0002)
		elf_hwcap |= HWCAP_BASE16;

	if (cpu_cfgid & 0x0004)
		elf_hwcap |= HWCAP_EXT2;

	if (cpu_cfgid & 0x0008) {
		elf_hwcap |= HWCAP_FPU;
		has_fpu = true;
	}
	if (cpu_cfgid & 0x0010)
		elf_hwcap |= HWCAP_STRING;

	if (__nds32__mfsr(NDS32_SR_MMU_CFG) & MMU_CFG_mskDE)
		endianness = "MSB";
	else
		endianness = "LSB";

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskEDM)
		elf_hwcap |= HWCAP_EDM;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskLMDMA)
		elf_hwcap |= HWCAP_LMDMA;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskPFM)
		elf_hwcap |= HWCAP_PFM;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskHSMP)
		elf_hwcap |= HWCAP_HSMP;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskTRACE)
		elf_hwcap |= HWCAP_TRACE;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskAUDIO)
		elf_hwcap |= HWCAP_AUDIO;

	if (__nds32__mfsr(NDS32_SR_MSC_CFG) & MSC_CFG_mskL2C)
		elf_hwcap |= HWCAP_L2C;

#ifdef CONFIG_HW_PRE
	if (__nds32__mfsr(NDS32_SR_MISC_CTL) & MISC_CTL_makHWPRE_EN)
		elf_hwcap |= HWCAP_HWPRE;
#endif

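	/* Enable the instruction and data caches unless they are disabled in Kconfig. */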
	tmp = __nds32__mfsr(NDS32_SR_CACHE_CTL);
	if (!IS_ENABLED(CONFIG_CPU_DCACHE_DISABLE))
		tmp |= CACHE_CTL_mskDC_EN;

	if (!IS_ENABLED(CONFIG_CPU_ICACHE_DISABLE))
		tmp |= CACHE_CTL_mskIC_EN;
	__nds32__mtsr_isb(tmp, NDS32_SR_CACHE_CTL);

	dump_cpu_info(smp_processor_id());
}

static void __init setup_memory(void)
{
	unsigned long ram_start_pfn;
	unsigned long free_ram_start_pfn;
	phys_addr_t memory_start, memory_end;

	memory_end = memory_start = 0;

	/* Find the main memory region that holds the kernel */
	memory_start = memblock_start_of_DRAM();
	memory_end = memblock_end_of_DRAM();

	if (!memory_end) {
		panic("No memory!");
	}

	ram_start_pfn = PFN_UP(memblock_start_of_DRAM());
	/* free_ram_start_pfn is the first page after the kernel */
	free_ram_start_pfn = PFN_UP(__pa(&_end));
	max_pfn = PFN_DOWN(memblock_end_of_DRAM());
	/* this may update max_pfn */
	if (max_pfn - ram_start_pfn <= MAXMEM_PFN)
		max_low_pfn = max_pfn;
	else {
		max_low_pfn = MAXMEM_PFN + ram_start_pfn;
		if (!IS_ENABLED(CONFIG_HIGHMEM))
			max_pfn = MAXMEM_PFN + ram_start_pfn;
	}
	/* high_memory is related to VMALLOC */
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);
	min_low_pfn = free_ram_start_pfn;

	/*
	 * Initialize the boot-time allocator (with low memory only).
	 *
	 * This makes the memory from the end of the kernel to the end of
	 * RAM usable.
	 */
	memblock_set_bottom_up(true);
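	/* Reserve the range from the start of RAM up to the first free page after the kernel. */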
	memblock_reserve(PFN_PHYS(ram_start_pfn), PFN_PHYS(free_ram_start_pfn - ram_start_pfn));

	early_init_fdt_reserve_self();
	early_init_fdt_scan_reserved_mem();

	memblock_dump_all();
}

void __init setup_arch(char **cmdline_p)
{
	early_init_devtree(__atags_pointer ? \
		phys_to_virt(__atags_pointer) : __dtb_start);

	setup_cpuinfo();

	setup_initial_init_mm(_stext, _etext, _edata, _end);

	/* setup bootmem allocator */
	setup_memory();

	/* paging_init() sets up the MMU and marks all pages as reserved */
	paging_init();

	/* invalidate all TLB entries because a new mapping has been created */
	__nds32__tlbop_flua();

	/* use the generic early-param parser */
	parse_early_param();

	unflatten_and_copy_device_tree();

	*cmdline_p = boot_command_line;
	early_trap_init();
}

static int c_show(struct seq_file *m, void *v)
{
	int i;

	seq_printf(m, "Processor\t: %c%ld (id %lu, rev %lu, cfg %lu)\n",
		   cpu_series, cpu_id, cpu_id, cpu_rev, cpu_cfgid);

	seq_printf(m, "L1I\t\t: %luKB/%luS/%luW/%luB\n",
		   CACHE_SET(ICACHE) * CACHE_WAY(ICACHE) *
		   CACHE_LINE_SIZE(ICACHE) / 1024, CACHE_SET(ICACHE),
		   CACHE_WAY(ICACHE), CACHE_LINE_SIZE(ICACHE));

	seq_printf(m, "L1D\t\t: %luKB/%luS/%luW/%luB\n",
		   CACHE_SET(DCACHE) * CACHE_WAY(DCACHE) *
		   CACHE_LINE_SIZE(DCACHE) / 1024, CACHE_SET(DCACHE),
		   CACHE_WAY(DCACHE), CACHE_LINE_SIZE(DCACHE));

	seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
		   loops_per_jiffy / (500000 / HZ),
		   (loops_per_jiffy / (5000 / HZ)) % 100);

	/* dump out the processor features */
	seq_puts(m, "Features\t: ");

	for (i = 0; hwcap_str[i]; i++)
		if (elf_hwcap & (1 << i))
			seq_printf(m, "%s ", hwcap_str[i]);

	seq_puts(m, "\n\n");

	return 0;
}

static void *c_start(struct seq_file *m, loff_t *pos)
{
	return *pos < 1 ? (void *)1 : NULL;
}

static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
	++*pos;
	return NULL;
}

static void c_stop(struct seq_file *m, void *v)
{
}

struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show
};