1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copied from arch/arm64/kernel/cpufeature.c
4  *
5  * Copyright (C) 2015 ARM Ltd.
6  * Copyright (C) 2017 SiFive
7  */
8 
9 #include <linux/acpi.h>
10 #include <linux/bitmap.h>
11 #include <linux/ctype.h>
12 #include <linux/log2.h>
13 #include <linux/memory.h>
14 #include <linux/module.h>
15 #include <linux/of.h>
16 #include <asm/acpi.h>
17 #include <asm/alternative.h>
18 #include <asm/cacheflush.h>
19 #include <asm/cpufeature.h>
20 #include <asm/hwcap.h>
21 #include <asm/hwprobe.h>
22 #include <asm/patch.h>
23 #include <asm/processor.h>
24 #include <asm/sbi.h>
25 #include <asm/vector.h>
26 
27 #include "copy-unaligned.h"
28 
29 #define NUM_ALPHA_EXTS ('z' - 'a' + 1)
30 
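/*
 * Parameters for the misaligned access probe below: each copy routine is
 * timed for (1 << MISALIGNED_ACCESS_JIFFIES_LG2) jiffies over a 16 KiB
 * buffer whose halves hold the deliberately misaligned destination and
 * source; the 0x80 bytes of slack absorb the misalignment offsets.
 */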
31 #define MISALIGNED_ACCESS_JIFFIES_LG2 1
32 #define MISALIGNED_BUFFER_SIZE 0x4000
33 #define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
34 
35 unsigned long elf_hwcap __read_mostly;
36 
37 /* Host ISA bitmap */
38 static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
39 
40 /* Per-cpu ISA extensions. */
41 struct riscv_isainfo hart_isa[NR_CPUS];
42 
43 /* Per-cpu unaligned access speed, a RISCV_HWPROBE_MISALIGNED_* value set by check_unaligned_access() */
44 DEFINE_PER_CPU(long, misaligned_access_speed);
45 
46 /**
47  * riscv_isa_extension_base() - Get base extension word
48  *
49  * @isa_bitmap: ISA bitmap to use
50  * Return: base extension word as unsigned long value
51  *
52  * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
53  */
54 unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap)
55 {
56 	if (!isa_bitmap)
57 		return riscv_isa[0];
58 	return isa_bitmap[0];
59 }
60 EXPORT_SYMBOL_GPL(riscv_isa_extension_base);
61 
62 /**
63  * __riscv_isa_extension_available() - Check whether a given extension
64  * is available or not
65  *
66  * @isa_bitmap: ISA bitmap to use
67  * @bit: bit position of the desired extension
68  * Return: true or false
69  *
70  * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used.
71  */
72 bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit)
73 {
74 	const unsigned long *bmap = (isa_bitmap) ? isa_bitmap : riscv_isa;
75 
76 	if (bit >= RISCV_ISA_EXT_MAX)
77 		return false;
78 
79 	return test_bit(bit, bmap) ? true : false;
80 }
81 EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
82 
83 static bool riscv_isa_extension_check(int id)
84 {
85 	switch (id) {
86 	case RISCV_ISA_EXT_ZICBOM:
87 		if (!riscv_cbom_block_size) {
88 			pr_err("Zicbom detected in ISA string, disabling as no cbom-block-size found\n");
89 			return false;
90 		} else if (!is_power_of_2(riscv_cbom_block_size)) {
91 			pr_err("Zicbom disabled as cbom-block-size present, but is not a power-of-2\n");
92 			return false;
93 		}
94 		return true;
95 	case RISCV_ISA_EXT_ZICBOZ:
96 		if (!riscv_cboz_block_size) {
97 			pr_err("Zicboz detected in ISA string, disabling as no cboz-block-size found\n");
98 			return false;
99 		} else if (!is_power_of_2(riscv_cboz_block_size)) {
100 			pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n");
101 			return false;
102 		}
103 		return true;
104 	case RISCV_ISA_EXT_INVALID:
105 		return false;
106 	}
107 
108 	return true;
109 }
110 
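/*
 * Each entry carries two stringified copies of the extension name: .name is
 * matched against "riscv,isa" ISA strings and .property against entries of
 * the "riscv,isa-extensions" devicetree property.
 */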
111 #define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size) {	\
112 	.name = #_name,								\
113 	.property = #_name,							\
114 	.id = _id,								\
115 	.subset_ext_ids = _subset_exts,						\
116 	.subset_ext_size = _subset_exts_size					\
117 }
118 
119 #define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0)
120 
121 /* Used to declare a pure "bundle" extension, i.e. one that only groups other extensions (Zk for instance) */
122 #define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \
123 	_RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, ARRAY_SIZE(_bundled_exts))
124 
125 /* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */
126 #define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \
127 	_RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts))
128 
129 static const unsigned int riscv_zk_bundled_exts[] = {
130 	RISCV_ISA_EXT_ZBKB,
131 	RISCV_ISA_EXT_ZBKC,
132 	RISCV_ISA_EXT_ZBKX,
133 	RISCV_ISA_EXT_ZKND,
134 	RISCV_ISA_EXT_ZKNE,
135 	RISCV_ISA_EXT_ZKR,
136 	RISCV_ISA_EXT_ZKT,
137 };
138 
139 static const unsigned int riscv_zkn_bundled_exts[] = {
140 	RISCV_ISA_EXT_ZBKB,
141 	RISCV_ISA_EXT_ZBKC,
142 	RISCV_ISA_EXT_ZBKX,
143 	RISCV_ISA_EXT_ZKND,
144 	RISCV_ISA_EXT_ZKNE,
145 	RISCV_ISA_EXT_ZKNH,
146 };
147 
148 static const unsigned int riscv_zks_bundled_exts[] = {
149 	RISCV_ISA_EXT_ZBKB,
150 	RISCV_ISA_EXT_ZBKC,
151 	RISCV_ISA_EXT_ZKSED,
152 	RISCV_ISA_EXT_ZKSH
153 };
154 
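/*
 * The Zvkn and Zvks members are spelled out as macros so that the same lists
 * can be reused by the zvknc/zvkng and zvksc/zvksg bundles below.
 */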
155 #define RISCV_ISA_EXT_ZVKN	\
156 	RISCV_ISA_EXT_ZVKNED,	\
157 	RISCV_ISA_EXT_ZVKNHB,	\
158 	RISCV_ISA_EXT_ZVKB,	\
159 	RISCV_ISA_EXT_ZVKT
160 
161 static const unsigned int riscv_zvkn_bundled_exts[] = {
162 	RISCV_ISA_EXT_ZVKN
163 };
164 
165 static const unsigned int riscv_zvknc_bundled_exts[] = {
166 	RISCV_ISA_EXT_ZVKN,
167 	RISCV_ISA_EXT_ZVBC
168 };
169 
170 static const unsigned int riscv_zvkng_bundled_exts[] = {
171 	RISCV_ISA_EXT_ZVKN,
172 	RISCV_ISA_EXT_ZVKG
173 };
174 
175 #define RISCV_ISA_EXT_ZVKS	\
176 	RISCV_ISA_EXT_ZVKSED,	\
177 	RISCV_ISA_EXT_ZVKSH,	\
178 	RISCV_ISA_EXT_ZVKB,	\
179 	RISCV_ISA_EXT_ZVKT
180 
181 static const unsigned int riscv_zvks_bundled_exts[] = {
182 	RISCV_ISA_EXT_ZVKS
183 };
184 
185 static const unsigned int riscv_zvksc_bundled_exts[] = {
186 	RISCV_ISA_EXT_ZVKS,
187 	RISCV_ISA_EXT_ZVBC
188 };
189 
190 static const unsigned int riscv_zvksg_bundled_exts[] = {
191 	RISCV_ISA_EXT_ZVKS,
192 	RISCV_ISA_EXT_ZVKG
193 };
194 
195 static const unsigned int riscv_zvbb_exts[] = {
196 	RISCV_ISA_EXT_ZVKB
197 };
198 
199 /*
200  * The canonical order of ISA extension names in the ISA string is defined in
201  * chapter 27 of the unprivileged specification.
202  *
203  * Ordinarily, for in-kernel data structures, this order is unimportant but
204  * riscv_isa_ext[] defines the order of the ISA string in /proc/cpuinfo.
205  *
206  * The specification uses vague wording, such as should, when it comes to
207  * ordering, so for our purposes the following rules apply:
208  *
209  * 1. All multi-letter extensions must be separated from other extensions by an
210  *    underscore.
211  *
212  * 2. Additional standard extensions (starting with 'Z') must be sorted after
213  *    single-letter extensions and before any higher-privileged extensions.
214  *
215  * 3. The first letter following the 'Z' conventionally indicates the most
216  *    closely related alphabetical extension category, IMAFDQLCBKJTPVH.
217  *    If multiple 'Z' extensions are named, they must be ordered first by
218  *    category, then alphabetically within a category.
219  *
220  * 4. Standard supervisor-level extensions (starting with 'S') must be listed
221  *    after standard unprivileged extensions.  If multiple supervisor-level
222  *    extensions are listed, they must be ordered alphabetically.
223  *
224  * 5. Standard machine-level extensions (starting with 'Zxm') must be listed
225  *    after any lower-privileged, standard extensions.  If multiple
226  *    machine-level extensions are listed, they must be ordered
227  *    alphabetically.
228  *
229  * 6. Non-standard extensions (starting with 'X') must be listed after all
230  *    standard extensions. If multiple non-standard extensions are listed, they
231  *    must be ordered alphabetically.
232  *
233  * An example string following the order is:
234  *    rv64imadc_zifoo_zigoo_zafoo_sbar_scar_zxmbaz_xqux_xrux
235  *
236  * New entries to this struct should follow the ordering rules described above.
237  */
238 const struct riscv_isa_ext_data riscv_isa_ext[] = {
239 	__RISCV_ISA_EXT_DATA(i, RISCV_ISA_EXT_i),
240 	__RISCV_ISA_EXT_DATA(m, RISCV_ISA_EXT_m),
241 	__RISCV_ISA_EXT_DATA(a, RISCV_ISA_EXT_a),
242 	__RISCV_ISA_EXT_DATA(f, RISCV_ISA_EXT_f),
243 	__RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d),
244 	__RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q),
245 	__RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c),
246 	__RISCV_ISA_EXT_DATA(b, RISCV_ISA_EXT_b),
247 	__RISCV_ISA_EXT_DATA(k, RISCV_ISA_EXT_k),
248 	__RISCV_ISA_EXT_DATA(j, RISCV_ISA_EXT_j),
249 	__RISCV_ISA_EXT_DATA(p, RISCV_ISA_EXT_p),
250 	__RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v),
251 	__RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h),
252 	__RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
253 	__RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ),
254 	__RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
255 	__RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND),
256 	__RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
257 	__RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
258 	__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
259 	__RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM),
260 	__RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA),
261 	__RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
262 	__RISCV_ISA_EXT_DATA(zbc, RISCV_ISA_EXT_ZBC),
263 	__RISCV_ISA_EXT_DATA(zbkb, RISCV_ISA_EXT_ZBKB),
264 	__RISCV_ISA_EXT_DATA(zbkc, RISCV_ISA_EXT_ZBKC),
265 	__RISCV_ISA_EXT_DATA(zbkx, RISCV_ISA_EXT_ZBKX),
266 	__RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
267 	__RISCV_ISA_EXT_BUNDLE(zk, riscv_zk_bundled_exts),
268 	__RISCV_ISA_EXT_BUNDLE(zkn, riscv_zkn_bundled_exts),
269 	__RISCV_ISA_EXT_DATA(zknd, RISCV_ISA_EXT_ZKND),
270 	__RISCV_ISA_EXT_DATA(zkne, RISCV_ISA_EXT_ZKNE),
271 	__RISCV_ISA_EXT_DATA(zknh, RISCV_ISA_EXT_ZKNH),
272 	__RISCV_ISA_EXT_DATA(zkr, RISCV_ISA_EXT_ZKR),
273 	__RISCV_ISA_EXT_BUNDLE(zks, riscv_zks_bundled_exts),
274 	__RISCV_ISA_EXT_DATA(zkt, RISCV_ISA_EXT_ZKT),
275 	__RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED),
276 	__RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH),
277 	__RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts),
278 	__RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC),
279 	__RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB),
280 	__RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG),
281 	__RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts),
282 	__RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts),
283 	__RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED),
284 	__RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts),
285 	__RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA),
286 	__RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB),
287 	__RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts),
288 	__RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts),
289 	__RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED),
290 	__RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH),
291 	__RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts),
292 	__RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT),
293 	__RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
294 	__RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN),
295 	__RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
296 	__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
297 	__RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
298 	__RISCV_ISA_EXT_DATA(svinval, RISCV_ISA_EXT_SVINVAL),
299 	__RISCV_ISA_EXT_DATA(svnapot, RISCV_ISA_EXT_SVNAPOT),
300 	__RISCV_ISA_EXT_DATA(svpbmt, RISCV_ISA_EXT_SVPBMT),
301 };
302 
303 const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext);
304 
305 static void __init match_isa_ext(const struct riscv_isa_ext_data *ext, const char *name,
306 				 const char *name_end, struct riscv_isainfo *isainfo)
307 {
308 	if ((name_end - name == strlen(ext->name)) &&
309 	     !strncasecmp(name, ext->name, name_end - name)) {
310 		/*
311 		 * If this is a bundle, enable all the ISA extensions that
312 		 * comprise the bundle.
313 		 */
314 		if (ext->subset_ext_size) {
315 			for (int i = 0; i < ext->subset_ext_size; i++) {
316 				if (riscv_isa_extension_check(ext->subset_ext_ids[i]))
317 					set_bit(ext->subset_ext_ids[i], isainfo->isa);
318 			}
319 		}
320 
321 		/*
322 		 * This is valid even for bundle extensions, which use the RISCV_ISA_EXT_INVALID
323 		 * id and are therefore rejected by riscv_isa_extension_check().
324 		 */
325 		if (riscv_isa_extension_check(ext->id))
326 			set_bit(ext->id, isainfo->isa);
327 	}
328 }
329 
330 static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo,
331 					  unsigned long *isa2hwcap, const char *isa)
332 {
333 	/*
334 	 * For all possible cpus, we have already validated in
335 	 * the boot process that their ISA strings begin with "rv" and
336 	 * whichever of "32"/"64" this kernel supports, so that
337 	 * prefix can be skipped here.
338 	 */
339 	isa += 4;
340 
341 	while (*isa) {
342 		const char *ext = isa++;
343 		const char *ext_end = isa;
344 		bool ext_long = false, ext_err = false;
345 
346 		switch (*ext) {
347 		case 's':
348 			/*
349 			 * Workaround for invalid single-letter 's' & 'u' (QEMU).
350 			 * No need to set the bit in riscv_isa as 's' & 'u' are
351 			 * not valid ISA extensions. It works until a multi-letter
352 			 * extension starting with "Su" appears.
353 			 */
354 			if (ext[-1] != '_' && ext[1] == 'u') {
355 				++isa;
356 				ext_err = true;
357 				break;
358 			}
359 			fallthrough;
360 		case 'S':
361 		case 'x':
362 		case 'X':
363 		case 'z':
364 		case 'Z':
365 			/*
366 			 * Before attempting to parse the extension itself, we find its end.
367 			 * As multi-letter extensions must be split from other multi-letter
368 			 * extensions with an "_", the end of a multi-letter extension will
369 			 * either be the null character or the "_" at the start of the next
370 			 * multi-letter extension.
371 			 *
372 			 * Next, as the extension's version is currently ignored, we
373 			 * eliminate that portion. This is done by parsing backwards from
374 			 * the end of the extension, removing any numbers. This may be a
375 			 * major or minor number however, so the process is repeated if a
376 			 * minor number was found.
377 			 *
378 			 * ext_end is intended to represent the first character *after* the
379 			 * name portion of an extension, but will be decremented to the last
380 			 * character itself while eliminating the extension's version number.
381 			 * A simple re-increment solves this problem.
382 			 */
383 			ext_long = true;
384 			for (; *isa && *isa != '_'; ++isa)
385 				if (unlikely(!isalnum(*isa)))
386 					ext_err = true;
387 
388 			ext_end = isa;
389 			if (unlikely(ext_err))
390 				break;
391 
392 			if (!isdigit(ext_end[-1]))
393 				break;
394 
395 			while (isdigit(*--ext_end))
396 				;
397 
398 			if (tolower(ext_end[0]) != 'p' || !isdigit(ext_end[-1])) {
399 				++ext_end;
400 				break;
401 			}
402 
403 			while (isdigit(*--ext_end))
404 				;
405 
406 			++ext_end;
407 			break;
408 		default:
409 			/*
410 			 * Things are a little easier for single-letter extensions, as they
411 			 * are parsed forwards.
412 			 *
413 			 * After checking that our starting position is valid, we need to
414 			 * ensure that, when isa was incremented at the start of the loop,
415 			 * it arrived at the start of the next extension.
416 			 *
417 			 * If we are already on a non-digit, there is nothing to do. Either
418 			 * we have a multi-letter extension's _, or the start of an
419 			 * extension.
420 			 *
421 			 * Otherwise we have found the current extension's major version
422 			 * number. Parse past it, and a subsequent p/minor version number
423 			 * if present. The `p` extension must not appear immediately after
424 			 * a number, so there is no fear of missing it.
425 			 *
426 			 */
427 			if (unlikely(!isalpha(*ext))) {
428 				ext_err = true;
429 				break;
430 			}
431 
432 			if (!isdigit(*isa))
433 				break;
434 
435 			while (isdigit(*++isa))
436 				;
437 
438 			if (tolower(*isa) != 'p')
439 				break;
440 
441 			if (!isdigit(*++isa)) {
442 				--isa;
443 				break;
444 			}
445 
446 			while (isdigit(*++isa))
447 				;
448 
449 			break;
450 		}
451 
452 		/*
453 		 * The parser expects that at the start of an iteration isa points to the
454 		 * first character of the next extension. As we stop parsing an extension
455 		 * on meeting a non-alphanumeric character, an extra increment is needed
456 		 * where the succeeding extension is a multi-letter prefixed with an "_".
457 		 */
458 		if (*isa == '_')
459 			++isa;
460 
461 		if (unlikely(ext_err))
462 			continue;
463 		if (!ext_long) {
464 			int nr = tolower(*ext) - 'a';
465 
466 			if (riscv_isa_extension_check(nr)) {
467 				*this_hwcap |= isa2hwcap[nr];
468 				set_bit(nr, isainfo->isa);
469 			}
470 		} else {
471 			for (int i = 0; i < riscv_isa_ext_count; i++)
472 				match_isa_ext(&riscv_isa_ext[i], ext, ext_end, isainfo);
473 		}
474 	}
475 }
476 
477 static void __init riscv_fill_hwcap_from_isa_string(unsigned long *isa2hwcap)
478 {
479 	struct device_node *node;
480 	const char *isa;
481 	int rc;
482 	struct acpi_table_header *rhct;
483 	acpi_status status;
484 	unsigned int cpu;
485 
486 	if (!acpi_disabled) {
487 		status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
488 		if (ACPI_FAILURE(status))
489 			return;
490 	}
491 
492 	for_each_possible_cpu(cpu) {
493 		struct riscv_isainfo *isainfo = &hart_isa[cpu];
494 		unsigned long this_hwcap = 0;
495 
496 		if (acpi_disabled) {
497 			node = of_cpu_device_node_get(cpu);
498 			if (!node) {
499 				pr_warn("Unable to find cpu node\n");
500 				continue;
501 			}
502 
503 			rc = of_property_read_string(node, "riscv,isa", &isa);
504 			of_node_put(node);
505 			if (rc) {
506 				pr_warn("Unable to find \"riscv,isa\" devicetree entry\n");
507 				continue;
508 			}
509 		} else {
510 			rc = acpi_get_riscv_isa(rhct, cpu, &isa);
511 			if (rc < 0) {
512 				pr_warn("Unable to get ISA for the hart - %d\n", cpu);
513 				continue;
514 			}
515 		}
516 
517 		riscv_parse_isa_string(&this_hwcap, isainfo, isa2hwcap, isa);
518 
519 		/*
520 		 * These extensions were part of the base ISA when the
521 		 * port & dt-bindings were upstreamed, and so can be set
522 		 * unconditionally wherever `i` appears in riscv,isa on DT systems.
523 		 */
524 		if (acpi_disabled) {
525 			set_bit(RISCV_ISA_EXT_ZICSR, isainfo->isa);
526 			set_bit(RISCV_ISA_EXT_ZIFENCEI, isainfo->isa);
527 			set_bit(RISCV_ISA_EXT_ZICNTR, isainfo->isa);
528 			set_bit(RISCV_ISA_EXT_ZIHPM, isainfo->isa);
529 		}
530 
531 		/*
532 		 * "V" in ISA strings is ambiguous in practice: it should mean
533 		 * just the standard V-1.0 but vendors aren't well behaved.
534 		 * Many vendors with T-Head CPU cores which implement the 0.7.1
535 		 * version of the vector specification put "v" into their DTs.
536 		 * CPU cores with the ratified spec will contain non-zero
537 		 * marchid.
538 		 */
539 		if (acpi_disabled && riscv_cached_mvendorid(cpu) == THEAD_VENDOR_ID &&
540 		    riscv_cached_marchid(cpu) == 0x0) {
541 			this_hwcap &= ~isa2hwcap[RISCV_ISA_EXT_v];
542 			clear_bit(RISCV_ISA_EXT_v, isainfo->isa);
543 		}
544 
545 		/*
546 		 * All "okay" harts should have the same ISA. Set HWCAP based on
547 		 * the common capabilities of every "okay" hart, in case they
548 		 * don't.
549 		 */
550 		if (elf_hwcap)
551 			elf_hwcap &= this_hwcap;
552 		else
553 			elf_hwcap = this_hwcap;
554 
555 		if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
556 			bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
557 		else
558 			bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
559 	}
560 
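	/* When booted via ACPI, drop the reference on the RHCT taken above. */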
561 	if (!acpi_disabled && rhct)
562 		acpi_put_table((struct acpi_table_header *)rhct);
563 }
564 
565 static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap)
566 {
567 	unsigned int cpu;
568 
569 	for_each_possible_cpu(cpu) {
570 		unsigned long this_hwcap = 0;
571 		struct device_node *cpu_node;
572 		struct riscv_isainfo *isainfo = &hart_isa[cpu];
573 
574 		cpu_node = of_cpu_device_node_get(cpu);
575 		if (!cpu_node) {
576 			pr_warn("Unable to find cpu node\n");
577 			continue;
578 		}
579 
580 		if (!of_property_present(cpu_node, "riscv,isa-extensions")) {
581 			of_node_put(cpu_node);
582 			continue;
583 		}
584 
585 		for (int i = 0; i < riscv_isa_ext_count; i++) {
586 			const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i];
587 
588 			if (of_property_match_string(cpu_node, "riscv,isa-extensions",
589 						     ext->property) < 0)
590 				continue;
591 
592 			if (ext->subset_ext_size) {
593 				for (int j = 0; j < ext->subset_ext_size; j++) {
594 					if (riscv_isa_extension_check(ext->subset_ext_ids[j]))
595 						set_bit(ext->subset_ext_ids[j], isainfo->isa);
596 				}
597 			}
598 
599 			if (riscv_isa_extension_check(ext->id)) {
600 				set_bit(ext->id, isainfo->isa);
601 
602 				/* Only single letter extensions get set in hwcap */
603 				if (strnlen(riscv_isa_ext[i].name, 2) == 1)
604 					this_hwcap |= isa2hwcap[riscv_isa_ext[i].id];
605 			}
606 		}
607 
608 		of_node_put(cpu_node);
609 
610 		/*
611 		 * All "okay" harts should have the same ISA. Set HWCAP based on
612 		 * the common capabilities of every "okay" hart, in case they don't.
613 		 */
614 		if (elf_hwcap)
615 			elf_hwcap &= this_hwcap;
616 		else
617 			elf_hwcap = this_hwcap;
618 
619 		if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
620 			bitmap_copy(riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
621 		else
622 			bitmap_and(riscv_isa, riscv_isa, isainfo->isa, RISCV_ISA_EXT_MAX);
623 	}
624 
625 	if (bitmap_empty(riscv_isa, RISCV_ISA_EXT_MAX))
626 		return -ENOENT;
627 
628 	return 0;
629 }
630 
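/*
 * When CONFIG_RISCV_ISA_FALLBACK is disabled, the deprecated "riscv,isa"
 * string is only consulted if "riscv_isa_fallback" is passed on the kernel
 * command line.
 */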
631 #ifdef CONFIG_RISCV_ISA_FALLBACK
632 bool __initdata riscv_isa_fallback = true;
633 #else
634 bool __initdata riscv_isa_fallback;
635 static int __init riscv_isa_fallback_setup(char *__unused)
636 {
637 	riscv_isa_fallback = true;
638 	return 1;
639 }
640 early_param("riscv_isa_fallback", riscv_isa_fallback_setup);
641 #endif
642 
643 void __init riscv_fill_hwcap(void)
644 {
645 	char print_str[NUM_ALPHA_EXTS + 1];
646 	unsigned long isa2hwcap[26] = {0};
647 	int i, j;
648 
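	/* Map the single-letter base extensions onto their ELF hwcap bits. */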
649 	isa2hwcap['i' - 'a'] = COMPAT_HWCAP_ISA_I;
650 	isa2hwcap['m' - 'a'] = COMPAT_HWCAP_ISA_M;
651 	isa2hwcap['a' - 'a'] = COMPAT_HWCAP_ISA_A;
652 	isa2hwcap['f' - 'a'] = COMPAT_HWCAP_ISA_F;
653 	isa2hwcap['d' - 'a'] = COMPAT_HWCAP_ISA_D;
654 	isa2hwcap['c' - 'a'] = COMPAT_HWCAP_ISA_C;
655 	isa2hwcap['v' - 'a'] = COMPAT_HWCAP_ISA_V;
656 
657 	if (!acpi_disabled) {
658 		riscv_fill_hwcap_from_isa_string(isa2hwcap);
659 	} else {
660 		int ret = riscv_fill_hwcap_from_ext_list(isa2hwcap);
661 
662 		if (ret && riscv_isa_fallback) {
663 			pr_info("Falling back to deprecated \"riscv,isa\"\n");
664 			riscv_fill_hwcap_from_isa_string(isa2hwcap);
665 		}
666 	}
667 
668 	/*
669 	 * We don't support systems with F but without D, so mask those out
670 	 * here.
671 	 */
672 	if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) {
673 		pr_info("This kernel does not support systems with F but not D\n");
674 		elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
675 	}
676 
677 	if (elf_hwcap & COMPAT_HWCAP_ISA_V) {
678 		riscv_v_setup_vsize();
679 		/*
680 		 * ISA string in device tree might have 'v' flag, but
681 		 * CONFIG_RISCV_ISA_V is disabled in kernel.
682 		 * Clear V flag in elf_hwcap if CONFIG_RISCV_ISA_V is disabled.
683 		 */
684 		if (!IS_ENABLED(CONFIG_RISCV_ISA_V))
685 			elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
686 	}
687 
688 	memset(print_str, 0, sizeof(print_str));
689 	for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
690 		if (riscv_isa[0] & BIT_MASK(i))
691 			print_str[j++] = (char)('a' + i);
692 	pr_info("riscv: base ISA extensions %s\n", print_str);
693 
694 	memset(print_str, 0, sizeof(print_str));
695 	for (i = 0, j = 0; i < NUM_ALPHA_EXTS; i++)
696 		if (elf_hwcap & BIT_MASK(i))
697 			print_str[j++] = (char)('a' + i);
698 	pr_info("riscv: ELF capabilities %s\n", print_str);
699 }
700 
701 unsigned long riscv_get_elf_hwcap(void)
702 {
703 	unsigned long hwcap;
704 
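	/* ELF hwcap only exposes the single-letter base extensions (the low RISCV_ISA_EXT_BASE bits). */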
705 	hwcap = (elf_hwcap & ((1UL << RISCV_ISA_EXT_BASE) - 1));
706 
707 	if (!riscv_v_vstate_ctrl_user_allowed())
708 		hwcap &= ~COMPAT_HWCAP_ISA_V;
709 
710 	return hwcap;
711 }
712 
713 void check_unaligned_access(int cpu)
714 {
715 	u64 start_cycles, end_cycles;
716 	u64 word_cycles;
717 	u64 byte_cycles;
718 	int ratio;
719 	unsigned long start_jiffies, now;
720 	struct page *page;
721 	void *dst;
722 	void *src;
723 	long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
724 
725 	/* We are already set since the last check */
726 	if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
727 		return;
728 
729 	page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
730 	if (!page) {
731 		pr_warn("Can't alloc pages to measure memcpy performance\n");
732 		return;
733 	}
734 
735 	/* Make an unaligned destination buffer. */
736 	dst = (void *)((unsigned long)page_address(page) | 0x1);
737 	/* Unalign src as well, but differently (off by 1 + 2 = 3). */
738 	src = dst + (MISALIGNED_BUFFER_SIZE / 2);
739 	src += 2;
740 	word_cycles = -1ULL;
741 	/* Do a warmup. */
742 	__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
743 	preempt_disable();
744 	start_jiffies = jiffies;
745 	while ((now = jiffies) == start_jiffies)
746 		cpu_relax();
747 
748 	/*
749 	 * For a fixed amount of time, repeatedly try the function, and take
750 	 * the best time in cycles as the measurement.
751 	 */
752 	while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
753 		start_cycles = get_cycles64();
754 		/* Ensure the CSR read can't reorder with respect to the copy. */
755 		mb();
756 		__riscv_copy_words_unaligned(dst, src, MISALIGNED_COPY_SIZE);
757 		/* Ensure the copy ends before the end time is snapped. */
758 		mb();
759 		end_cycles = get_cycles64();
760 		if ((end_cycles - start_cycles) < word_cycles)
761 			word_cycles = end_cycles - start_cycles;
762 	}
763 
764 	byte_cycles = -1ULL;
765 	__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
766 	start_jiffies = jiffies;
767 	while ((now = jiffies) == start_jiffies)
768 		cpu_relax();
769 
770 	while (time_before(jiffies, now + (1 << MISALIGNED_ACCESS_JIFFIES_LG2))) {
771 		start_cycles = get_cycles64();
772 		mb();
773 		__riscv_copy_bytes_unaligned(dst, src, MISALIGNED_COPY_SIZE);
774 		mb();
775 		end_cycles = get_cycles64();
776 		if ((end_cycles - start_cycles) < byte_cycles)
777 			byte_cycles = end_cycles - start_cycles;
778 	}
779 
780 	preempt_enable();
781 
782 	/* Don't divide by zero. */
783 	if (!word_cycles || !byte_cycles) {
784 		pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
785 			cpu);
786 
787 		goto out;
788 	}
789 
790 	if (word_cycles < byte_cycles)
791 		speed = RISCV_HWPROBE_MISALIGNED_FAST;
792 
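	/* Byte-to-word cycle ratio scaled by 100, so it prints with two decimal places. */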
793 	ratio = div_u64((byte_cycles * 100), word_cycles);
794 	pr_info("cpu%d: Ratio of byte access time to unaligned word access is %d.%02d, unaligned accesses are %s\n",
795 		cpu,
796 		ratio / 100,
797 		ratio % 100,
798 		(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
799 
800 	per_cpu(misaligned_access_speed, cpu) = speed;
801 
802 out:
803 	__free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
804 }
805 
806 static int check_unaligned_access_boot_cpu(void)
807 {
808 	check_unaligned_access(0);
809 	return 0;
810 }
811 
812 arch_initcall(check_unaligned_access_boot_cpu);
813 
814 #ifdef CONFIG_RISCV_ALTERNATIVE
815 /*
816  * Alternative patch sites consider 48 bits when determining when to patch
817  * the old instruction sequence with the new. These bits are broken into a
818  * 16-bit vendor ID and a 32-bit patch ID. A non-zero vendor ID means the
819  * patch site is for an erratum, identified by the 32-bit patch ID. When
820  * the vendor ID is zero, the patch site is for a cpufeature. cpufeatures
821  * further break down patch ID into two 16-bit numbers. The lower 16 bits
822  * are the cpufeature ID and the upper 16 bits are used for a value specific
823  * to the cpufeature and patch site. If the upper 16 bits are zero, then it
824  * implies no specific value is specified. cpufeatures that want to control
825  * patching on a per-site basis will provide non-zero values and implement
826  * checks here. The checks return true when patching should be done, and
827  * false otherwise.
828  */
829 static bool riscv_cpufeature_patch_check(u16 id, u16 value)
830 {
831 	if (!value)
832 		return true;
833 
834 	switch (id) {
835 	case RISCV_ISA_EXT_ZICBOZ:
836 		/*
837 		 * Zicboz alternative patch sites provide the maximum
838 		 * supported block size order, or zero when it doesn't
839 		 * matter. If the current block size exceeds the maximum,
840 		 * then the alternative cannot be applied.
841 		 */
842 		return riscv_cboz_block_size <= (1U << value);
843 	}
844 
845 	return false;
846 }
847 
848 void __init_or_module riscv_cpufeature_patch_func(struct alt_entry *begin,
849 						  struct alt_entry *end,
850 						  unsigned int stage)
851 {
852 	struct alt_entry *alt;
853 	void *oldptr, *altptr;
854 	u16 id, value;
855 
856 	if (stage == RISCV_ALTERNATIVES_EARLY_BOOT)
857 		return;
858 
859 	for (alt = begin; alt < end; alt++) {
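		/* Non-zero vendor IDs mark erratum patch sites, which are not handled here. */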
860 		if (alt->vendor_id != 0)
861 			continue;
862 
863 		id = PATCH_ID_CPUFEATURE_ID(alt->patch_id);
864 
865 		if (id >= RISCV_ISA_EXT_MAX) {
866 			WARN(1, "This extension id:%d is not in ISA extension list", id);
867 			continue;
868 		}
869 
870 		if (!__riscv_isa_extension_available(NULL, id))
871 			continue;
872 
873 		value = PATCH_ID_CPUFEATURE_VALUE(alt->patch_id);
874 		if (!riscv_cpufeature_patch_check(id, value))
875 			continue;
876 
877 		oldptr = ALT_OLD_PTR(alt);
878 		altptr = ALT_ALT_PTR(alt);
879 
880 		mutex_lock(&text_mutex);
881 		patch_text_nosync(oldptr, altptr, alt->alt_len);
882 		riscv_alternative_fix_offsets(oldptr, alt->alt_len, oldptr - altptr);
883 		mutex_unlock(&text_mutex);
884 	}
885 }
886 #endif
887