1 // SPDX-License-Identifier: GPL-2.0+ 2 // 3 // Security related flags and so on. 4 // 5 // Copyright 2018, Michael Ellerman, IBM Corporation. 6 7 #include <linux/cpu.h> 8 #include <linux/kernel.h> 9 #include <linux/device.h> 10 #include <linux/nospec.h> 11 #include <linux/prctl.h> 12 #include <linux/seq_buf.h> 13 14 #include <asm/asm-prototypes.h> 15 #include <asm/code-patching.h> 16 #include <asm/debugfs.h> 17 #include <asm/security_features.h> 18 #include <asm/setup.h> 19 #include <asm/inst.h> 20 21 22 u64 powerpc_security_features __read_mostly = SEC_FTR_DEFAULT; 23 24 enum branch_cache_flush_type { 25 BRANCH_CACHE_FLUSH_NONE = 0x1, 26 BRANCH_CACHE_FLUSH_SW = 0x2, 27 BRANCH_CACHE_FLUSH_HW = 0x4, 28 }; 29 static enum branch_cache_flush_type count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE; 30 static enum branch_cache_flush_type link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE; 31 32 bool barrier_nospec_enabled; 33 static bool no_nospec; 34 static bool btb_flush_enabled; 35 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) 36 static bool no_spectrev2; 37 #endif 38 enable_barrier_nospec(bool enable)39 static void enable_barrier_nospec(bool enable) 40 { 41 barrier_nospec_enabled = enable; 42 do_barrier_nospec_fixups(enable); 43 } 44 setup_barrier_nospec(void)45 void setup_barrier_nospec(void) 46 { 47 bool enable; 48 49 /* 50 * It would make sense to check SEC_FTR_SPEC_BAR_ORI31 below as well. 51 * But there's a good reason not to. The two flags we check below are 52 * both are enabled by default in the kernel, so if the hcall is not 53 * functional they will be enabled. 54 * On a system where the host firmware has been updated (so the ori 55 * functions as a barrier), but on which the hypervisor (KVM/Qemu) has 56 * not been updated, we would like to enable the barrier. Dropping the 57 * check for SEC_FTR_SPEC_BAR_ORI31 achieves that. The only downside is 58 * we potentially enable the barrier on systems where the host firmware 59 * is not updated, but that's harmless as it's a no-op. 60 */ 61 enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && 62 security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR); 63 64 if (!no_nospec && !cpu_mitigations_off()) 65 enable_barrier_nospec(enable); 66 } 67 handle_nospectre_v1(char * p)68 static int __init handle_nospectre_v1(char *p) 69 { 70 no_nospec = true; 71 72 return 0; 73 } 74 early_param("nospectre_v1", handle_nospectre_v1); 75 76 #ifdef CONFIG_DEBUG_FS barrier_nospec_set(void * data,u64 val)77 static int barrier_nospec_set(void *data, u64 val) 78 { 79 switch (val) { 80 case 0: 81 case 1: 82 break; 83 default: 84 return -EINVAL; 85 } 86 87 if (!!val == !!barrier_nospec_enabled) 88 return 0; 89 90 enable_barrier_nospec(!!val); 91 92 return 0; 93 } 94 barrier_nospec_get(void * data,u64 * val)95 static int barrier_nospec_get(void *data, u64 *val) 96 { 97 *val = barrier_nospec_enabled ? 1 : 0; 98 return 0; 99 } 100 101 DEFINE_DEBUGFS_ATTRIBUTE(fops_barrier_nospec, barrier_nospec_get, 102 barrier_nospec_set, "%llu\n"); 103 barrier_nospec_debugfs_init(void)104 static __init int barrier_nospec_debugfs_init(void) 105 { 106 debugfs_create_file_unsafe("barrier_nospec", 0600, 107 powerpc_debugfs_root, NULL, 108 &fops_barrier_nospec); 109 return 0; 110 } 111 device_initcall(barrier_nospec_debugfs_init); 112 security_feature_debugfs_init(void)113 static __init int security_feature_debugfs_init(void) 114 { 115 debugfs_create_x64("security_features", 0400, powerpc_debugfs_root, 116 &powerpc_security_features); 117 return 0; 118 } 119 device_initcall(security_feature_debugfs_init); 120 #endif /* CONFIG_DEBUG_FS */ 121 122 #if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3S_64) handle_nospectre_v2(char * p)123 static int __init handle_nospectre_v2(char *p) 124 { 125 no_spectrev2 = true; 126 127 return 0; 128 } 129 early_param("nospectre_v2", handle_nospectre_v2); 130 #endif /* CONFIG_PPC_FSL_BOOK3E || CONFIG_PPC_BOOK3S_64 */ 131 132 #ifdef CONFIG_PPC_FSL_BOOK3E setup_spectre_v2(void)133 void setup_spectre_v2(void) 134 { 135 if (no_spectrev2 || cpu_mitigations_off()) 136 do_btb_flush_fixups(); 137 else 138 btb_flush_enabled = true; 139 } 140 #endif /* CONFIG_PPC_FSL_BOOK3E */ 141 142 #ifdef CONFIG_PPC_BOOK3S_64 cpu_show_meltdown(struct device * dev,struct device_attribute * attr,char * buf)143 ssize_t cpu_show_meltdown(struct device *dev, struct device_attribute *attr, char *buf) 144 { 145 bool thread_priv; 146 147 thread_priv = security_ftr_enabled(SEC_FTR_L1D_THREAD_PRIV); 148 149 if (rfi_flush) { 150 struct seq_buf s; 151 seq_buf_init(&s, buf, PAGE_SIZE - 1); 152 153 seq_buf_printf(&s, "Mitigation: RFI Flush"); 154 if (thread_priv) 155 seq_buf_printf(&s, ", L1D private per thread"); 156 157 seq_buf_printf(&s, "\n"); 158 159 return s.len; 160 } 161 162 if (thread_priv) 163 return sprintf(buf, "Vulnerable: L1D private per thread\n"); 164 165 if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && 166 !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) 167 return sprintf(buf, "Not affected\n"); 168 169 return sprintf(buf, "Vulnerable\n"); 170 } 171 cpu_show_l1tf(struct device * dev,struct device_attribute * attr,char * buf)172 ssize_t cpu_show_l1tf(struct device *dev, struct device_attribute *attr, char *buf) 173 { 174 return cpu_show_meltdown(dev, attr, buf); 175 } 176 #endif 177 cpu_show_spectre_v1(struct device * dev,struct device_attribute * attr,char * buf)178 ssize_t cpu_show_spectre_v1(struct device *dev, struct device_attribute *attr, char *buf) 179 { 180 struct seq_buf s; 181 182 seq_buf_init(&s, buf, PAGE_SIZE - 1); 183 184 if (security_ftr_enabled(SEC_FTR_BNDS_CHK_SPEC_BAR)) { 185 if (barrier_nospec_enabled) 186 seq_buf_printf(&s, "Mitigation: __user pointer sanitization"); 187 else 188 seq_buf_printf(&s, "Vulnerable"); 189 190 if (security_ftr_enabled(SEC_FTR_SPEC_BAR_ORI31)) 191 seq_buf_printf(&s, ", ori31 speculation barrier enabled"); 192 193 seq_buf_printf(&s, "\n"); 194 } else 195 seq_buf_printf(&s, "Not affected\n"); 196 197 return s.len; 198 } 199 cpu_show_spectre_v2(struct device * dev,struct device_attribute * attr,char * buf)200 ssize_t cpu_show_spectre_v2(struct device *dev, struct device_attribute *attr, char *buf) 201 { 202 struct seq_buf s; 203 bool bcs, ccd; 204 205 seq_buf_init(&s, buf, PAGE_SIZE - 1); 206 207 bcs = security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED); 208 ccd = security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED); 209 210 if (bcs || ccd) { 211 seq_buf_printf(&s, "Mitigation: "); 212 213 if (bcs) 214 seq_buf_printf(&s, "Indirect branch serialisation (kernel only)"); 215 216 if (bcs && ccd) 217 seq_buf_printf(&s, ", "); 218 219 if (ccd) 220 seq_buf_printf(&s, "Indirect branch cache disabled"); 221 222 } else if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) { 223 seq_buf_printf(&s, "Mitigation: Software count cache flush"); 224 225 if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) 226 seq_buf_printf(&s, " (hardware accelerated)"); 227 228 } else if (btb_flush_enabled) { 229 seq_buf_printf(&s, "Mitigation: Branch predictor state flush"); 230 } else { 231 seq_buf_printf(&s, "Vulnerable"); 232 } 233 234 if (bcs || ccd || count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) { 235 if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE) 236 seq_buf_printf(&s, ", Software link stack flush"); 237 if (link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) 238 seq_buf_printf(&s, " (hardware accelerated)"); 239 } 240 241 seq_buf_printf(&s, "\n"); 242 243 return s.len; 244 } 245 246 #ifdef CONFIG_PPC_BOOK3S_64 247 /* 248 * Store-forwarding barrier support. 249 */ 250 251 static enum stf_barrier_type stf_enabled_flush_types; 252 static bool no_stf_barrier; 253 bool stf_barrier; 254 handle_no_stf_barrier(char * p)255 static int __init handle_no_stf_barrier(char *p) 256 { 257 pr_info("stf-barrier: disabled on command line."); 258 no_stf_barrier = true; 259 return 0; 260 } 261 262 early_param("no_stf_barrier", handle_no_stf_barrier); 263 stf_barrier_type_get(void)264 enum stf_barrier_type stf_barrier_type_get(void) 265 { 266 return stf_enabled_flush_types; 267 } 268 269 /* This is the generic flag used by other architectures */ handle_ssbd(char * p)270 static int __init handle_ssbd(char *p) 271 { 272 if (!p || strncmp(p, "auto", 5) == 0 || strncmp(p, "on", 2) == 0 ) { 273 /* Until firmware tells us, we have the barrier with auto */ 274 return 0; 275 } else if (strncmp(p, "off", 3) == 0) { 276 handle_no_stf_barrier(NULL); 277 return 0; 278 } else 279 return 1; 280 281 return 0; 282 } 283 early_param("spec_store_bypass_disable", handle_ssbd); 284 285 /* This is the generic flag used by other architectures */ handle_no_ssbd(char * p)286 static int __init handle_no_ssbd(char *p) 287 { 288 handle_no_stf_barrier(NULL); 289 return 0; 290 } 291 early_param("nospec_store_bypass_disable", handle_no_ssbd); 292 stf_barrier_enable(bool enable)293 static void stf_barrier_enable(bool enable) 294 { 295 if (enable) 296 do_stf_barrier_fixups(stf_enabled_flush_types); 297 else 298 do_stf_barrier_fixups(STF_BARRIER_NONE); 299 300 stf_barrier = enable; 301 } 302 setup_stf_barrier(void)303 void setup_stf_barrier(void) 304 { 305 enum stf_barrier_type type; 306 bool enable, hv; 307 308 hv = cpu_has_feature(CPU_FTR_HVMODE); 309 310 /* Default to fallback in case fw-features are not available */ 311 if (cpu_has_feature(CPU_FTR_ARCH_300)) 312 type = STF_BARRIER_EIEIO; 313 else if (cpu_has_feature(CPU_FTR_ARCH_207S)) 314 type = STF_BARRIER_SYNC_ORI; 315 else if (cpu_has_feature(CPU_FTR_ARCH_206)) 316 type = STF_BARRIER_FALLBACK; 317 else 318 type = STF_BARRIER_NONE; 319 320 enable = security_ftr_enabled(SEC_FTR_FAVOUR_SECURITY) && 321 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR) || 322 (security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && hv)); 323 324 if (type == STF_BARRIER_FALLBACK) { 325 pr_info("stf-barrier: fallback barrier available\n"); 326 } else if (type == STF_BARRIER_SYNC_ORI) { 327 pr_info("stf-barrier: hwsync barrier available\n"); 328 } else if (type == STF_BARRIER_EIEIO) { 329 pr_info("stf-barrier: eieio barrier available\n"); 330 } 331 332 stf_enabled_flush_types = type; 333 334 if (!no_stf_barrier && !cpu_mitigations_off()) 335 stf_barrier_enable(enable); 336 } 337 cpu_show_spec_store_bypass(struct device * dev,struct device_attribute * attr,char * buf)338 ssize_t cpu_show_spec_store_bypass(struct device *dev, struct device_attribute *attr, char *buf) 339 { 340 if (stf_barrier && stf_enabled_flush_types != STF_BARRIER_NONE) { 341 const char *type; 342 switch (stf_enabled_flush_types) { 343 case STF_BARRIER_EIEIO: 344 type = "eieio"; 345 break; 346 case STF_BARRIER_SYNC_ORI: 347 type = "hwsync"; 348 break; 349 case STF_BARRIER_FALLBACK: 350 type = "fallback"; 351 break; 352 default: 353 type = "unknown"; 354 } 355 return sprintf(buf, "Mitigation: Kernel entry/exit barrier (%s)\n", type); 356 } 357 358 if (!security_ftr_enabled(SEC_FTR_L1D_FLUSH_HV) && 359 !security_ftr_enabled(SEC_FTR_L1D_FLUSH_PR)) 360 return sprintf(buf, "Not affected\n"); 361 362 return sprintf(buf, "Vulnerable\n"); 363 } 364 ssb_prctl_get(struct task_struct * task)365 static int ssb_prctl_get(struct task_struct *task) 366 { 367 if (stf_enabled_flush_types == STF_BARRIER_NONE) 368 /* 369 * We don't have an explicit signal from firmware that we're 370 * vulnerable or not, we only have certain CPU revisions that 371 * are known to be vulnerable. 372 * 373 * We assume that if we're on another CPU, where the barrier is 374 * NONE, then we are not vulnerable. 375 */ 376 return PR_SPEC_NOT_AFFECTED; 377 else 378 /* 379 * If we do have a barrier type then we are vulnerable. The 380 * barrier is not a global or per-process mitigation, so the 381 * only value we can report here is PR_SPEC_ENABLE, which 382 * appears as "vulnerable" in /proc. 383 */ 384 return PR_SPEC_ENABLE; 385 386 return -EINVAL; 387 } 388 arch_prctl_spec_ctrl_get(struct task_struct * task,unsigned long which)389 int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which) 390 { 391 switch (which) { 392 case PR_SPEC_STORE_BYPASS: 393 return ssb_prctl_get(task); 394 default: 395 return -ENODEV; 396 } 397 } 398 399 #ifdef CONFIG_DEBUG_FS stf_barrier_set(void * data,u64 val)400 static int stf_barrier_set(void *data, u64 val) 401 { 402 bool enable; 403 404 if (val == 1) 405 enable = true; 406 else if (val == 0) 407 enable = false; 408 else 409 return -EINVAL; 410 411 /* Only do anything if we're changing state */ 412 if (enable != stf_barrier) 413 stf_barrier_enable(enable); 414 415 return 0; 416 } 417 stf_barrier_get(void * data,u64 * val)418 static int stf_barrier_get(void *data, u64 *val) 419 { 420 *val = stf_barrier ? 1 : 0; 421 return 0; 422 } 423 424 DEFINE_DEBUGFS_ATTRIBUTE(fops_stf_barrier, stf_barrier_get, stf_barrier_set, 425 "%llu\n"); 426 stf_barrier_debugfs_init(void)427 static __init int stf_barrier_debugfs_init(void) 428 { 429 debugfs_create_file_unsafe("stf_barrier", 0600, powerpc_debugfs_root, 430 NULL, &fops_stf_barrier); 431 return 0; 432 } 433 device_initcall(stf_barrier_debugfs_init); 434 #endif /* CONFIG_DEBUG_FS */ 435 update_branch_cache_flush(void)436 static void update_branch_cache_flush(void) 437 { 438 u32 *site; 439 440 #ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE 441 site = &patch__call_kvm_flush_link_stack; 442 // This controls the branch from guest_exit_cont to kvm_flush_link_stack 443 if (link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) { 444 patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); 445 } else { 446 // Could use HW flush, but that could also flush count cache 447 patch_branch_site(site, (u64)&kvm_flush_link_stack, BRANCH_SET_LINK); 448 } 449 #endif 450 451 // Patch out the bcctr first, then nop the rest 452 site = &patch__call_flush_branch_caches3; 453 patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); 454 site = &patch__call_flush_branch_caches2; 455 patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); 456 site = &patch__call_flush_branch_caches1; 457 patch_instruction_site(site, ppc_inst(PPC_INST_NOP)); 458 459 // This controls the branch from _switch to flush_branch_caches 460 if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE && 461 link_stack_flush_type == BRANCH_CACHE_FLUSH_NONE) { 462 // Nothing to be done 463 464 } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW && 465 link_stack_flush_type == BRANCH_CACHE_FLUSH_HW) { 466 // Patch in the bcctr last 467 site = &patch__call_flush_branch_caches1; 468 patch_instruction_site(site, ppc_inst(0x39207fff)); // li r9,0x7fff 469 site = &patch__call_flush_branch_caches2; 470 patch_instruction_site(site, ppc_inst(0x7d2903a6)); // mtctr r9 471 site = &patch__call_flush_branch_caches3; 472 patch_instruction_site(site, ppc_inst(PPC_INST_BCCTR_FLUSH)); 473 474 } else { 475 patch_branch_site(site, (u64)&flush_branch_caches, BRANCH_SET_LINK); 476 477 // If we just need to flush the link stack, early return 478 if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) { 479 patch_instruction_site(&patch__flush_link_stack_return, 480 ppc_inst(PPC_INST_BLR)); 481 482 // If we have flush instruction, early return 483 } else if (count_cache_flush_type == BRANCH_CACHE_FLUSH_HW) { 484 patch_instruction_site(&patch__flush_count_cache_return, 485 ppc_inst(PPC_INST_BLR)); 486 } 487 } 488 } 489 toggle_branch_cache_flush(bool enable)490 static void toggle_branch_cache_flush(bool enable) 491 { 492 if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) { 493 if (count_cache_flush_type != BRANCH_CACHE_FLUSH_NONE) 494 count_cache_flush_type = BRANCH_CACHE_FLUSH_NONE; 495 496 pr_info("count-cache-flush: flush disabled.\n"); 497 } else { 498 if (security_ftr_enabled(SEC_FTR_BCCTR_FLUSH_ASSIST)) { 499 count_cache_flush_type = BRANCH_CACHE_FLUSH_HW; 500 pr_info("count-cache-flush: hardware flush enabled.\n"); 501 } else { 502 count_cache_flush_type = BRANCH_CACHE_FLUSH_SW; 503 pr_info("count-cache-flush: software flush enabled.\n"); 504 } 505 } 506 507 if (!enable || !security_ftr_enabled(SEC_FTR_FLUSH_LINK_STACK)) { 508 if (link_stack_flush_type != BRANCH_CACHE_FLUSH_NONE) 509 link_stack_flush_type = BRANCH_CACHE_FLUSH_NONE; 510 511 pr_info("link-stack-flush: flush disabled.\n"); 512 } else { 513 if (security_ftr_enabled(SEC_FTR_BCCTR_LINK_FLUSH_ASSIST)) { 514 link_stack_flush_type = BRANCH_CACHE_FLUSH_HW; 515 pr_info("link-stack-flush: hardware flush enabled.\n"); 516 } else { 517 link_stack_flush_type = BRANCH_CACHE_FLUSH_SW; 518 pr_info("link-stack-flush: software flush enabled.\n"); 519 } 520 } 521 522 update_branch_cache_flush(); 523 } 524 setup_count_cache_flush(void)525 void setup_count_cache_flush(void) 526 { 527 bool enable = true; 528 529 if (no_spectrev2 || cpu_mitigations_off()) { 530 if (security_ftr_enabled(SEC_FTR_BCCTRL_SERIALISED) || 531 security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED)) 532 pr_warn("Spectre v2 mitigations not fully under software control, can't disable\n"); 533 534 enable = false; 535 } 536 537 /* 538 * There's no firmware feature flag/hypervisor bit to tell us we need to 539 * flush the link stack on context switch. So we set it here if we see 540 * either of the Spectre v2 mitigations that aim to protect userspace. 541 */ 542 if (security_ftr_enabled(SEC_FTR_COUNT_CACHE_DISABLED) || 543 security_ftr_enabled(SEC_FTR_FLUSH_COUNT_CACHE)) 544 security_ftr_set(SEC_FTR_FLUSH_LINK_STACK); 545 546 toggle_branch_cache_flush(enable); 547 } 548 549 #ifdef CONFIG_DEBUG_FS count_cache_flush_set(void * data,u64 val)550 static int count_cache_flush_set(void *data, u64 val) 551 { 552 bool enable; 553 554 if (val == 1) 555 enable = true; 556 else if (val == 0) 557 enable = false; 558 else 559 return -EINVAL; 560 561 toggle_branch_cache_flush(enable); 562 563 return 0; 564 } 565 count_cache_flush_get(void * data,u64 * val)566 static int count_cache_flush_get(void *data, u64 *val) 567 { 568 if (count_cache_flush_type == BRANCH_CACHE_FLUSH_NONE) 569 *val = 0; 570 else 571 *val = 1; 572 573 return 0; 574 } 575 576 DEFINE_DEBUGFS_ATTRIBUTE(fops_count_cache_flush, count_cache_flush_get, 577 count_cache_flush_set, "%llu\n"); 578 count_cache_flush_debugfs_init(void)579 static __init int count_cache_flush_debugfs_init(void) 580 { 581 debugfs_create_file_unsafe("count_cache_flush", 0600, 582 powerpc_debugfs_root, NULL, 583 &fops_count_cache_flush); 584 return 0; 585 } 586 device_initcall(count_cache_flush_debugfs_init); 587 #endif /* CONFIG_DEBUG_FS */ 588 #endif /* CONFIG_PPC_BOOK3S_64 */ 589