/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 07 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) MIPS Technologies, Inc.
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#include <linux/stringify.h>

/* sll $0, $0, 1 is the SSNOP (superscalar no-op) encoding */
#define ___ssnop							\
	sll	$0, $0, 1

/* sll $0, $0, 3 is the MIPS R2 EHB (execution hazard barrier) encoding */
#define ___ehb								\
	sll	$0, $0, 3

/*
 * TLB hazards
 */
#if defined(CONFIG_CPU_MIPSR6) || \
	(defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_CPU_CAVIUM_OCTEON))

/*
 * MIPSR2 and MIPSR6 define ehb for hazard avoidance
 */

#define __mtc0_tlbw_hazard						\
	___ehb

#define __tlbw_use_hazard						\
	___ehb

#define __tlb_probe_hazard						\
	___ehb

#define __irq_enable_hazard						\
	___ehb

#define __irq_disable_hazard						\
	___ehb

#define __back_to_back_c0_hazard					\
	___ehb

/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as an argument to inline assembler. Gas, on the
 * other hand, makes the annoying distinction between la and dla, which
 * are usable only for 32-bit and 64-bit code respectively, so neither
 * works without conditional compilation. The alternative is switching
 * the assembler to 64-bit code, which happens to work right even for
 * 32-bit code ...
 *
 * LY22: Bad news - newer binutils warns about dla when ABI=32, even on
 * MIPS64, so multiple variants are needed.
 */
#if defined(CONFIG_CPU_MIPSR6) && defined(CONFIG_64BIT)
#define instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	mips64r6				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	pop					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)
#else
#ifdef CONFIG_CPU_MIPSR6
#define instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	push					\n"	\
	"	.set	mips64r6				\n"	\
	"	la	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	pop					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)
#else /* !CONFIG_CPU_MIPSR6 */
#ifdef CONFIG_64BIT
#define instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	mips64r2				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)
#else
#define instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	mips64r2				\n"	\
	"	la	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)
#endif
#endif
#endif
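
/*
 * Illustrative sketch, not taken from this file: instruction_hazard()
 * is meant to run after kernel text has been modified and the icache
 * has been written back and invalidated, so the pipeline cannot keep
 * executing stale instructions. Assuming a caller that has just
 * patched code between start and end, the pairing would look like:
 *
 *	flush_icache_range(start, end);
 *	instruction_hazard();
 */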

#elif (defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_MIPS_ALCHEMY)) || \
	defined(CONFIG_CPU_BMIPS)

/*
 * These are slightly complicated by the fact that we guarantee R1 kernels to
 * run fine on R2 processors.
 */

#define __mtc0_tlbw_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __tlbw_use_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __tlb_probe_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __irq_enable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __irq_disable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __back_to_back_c0_hazard					\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as an argument to inline assembler. Gas, on the
 * other hand, makes the annoying distinction between la and dla, which
 * are usable only for 32-bit and 64-bit code respectively, so neither
 * works without conditional compilation. The alternative is switching
 * the assembler to 64-bit code, which happens to work right even for
 * 32-bit code ...
 */
#define __instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	mips64r2				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)

#define instruction_hazard()						\
do {									\
	if (cpu_has_mips_r2 || cpu_has_mips_r6)				\
		__instruction_hazard();					\
} while (0)

#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
	defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_CPU_R10000) || \
	defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)

/*
 * R10000 rocks - all hazards are handled in hardware, so this becomes a
 * no-brainer.
 */

#define __mtc0_tlbw_hazard

#define __tlbw_use_hazard

#define __tlb_probe_hazard

#define __irq_enable_hazard

#define __irq_disable_hazard

#define __back_to_back_c0_hazard

#define instruction_hazard() do { } while (0)

#elif defined(CONFIG_CPU_SB1)

/*
 * Mostly like R4000 for historical reasons
 */
#define __mtc0_tlbw_hazard

#define __tlbw_use_hazard

#define __tlb_probe_hazard

#define __irq_enable_hazard

#define __irq_disable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define __back_to_back_c0_hazard

#define instruction_hazard() do { } while (0)

#else

/*
 * Finally the catchall case for all other processors including R4000, R4400,
 * R4600, R4700, R5000, RM7000, NEC VR41xx etc.
 *
 * The taken branch will result in a two cycle penalty for the two killed
 * instructions on R4000 / R4400. Other processors only have a single cycle
 * hazard, so this is a nice trick to get optimal code for a range of
 * processors.
 */
#define __mtc0_tlbw_hazard						\
	nop;								\
	nop

#define __tlbw_use_hazard						\
	nop;								\
	nop;								\
	nop

#define __tlb_probe_hazard						\
	nop;								\
	nop;								\
	nop

#define __irq_enable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define __irq_disable_hazard						\
	nop;								\
	nop;								\
	nop

#define __back_to_back_c0_hazard					\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define instruction_hazard() do { } while (0)

#endif


/* FPU hazards */

#if defined(CONFIG_CPU_SB1)

#define __enable_fpu_hazard						\
	.set	push;							\
	.set	mips64;							\
	.set	noreorder;						\
	___ssnop;							\
	bnezl	$0, .+4;						\
	___ssnop;							\
	.set	pop

#define __disable_fpu_hazard

#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)

#define __enable_fpu_hazard						\
	___ehb

#define __disable_fpu_hazard						\
	___ehb

#else

#define __enable_fpu_hazard						\
	nop;								\
	nop;								\
	nop;								\
	nop

#define __disable_fpu_hazard						\
	___ehb

#endif

#ifdef __ASSEMBLY__

#define _ssnop ___ssnop
#define _ehb ___ehb
#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
#define tlbw_use_hazard __tlbw_use_hazard
#define tlb_probe_hazard __tlb_probe_hazard
#define irq_enable_hazard __irq_enable_hazard
#define irq_disable_hazard __irq_disable_hazard
#define back_to_back_c0_hazard __back_to_back_c0_hazard
#define enable_fpu_hazard __enable_fpu_hazard
#define disable_fpu_hazard __disable_fpu_hazard

#else

#define _ssnop()							\
do {									\
	__asm__ __volatile__(						\
	__stringify(___ssnop)						\
	);								\
} while (0)

#define _ehb()								\
do {									\
	__asm__ __volatile__(						\
	__stringify(___ehb)						\
	);								\
} while (0)

#define mtc0_tlbw_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__mtc0_tlbw_hazard)					\
	);								\
} while (0)

#define tlbw_use_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__tlbw_use_hazard)					\
	);								\
} while (0)

#define tlb_probe_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__tlb_probe_hazard)					\
	);								\
} while (0)

#define irq_enable_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__irq_enable_hazard)				\
	);								\
} while (0)

#define irq_disable_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__irq_disable_hazard)				\
	);								\
} while (0)

#define back_to_back_c0_hazard()					\
do {									\
	__asm__ __volatile__(						\
	__stringify(__back_to_back_c0_hazard)				\
	);								\
} while (0)

#define enable_fpu_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__enable_fpu_hazard)				\
	);								\
} while (0)

#define disable_fpu_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__disable_fpu_hazard)				\
	);								\
} while (0)
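
/*
 * Illustrative sketch, not taken from this file: a typical TLB update
 * pairs the wrappers above with the TLB operations from
 * <asm/mipsregs.h>, roughly as arch/mips/mm/tlb-r4k.c does (the
 * entryhi/entrylo values are placeholders):
 *
 *	write_c0_entryhi(entryhi);
 *	mtc0_tlbw_hazard();
 *	tlb_probe();
 *	tlb_probe_hazard();
 *	idx = read_c0_index();
 *	write_c0_entrylo0(entrylo0);
 *	write_c0_entrylo1(entrylo1);
 *	if (idx < 0)
 *		tlb_write_random();
 *	else
 *		tlb_write_indexed();
 *	tlbw_use_hazard();
 */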

/*
 * MIPS R2 instruction hazard barrier. Needs to be called as a subroutine.
 */
extern void mips_ihb(void);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_HAZARDS_H */