/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2003, 04, 07 Ralf Baechle <ralf@linux-mips.org>
 * Copyright (C) MIPS Technologies, Inc.
 *   written by Ralf Baechle <ralf@linux-mips.org>
 */
#ifndef _ASM_HAZARDS_H
#define _ASM_HAZARDS_H

#include <linux/stringify.h>
#include <asm/compiler.h>

#define ___ssnop							\
	sll	$0, $0, 1

#define ___ehb								\
	sll	$0, $0, 3

/*
 * TLB hazards
 */
#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && \
	!defined(CONFIG_CPU_CAVIUM_OCTEON)

/*
 * MIPSR2 defines ehb for hazard avoidance
 */

#define __mtc0_tlbw_hazard						\
	___ehb

#define __mtc0_tlbr_hazard						\
	___ehb

#define __tlbw_use_hazard						\
	___ehb

#define __tlb_read_hazard						\
	___ehb

#define __tlb_probe_hazard						\
	___ehb

#define __irq_enable_hazard						\
	___ehb

#define __irq_disable_hazard						\
	___ehb

#define __back_to_back_c0_hazard					\
	___ehb

/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as an argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla, which are only
 * usable for 32-bit resp. 64-bit code, so they can't be used without
 * conditional compilation.  The alternative is switching the assembler to
 * 64-bit code, which happens to work right even for 32-bit code...
 */
#define instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set "MIPS_ISA_LEVEL"				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)
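
/*
 * Usage sketch (illustrative only, not part of this header's API): an
 * instruction hazard barrier is needed once newly written instructions
 * have been made visible, e.g. after an icache flush.  Assuming the
 * usual cacheflush helper and hypothetical start/end bounds:
 *
 *	local_flush_icache_range(start, end);
 *	instruction_hazard();
 */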

#elif (defined(CONFIG_CPU_MIPSR1) && !defined(CONFIG_MIPS_ALCHEMY)) || \
	defined(CONFIG_CPU_BMIPS)

/*
 * These are slightly complicated by the fact that we guarantee R1 kernels to
 * run fine on R2 processors.
 */

#define __mtc0_tlbw_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __mtc0_tlbr_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __tlbw_use_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __tlb_read_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __tlb_probe_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __irq_enable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __irq_disable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

#define __back_to_back_c0_hazard					\
	___ssnop;							\
	___ssnop;							\
	___ssnop;							\
	___ehb

/*
 * gcc has a tradition of miscompiling the previous construct using the
 * address of a label as an argument to inline assembler.  Gas, on the other
 * hand, has the annoying difference between la and dla, which are only
 * usable for 32-bit resp. 64-bit code, so they can't be used without
 * conditional compilation.  The alternative is switching the assembler to
 * 64-bit code, which happens to work right even for 32-bit code...
 */
#define __instruction_hazard()						\
do {									\
	unsigned long tmp;						\
									\
	__asm__ __volatile__(						\
	"	.set	mips64r2				\n"	\
	"	dla	%0, 1f					\n"	\
	"	jr.hb	%0					\n"	\
	"	.set	mips0					\n"	\
	"1:							\n"	\
	: "=r" (tmp));							\
} while (0)

#define instruction_hazard()						\
do {									\
	if (cpu_has_mips_r2_r6)						\
		__instruction_hazard();					\
} while (0)

#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
	defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_CPU_R10000) || \
	defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)

/*
 * R10000 rocks - all hazards handled in hardware, so this becomes a
 * no-brainer.
 */

#define __mtc0_tlbw_hazard

#define __mtc0_tlbr_hazard

#define __tlbw_use_hazard

#define __tlb_read_hazard

#define __tlb_probe_hazard

#define __irq_enable_hazard

#define __irq_disable_hazard

#define __back_to_back_c0_hazard

#define instruction_hazard() do { } while (0)

#elif defined(CONFIG_CPU_SB1)

/*
 * Mostly like R4000 for historic reasons
 */
#define __mtc0_tlbw_hazard

#define __mtc0_tlbr_hazard

#define __tlbw_use_hazard

#define __tlb_read_hazard

#define __tlb_probe_hazard

#define __irq_enable_hazard

#define __irq_disable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define __back_to_back_c0_hazard

#define instruction_hazard() do { } while (0)

#else

/*
 * Finally the catchall case for all other processors including R4000, R4400,
 * R4600, R4700, R5000, RM7000, NEC VR41xx etc.
 *
 * The taken branch will result in a two cycle penalty for the two killed
 * instructions on R4000 / R4400.  Other processors only have a single cycle
 * hazard, so this is a nice trick to get optimal code for a range of
 * processors.
 */
#define __mtc0_tlbw_hazard						\
	nop;								\
	nop

#define __mtc0_tlbr_hazard						\
	nop;								\
	nop

#define __tlbw_use_hazard						\
	nop;								\
	nop;								\
	nop

#define __tlb_read_hazard						\
	nop;								\
	nop;								\
	nop

#define __tlb_probe_hazard						\
	nop;								\
	nop;								\
	nop

#define __irq_enable_hazard						\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define __irq_disable_hazard						\
	nop;								\
	nop;								\
	nop

#define __back_to_back_c0_hazard					\
	___ssnop;							\
	___ssnop;							\
	___ssnop

#define instruction_hazard() do { } while (0)

#endif
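
/*
 * Usage sketch (illustrative only; assumes the CP0 and TLB accessors from
 * <asm/mipsregs.h> and a hypothetical entryhi value): an indexed TLB write
 * is bracketed by the mtc0/tlbw and tlbw/use barriers, e.g.:
 *
 *	write_c0_entryhi(entryhi);
 *	mtc0_tlbw_hazard();
 *	tlb_write_indexed();
 *	tlbw_use_hazard();
 */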

/* FPU hazards */

#if defined(CONFIG_CPU_SB1)

#define __enable_fpu_hazard						\
	.set	push;							\
	.set	mips64;							\
	.set	noreorder;						\
	___ssnop;							\
	bnezl	$0, .+4;						\
	___ssnop;							\
	.set	pop

#define __disable_fpu_hazard

#elif defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)

#define __enable_fpu_hazard						\
	___ehb

#define __disable_fpu_hazard					\
	___ehb

#else

#define __enable_fpu_hazard						\
	nop;								\
	nop;								\
	nop;								\
	nop

#define __disable_fpu_hazard					\
	___ehb

#endif

#ifdef __ASSEMBLY__

#define _ssnop ___ssnop
#define _ehb ___ehb
#define mtc0_tlbw_hazard __mtc0_tlbw_hazard
#define mtc0_tlbr_hazard __mtc0_tlbr_hazard
#define tlbw_use_hazard __tlbw_use_hazard
#define tlb_read_hazard __tlb_read_hazard
#define tlb_probe_hazard __tlb_probe_hazard
#define irq_enable_hazard __irq_enable_hazard
#define irq_disable_hazard __irq_disable_hazard
#define back_to_back_c0_hazard __back_to_back_c0_hazard
#define enable_fpu_hazard __enable_fpu_hazard
#define disable_fpu_hazard __disable_fpu_hazard

#else

#define _ssnop()							\
do {									\
	__asm__ __volatile__(						\
	__stringify(___ssnop)						\
	);								\
} while (0)

#define _ehb()								\
do {									\
	__asm__ __volatile__(						\
	__stringify(___ehb)						\
	);								\
} while (0)


#define mtc0_tlbw_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__mtc0_tlbw_hazard)					\
	);								\
} while (0)


#define mtc0_tlbr_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__mtc0_tlbr_hazard)					\
	);								\
} while (0)


#define tlbw_use_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__tlbw_use_hazard)					\
	);								\
} while (0)


#define tlb_read_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__tlb_read_hazard)					\
	);								\
} while (0)


#define tlb_probe_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__tlb_probe_hazard)					\
	);								\
} while (0)


#define irq_enable_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__irq_enable_hazard)				\
	);								\
} while (0)


#define irq_disable_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__irq_disable_hazard)				\
	);								\
} while (0)


#define back_to_back_c0_hazard()					\
do {									\
	__asm__ __volatile__(						\
	__stringify(__back_to_back_c0_hazard)				\
	);								\
} while (0)


#define enable_fpu_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__enable_fpu_hazard)				\
	);								\
} while (0)


#define disable_fpu_hazard()						\
do {									\
	__asm__ __volatile__(						\
	__stringify(__disable_fpu_hazard)				\
	);								\
} while (0)
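
/*
 * Usage sketch (illustrative only; mirrors the FPU enable pattern found in
 * <asm/fpu.h>): raising the Status.CU1 bit must be followed by the enable
 * hazard barrier before the FPU is actually usable:
 *
 *	set_c0_status(ST0_CU1);
 *	enable_fpu_hazard();
 */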

/*
 * MIPS R2 instruction hazard barrier.  Needs to be called as a subroutine.
 */
extern void mips_ihb(void);

#endif /* __ASSEMBLY__ */

#endif /* _ASM_HAZARDS_H */