/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/mmu.h>

/*
 * Layout of the register save area.
 */
#define SL_SP		0
#define SL_PC		4
#define SL_MSR		8
#define SL_SDR1		0xc
#define SL_SPRG0	0x10	/* 4 sprg's */
#define SL_DBAT0	0x20
#define SL_IBAT0	0x28
#define SL_DBAT1	0x30
#define SL_IBAT1	0x38
#define SL_DBAT2	0x40
#define SL_IBAT2	0x48
#define SL_DBAT3	0x50
#define SL_IBAT3	0x58
#define SL_DBAT4	0x60
#define SL_IBAT4	0x68
#define SL_DBAT5	0x70
#define SL_IBAT5	0x78
#define SL_DBAT6	0x80
#define SL_IBAT6	0x88
#define SL_DBAT7	0x90
#define SL_IBAT7	0x98
#define SL_TB		0xa0
#define SL_R2		0xa8
#define SL_CR		0xac
#define SL_LR		0xb0
#define SL_R12		0xb4	/* r12 to r31 */
#define SL_SIZE		(SL_R12 + 80)

	.section .data
	.align	5

_GLOBAL(swsusp_save_area)
	.space	SL_SIZE


	.section .text
	.align	5

_GLOBAL(swsusp_arch_suspend)

	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l

	mflr	r0
	stw	r0,SL_LR(r11)
	mfcr	r0
	stw	r0,SL_CR(r11)
	stw	r1,SL_SP(r11)
	stw	r2,SL_R2(r11)
	stmw	r12,SL_R12(r11)

	/* Save MSR & SDR1 */
	mfmsr	r4
	stw	r4,SL_MSR(r11)
	mfsdr1	r4
	stw	r4,SL_SDR1(r11)

	/* Get a stable timebase and save it */
1:	mftbu	r4
	stw	r4,SL_TB(r11)
	mftb	r5
	stw	r5,SL_TB+4(r11)
	mftbu	r3
	cmpw	r3,r4
	bne	1b

	/* Save SPRGs */
	mfsprg	r4,0
	stw	r4,SL_SPRG0(r11)
	mfsprg	r4,1
	stw	r4,SL_SPRG0+4(r11)
	mfsprg	r4,2
	stw	r4,SL_SPRG0+8(r11)
	mfsprg	r4,3
	stw	r4,SL_SPRG0+12(r11)

	/* Save BATs */
	mfdbatu	r4,0
	stw	r4,SL_DBAT0(r11)
	mfdbatl	r4,0
	stw	r4,SL_DBAT0+4(r11)
	mfdbatu	r4,1
	stw	r4,SL_DBAT1(r11)
	mfdbatl	r4,1
	stw	r4,SL_DBAT1+4(r11)
	mfdbatu	r4,2
	stw	r4,SL_DBAT2(r11)
	mfdbatl	r4,2
	stw	r4,SL_DBAT2+4(r11)
	mfdbatu	r4,3
	stw	r4,SL_DBAT3(r11)
	mfdbatl	r4,3
	stw	r4,SL_DBAT3+4(r11)
	mfibatu	r4,0
	stw	r4,SL_IBAT0(r11)
	mfibatl	r4,0
	stw	r4,SL_IBAT0+4(r11)
	mfibatu	r4,1
	stw	r4,SL_IBAT1(r11)
	mfibatl	r4,1
	stw	r4,SL_IBAT1+4(r11)
	mfibatu	r4,2
	stw	r4,SL_IBAT2(r11)
	mfibatl	r4,2
	stw	r4,SL_IBAT2+4(r11)
	mfibatu	r4,3
	stw	r4,SL_IBAT3(r11)
	mfibatl	r4,3
	stw	r4,SL_IBAT3+4(r11)

BEGIN_MMU_FTR_SECTION
	mfspr	r4,SPRN_DBAT4U
	stw	r4,SL_DBAT4(r11)
	mfspr	r4,SPRN_DBAT4L
	stw	r4,SL_DBAT4+4(r11)
	mfspr	r4,SPRN_DBAT5U
	stw	r4,SL_DBAT5(r11)
	mfspr	r4,SPRN_DBAT5L
	stw	r4,SL_DBAT5+4(r11)
	mfspr	r4,SPRN_DBAT6U
	stw	r4,SL_DBAT6(r11)
	mfspr	r4,SPRN_DBAT6L
	stw	r4,SL_DBAT6+4(r11)
	mfspr	r4,SPRN_DBAT7U
	stw	r4,SL_DBAT7(r11)
	mfspr	r4,SPRN_DBAT7L
	stw	r4,SL_DBAT7+4(r11)
	mfspr	r4,SPRN_IBAT4U
	stw	r4,SL_IBAT4(r11)
	mfspr	r4,SPRN_IBAT4L
	stw	r4,SL_IBAT4+4(r11)
	mfspr	r4,SPRN_IBAT5U
	stw	r4,SL_IBAT5(r11)
	mfspr	r4,SPRN_IBAT5L
	stw	r4,SL_IBAT5+4(r11)
	mfspr	r4,SPRN_IBAT6U
	stw	r4,SL_IBAT6(r11)
	mfspr	r4,SPRN_IBAT6L
	stw	r4,SL_IBAT6+4(r11)
	mfspr	r4,SPRN_IBAT7U
	stw	r4,SL_IBAT7(r11)
	mfspr	r4,SPRN_IBAT7L
	stw	r4,SL_IBAT7+4(r11)
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)

#if 0
	/* Back up various CPU configuration state */
	bl	__save_cpu_setup
#endif
	/* Call the low level suspend stuff (we should probably have made
	 * a stackframe...)
	 */
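	/* Note: swsusp_save is the generic C snapshot routine, so it
	 * follows the normal calling convention and may clobber the
	 * volatile registers (r0, r3-r12, CTR, LR and the volatile CR
	 * fields); this is why r11 is recomputed and LR reloaded from
	 * the save area below.
	 */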
	bl	swsusp_save

	/* Restore LR from the save area */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	lwz	r0,SL_LR(r11)
	mtlr	r0

	blr


/* Resume code */
_GLOBAL(swsusp_arch_resume)

#ifdef CONFIG_ALTIVEC
	/* Stop pending AltiVec streams and memory accesses */
BEGIN_FTR_SECTION
	DSSALL
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	sync

	/* Disable MSR:DR to make sure we don't take a TLB or
	 * hash miss during the copy, as our hash table will be
	 * unusable for a while. For .text, we assume we are
	 * covered by a BAT. This works only for non-G5 at this
	 * point. G5 will need a better approach, possibly using
	 * a small temporary hash table filled with large mappings;
	 * disabling the MMU completely isn't a good option for
	 * performance reasons.
	 * (Note that 750s may have the same performance issue as
	 * the G5 in this case; we should investigate using moving
	 * BATs for these CPUs.)
	 */
	mfmsr	r0
	sync
	rlwinm	r0,r0,0,28,26		/* clear MSR_DR */
	mtmsr	r0
	sync
	isync

	/* Load the pointer to the list of pages to copy into r10 */
	lis	r11,(restore_pblist - KERNELBASE)@h
	ori	r11,r11,restore_pblist@l
	lwz	r10,0(r11)

	/* Copy the pages (256 transfers of 16 bytes = one 4kB page per
	 * iteration of the outer loop). This is a very basic
	 * implementation, to be replaced by something more cache
	 * efficient */
1:
	tophys(r3,r10)
	li	r0,256
	mtctr	r0
	lwz	r11,pbe_address(r3)	/* source */
	tophys(r5,r11)
	lwz	r10,pbe_orig_address(r3)	/* destination */
	tophys(r6,r10)
2:
	lwz	r8,0(r5)
	lwz	r9,4(r5)
	lwz	r10,8(r5)
	lwz	r11,12(r5)
	addi	r5,r5,16
	stw	r8,0(r6)
	stw	r9,4(r6)
	stw	r10,8(r6)
	stw	r11,12(r6)
	addi	r6,r6,16
	bdnz	2b
	lwz	r10,pbe_next(r3)
	cmpwi	0,r10,0
	bne	1b

	/* Do a very simple cache flush/inval of the L1 to ensure
	 * coherency of the icache
	 */
	lis	r3,0x0002
	mtctr	r3
	li	r3, 0
1:
	lwz	r0,0(r3)
	addi	r3,r3,0x0020
	bdnz	1b
	isync
	sync

	/* Now flush those cache lines */
	lis	r3,0x0002
	mtctr	r3
	li	r3, 0
1:
	dcbf	0,r3
	addi	r3,r3,0x0020
	bdnz	1b
	sync

	/* Ok, we are now running with the kernel data of the old
	 * kernel fully restored. We can get to the save area
	 * easily now. As for the rest of the code, it assumes the
	 * loader kernel and the booted one are exactly identical
	 */
	lis	r11,swsusp_save_area@h
	ori	r11,r11,swsusp_save_area@l
	tophys(r11,r11)

#if 0
	/* Restore various CPU configuration state */
	bl	__restore_cpu_setup
#endif
	/* Restore the BATs and SDR1.  Then we can turn on the MMU.
	 * This is a bit hairy as we are running out of those BATs,
	 * but first, our code is probably in the icache, and we are
	 * writing the same values back to the BATs, so that should
	 * be fine, though a better solution will have to be found
	 * long-term.
	 */
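	/* SDR1 tells the hash MMU where the hash page table lives
	 * (base and size), and SPRG0-3 are the scratch registers the
	 * exception entry code relies on, so both must be back in
	 * place before translation is re-enabled and any exception
	 * can be taken.
	 */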
	lwz	r4,SL_SDR1(r11)
	mtsdr1	r4
	lwz	r4,SL_SPRG0(r11)
	mtsprg	0,r4
	lwz	r4,SL_SPRG0+4(r11)
	mtsprg	1,r4
	lwz	r4,SL_SPRG0+8(r11)
	mtsprg	2,r4
	lwz	r4,SL_SPRG0+12(r11)
	mtsprg	3,r4

#if 0
	lwz	r4,SL_DBAT0(r11)
	mtdbatu	0,r4
	lwz	r4,SL_DBAT0+4(r11)
	mtdbatl	0,r4
	lwz	r4,SL_DBAT1(r11)
	mtdbatu	1,r4
	lwz	r4,SL_DBAT1+4(r11)
	mtdbatl	1,r4
	lwz	r4,SL_DBAT2(r11)
	mtdbatu	2,r4
	lwz	r4,SL_DBAT2+4(r11)
	mtdbatl	2,r4
	lwz	r4,SL_DBAT3(r11)
	mtdbatu	3,r4
	lwz	r4,SL_DBAT3+4(r11)
	mtdbatl	3,r4
	lwz	r4,SL_IBAT0(r11)
	mtibatu	0,r4
	lwz	r4,SL_IBAT0+4(r11)
	mtibatl	0,r4
	lwz	r4,SL_IBAT1(r11)
	mtibatu	1,r4
	lwz	r4,SL_IBAT1+4(r11)
	mtibatl	1,r4
	lwz	r4,SL_IBAT2(r11)
	mtibatu	2,r4
	lwz	r4,SL_IBAT2+4(r11)
	mtibatl	2,r4
	lwz	r4,SL_IBAT3(r11)
	mtibatu	3,r4
	lwz	r4,SL_IBAT3+4(r11)
	mtibatl	3,r4
BEGIN_MMU_FTR_SECTION
	lwz	r4,SL_DBAT4(r11)
	mtspr	SPRN_DBAT4U,r4
	lwz	r4,SL_DBAT4+4(r11)
	mtspr	SPRN_DBAT4L,r4
	lwz	r4,SL_DBAT5(r11)
	mtspr	SPRN_DBAT5U,r4
	lwz	r4,SL_DBAT5+4(r11)
	mtspr	SPRN_DBAT5L,r4
	lwz	r4,SL_DBAT6(r11)
	mtspr	SPRN_DBAT6U,r4
	lwz	r4,SL_DBAT6+4(r11)
	mtspr	SPRN_DBAT6L,r4
	lwz	r4,SL_DBAT7(r11)
	mtspr	SPRN_DBAT7U,r4
	lwz	r4,SL_DBAT7+4(r11)
	mtspr	SPRN_DBAT7L,r4
	lwz	r4,SL_IBAT4(r11)
	mtspr	SPRN_IBAT4U,r4
	lwz	r4,SL_IBAT4+4(r11)
	mtspr	SPRN_IBAT4L,r4
	lwz	r4,SL_IBAT5(r11)
	mtspr	SPRN_IBAT5U,r4
	lwz	r4,SL_IBAT5+4(r11)
	mtspr	SPRN_IBAT5L,r4
	lwz	r4,SL_IBAT6(r11)
	mtspr	SPRN_IBAT6U,r4
	lwz	r4,SL_IBAT6+4(r11)
	mtspr	SPRN_IBAT6L,r4
	lwz	r4,SL_IBAT7(r11)
	mtspr	SPRN_IBAT7U,r4
	lwz	r4,SL_IBAT7+4(r11)
	mtspr	SPRN_IBAT7L,r4
END_MMU_FTR_SECTION_IFSET(MMU_FTR_USE_HIGH_BATS)
#endif

	/* Flush all TLBs */
	lis	r4,0x1000
1:	addic.	r4,r4,-0x1000
	tlbie	r4
	bgt	1b
	sync

	/* Restore the MSR and turn on the MMU */
	lwz	r3,SL_MSR(r11)
	bl	turn_on_mmu
	tovirt(r11,r11)

	/* Restore TB */
	li	r3,0
	mttbl	r3
	lwz	r3,SL_TB(r11)
	lwz	r4,SL_TB+4(r11)
	mttbu	r3
	mttbl	r4

	/* Kick decrementer */
	li	r0,1
	mtdec	r0

	/* Restore the callee-saved registers and return */
	lwz	r0,SL_CR(r11)
	mtcr	r0
	lwz	r2,SL_R2(r11)
	lmw	r12,SL_R12(r11)
	lwz	r1,SL_SP(r11)
	lwz	r0,SL_LR(r11)
	mtlr	r0

	// XXX Note: we don't really need to call swsusp_resume

	li	r3,0
	blr

/* FIXME: This construct is actually not useful since we don't shut
 * down the instruction MMU; we could just flip MSR:DR back on.
 */
turn_on_mmu:
	mflr	r4
	mtsrr0	r4
	mtsrr1	r3
	sync
	isync
	rfi
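
/* turn_on_mmu is a small rfi trampoline: the caller's return address
 * (taken from LR) goes into SRR0 and the saved MSR (passed in r3)
 * into SRR1, so the rfi restores the MSR, re-enabling translation,
 * and branches back to the caller in a single atomic step.
 */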