/*
 * Copyright 2018-2020 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

.section .text, "ax"

#include <asm_macros.S>

#include <lib/psci/psci.h>
#include <nxp_timer.h>
#include <plat_gic.h>
#include <pmu.h>

#include <bl31_data.h>
#include <plat_psci.h>
#include <platform_def.h>

.global soc_init_start
.global soc_init_percpu
.global soc_init_finish
.global _set_platform_security
.global _soc_set_start_addr

.global _soc_core_release
.global _soc_ck_disabled
.global _soc_core_restart
.global _soc_core_prep_off
.global _soc_core_entr_off
.global _soc_core_exit_off
.global _soc_sys_reset
.global _soc_sys_off
.global _soc_core_prep_stdby
.global _soc_core_entr_stdby
.global _soc_core_exit_stdby
.global _soc_core_prep_pwrdn
.global _soc_core_entr_pwrdn
.global _soc_core_exit_pwrdn
.global _soc_clstr_prep_stdby
.global _soc_clstr_exit_stdby
.global _soc_clstr_prep_pwrdn
.global _soc_clstr_exit_pwrdn
.global _soc_sys_prep_stdby
.global _soc_sys_exit_stdby
.global _soc_sys_prep_pwrdn
.global _soc_sys_pwrdn_wfi
.global _soc_sys_exit_pwrdn

.equ TZPC_BASE,			  0x02200000
.equ TZPCDECPROT_0_SET_BASE, 0x02200804
.equ TZPCDECPROT_1_SET_BASE, 0x02200810
.equ TZPCDECPROT_2_SET_BASE, 0x0220081C

#define CLUSTER_3_CORES_MASK 0xC0
#define CLUSTER_3_IN_RESET  1
#define CLUSTER_3_NORMAL	0
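
/* Note: with two A72 cores per cluster, cluster 3 corresponds to cores 6
 * and 7, i.e. core-mask bits [7:6], hence the 0xC0 mask above
 */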

/* cluster 3 handling no longer based on frequency, but rather on RCW[850],
 * which is bit 18 of RCWSR27
 */
#define CLUSTER_3_RCW_BIT  0x40000

/* retry count for clock-stop acks */
.equ CLOCK_RETRY_CNT,  800

/* disable prefetching in the A72 core */
#define  CPUACTLR_DIS_LS_HW_PRE	0x100000000000000
#define  CPUACTLR_DIS_L2_TLB_PRE   0x200000
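
/* decoding the masks above: CPUACTLR_DIS_LS_HW_PRE is bit [56] of
 * CPUACTLR_EL1 (load/store hardware prefetch disable) and
 * CPUACTLR_DIS_L2_TLB_PRE is bit [21] (L2 TLB prefetch disable)
 */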

/* Function starts the initialization tasks of the soc,
 * using secondary cores if they are available
 *
 * Called from C, saving the non-volatile regs
 * save these as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 *
 * in:
 * out:
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11
 */
func soc_init_start
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	/* make sure the personality has been
	 * established by releasing cores that
	 * are marked "to-be-disabled" from reset
	 */
	bl  release_disabled  		/* 0-9 */

	/* init the task flags */
	bl  _init_task_flags   		/* 0-1 */

	/* set SCRATCHRW7 to 0x0 */
	ldr  x0, =DCFG_SCRATCHRW7_OFFSET
	mov  x1, xzr
	bl   _write_reg_dcfg

1:
	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	ret
endfunc soc_init_start


/* Function performs any soc-specific initialization that is needed on
 * a per-core basis.
 * in:  none
 * out: none
 * uses x0, x1, x2, x3
 */
func soc_init_percpu
	stp  x4,  x30,  [sp, #-16]!

	bl   plat_my_core_mask
	mov  x2, x0				/* x2 = core mask */

	/* check if this core is marked for prefetch disable */
	mov   x0, #PREFETCH_DIS_OFFSET
	bl	_get_global_data		/* 0-1 */
	tst   x0, x2
	b.eq  1f
	bl	_disable_ldstr_pfetch_A72	/* 0 */
1:
	mov  x0, #NXP_PMU_ADDR
	bl   enable_timer_base_to_cluster
	ldp  x4,  x30,  [sp], #16
	ret
endfunc soc_init_percpu


/* Function completes the initialization tasks of the soc
 * in:
 * out:
 * uses x0, x1, x2, x3, x4
 */
func soc_init_finish
	stp  x4,  x30,  [sp, #-16]!

	ldp  x4,  x30,  [sp], #16
	ret
endfunc soc_init_finish


/* Function sets the security mechanisms in the SoC to implement the
 * Platform Security Policy
 */
func _set_platform_security
	mov  x8, x30

#if (!SUPPRESS_TZC)
	/* initialize the tzpc */
	bl   init_tzpc
#endif

#if (!SUPPRESS_SEC)
	/* initialize secmon */
#ifdef NXP_SNVS_ENABLED
	mov x0, #NXP_SNVS_ADDR
	bl  init_sec_mon
#endif
#endif

	mov  x30, x8
	ret
endfunc _set_platform_security


/* Function writes a 64-bit address to bootlocptrh/l
 * in:  x0, 64-bit address to write to BOOTLOCPTRL/H
 * uses x0, x1, x2
 */
func _soc_set_start_addr
	/* Get the 64-bit base address of the dcfg block */
	ldr  x2, =NXP_DCFG_ADDR

	/* write the 32-bit BOOTLOCPTRL register */
	mov  x1, x0
	str  w1, [x2, #DCFG_BOOTLOCPTRL_OFFSET]

	/* write the 32-bit BOOTLOCPTRH register */
	lsr  x1, x0, #32
	str  w1, [x2, #DCFG_BOOTLOCPTRH_OFFSET]
	ret
endfunc _soc_set_start_addr

/* Function releases a secondary core from reset
 * in:   x0 = core_mask_lsb
 * out:  none
 * uses: x0, x1, x2, x3
 */
func _soc_core_release
	mov   x3, x30

	ldr  x1, =NXP_SEC_REGFILE_ADDR
	/* write to CORE_HOLD to tell
	 * the bootrom that this core is
	 * expected to run.
	 */
	str  w0, [x1, #CORE_HOLD_OFFSET]

	/* read-modify-write BRRL to release core */
	mov  x1, #NXP_RESET_ADDR
	ldr  w2, [x1, #BRR_OFFSET]

	/* x0 = core mask */
	orr  w2, w2, w0
	str  w2, [x1, #BRR_OFFSET]
	dsb  sy
	isb

	/* send event */
	sev
	isb

	mov   x30, x3
	ret
endfunc _soc_core_release


/* Function determines if a core is disabled via COREDISABLEDSR
 * in:  w0  = core_mask_lsb
 * out: w0  = 0, core not disabled
 *	  w0 != 0, core disabled
 * uses x0, x1
 */
func _soc_ck_disabled

	/* get base addr of dcfg block */
	ldr  x1, =NXP_DCFG_ADDR

	/* read COREDISABLEDSR */
	ldr  w1, [x1, #DCFG_COREDISABLEDSR_OFFSET]

	/* test core bit */
	and  w0, w1, w0

	ret
endfunc _soc_ck_disabled


/* Part of CPU_ON
 * Function restarts a core shutdown via _soc_core_entr_off
 * in:  x0 = core mask lsb (of the target cpu)
 * out: x0 == 0, on success
 *	  x0 != 0, on failure
 * uses x0, x1, x2, x3, x4, x5, x6
 */
func _soc_core_restart
	mov  x6, x30
	mov  x4, x0

	/* pgm GICD_CTLR - enable secure grp0  */
	mov  x5, #NXP_GICD_ADDR
	ldr  w2, [x5, #GICD_CTLR_OFFSET]
	orr  w2, w2, #GICD_CTLR_EN_GRP_0
	str  w2, [x5, #GICD_CTLR_OFFSET]
	dsb sy
	isb

	/* poll on RWP til write completes */
4:
	ldr  w2, [x5, #GICD_CTLR_OFFSET]
	tst  w2, #GICD_CTLR_RWP
	b.ne 4b

	/* x4 = core mask lsb
	 * x5 = gicd base addr
	 */
	mov  x0, x4
	bl   get_mpidr_value

	/* x0 = mpidr of target core
	 * x4 = core mask lsb of target core
	 * x5 = gicd base addr
	 */

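	/* ICC_SGI0R_EL1 layout (GICv3): target-list in bits [15:0], Aff1 in
	 * bits [23:16], INTID of the SGI in bits [27:24]; the fields are
	 * assembled below from the target core's mpidr
	 */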
	/* generate target list bit */
	and  x1, x0, #MPIDR_AFFINITY0_MASK
	mov  x2, #1
	lsl  x2, x2, x1

	/* get the affinity1 field */
	and  x1, x0, #MPIDR_AFFINITY1_MASK
	lsl  x1, x1, #8
	orr  x2, x2, x1

	/* insert the INTID for SGI15 */
	orr  x2, x2, #ICC_SGI0R_EL1_INTID

	/* fire the SGI */
	msr  ICC_SGI0R_EL1, x2
	dsb  sy
	isb

	/* load '0' on success */
	mov  x0, xzr

	mov  x30, x6
	ret
endfunc _soc_core_restart


/* Part of CPU_OFF
 * Function programs SoC & GIC registers in preparation for shutting down
 * the core
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8
 */
func _soc_core_prep_off
	mov  x8, x30
	mov  x7, x0		/* x7 = core mask lsb */

	mrs  x1, CORTEX_A72_ECTLR_EL1

	/* set smp and disable prefetching in cpuectlr */
	orr  x1, x1, #CPUECTLR_SMPEN_EN
	orr  x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
	bic  x1, x1, #CPUECTLR_INS_PREFETCH_MASK
	bic  x1, x1, #CPUECTLR_DAT_PREFETCH_MASK

	/* set retention control in cpuectlr */
	bic  x1, x1, #CPUECTLR_TIMER_MASK
	orr  x1, x1, #CPUECTLR_TIMER_8TICKS
	msr  CORTEX_A72_ECTLR_EL1, x1

	/* get redistributor rd base addr for this core */
	mov  x0, x7
	bl   get_gic_rd_base
	mov  x6, x0

	/* get redistributor sgi base addr for this core */
	mov  x0, x7
	bl   get_gic_sgi_base
	mov  x5, x0

	/* x5 = gicr sgi base addr
	 * x6 = gicr rd  base addr
	 * x7 = core mask lsb
	 */

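	/* SGI 15 serves as the wake-up doorbell for an off-lined core: it is
	 * configured here as an enabled, highest-priority Grp0 interrupt so
	 * that _soc_core_restart can terminate the wfi in _soc_core_entr_off
	 * by firing SGI 15 at this core
	 */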
	/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
	mov  w3, #GICR_ICENABLER0_SGI15
	str  w3, [x5, #GICR_ICENABLER0_OFFSET]
2:
	/* poll on rwp bit in GICR_CTLR */
	ldr  w4, [x6, #GICR_CTLR_OFFSET]
	tst  w4, #GICR_CTLR_RWP
	b.ne 2b

	/* disable GRP1 interrupts at cpu interface */
	msr  ICC_IGRPEN1_EL3, xzr

	/* disable GRP0 ints at cpu interface */
	msr  ICC_IGRPEN0_EL1, xzr

	/* program the redistributor - poll on GICR_CTLR.RWP as needed */

	/* define SGI 15 as Grp0 - GICR_IGROUPR0 */
	ldr  w4, [x5, #GICR_IGROUPR0_OFFSET]
	bic  w4, w4, #GICR_IGROUPR0_SGI15
	str  w4, [x5, #GICR_IGROUPR0_OFFSET]

	/* define SGI 15 as Grp0 - GICR_IGRPMODR0 */
	ldr  w3, [x5, #GICR_IGRPMODR0_OFFSET]
	bic  w3, w3, #GICR_IGRPMODR0_SGI15
	str  w3, [x5, #GICR_IGRPMODR0_OFFSET]

	/* set priority of SGI 15 to highest (0x0) - GICR_IPRIORITYR3 */
	ldr  w4, [x5, #GICR_IPRIORITYR3_OFFSET]
	bic  w4, w4, #GICR_IPRIORITYR3_SGI15_MASK
	str  w4, [x5, #GICR_IPRIORITYR3_OFFSET]

	/* enable SGI 15 at redistributor - GICR_ISENABLER0 */
	mov  w3, #GICR_ISENABLER0_SGI15
	str  w3, [x5, #GICR_ISENABLER0_OFFSET]
	dsb  sy
	isb
3:
	/* poll on rwp bit in GICR_CTLR */
	ldr  w4, [x6, #GICR_CTLR_OFFSET]
	tst  w4, #GICR_CTLR_RWP
	b.ne 3b

	/* quiesce the debug interfaces */
	mrs  x3, osdlr_el1
	orr  x3, x3, #OSDLR_EL1_DLK_LOCK
	msr  osdlr_el1, x3
	isb

	/* enable grp0 ints */
	mov  x3, #ICC_IGRPEN0_EL1_EN
	msr  ICC_IGRPEN0_EL1, x3

	/* x5 = gicr sgi base addr
	 * x6 = gicr rd  base addr
	 * x7 = core mask lsb
	 */

	/* clear any pending interrupts */
	mvn  w1, wzr
	str  w1, [x5, #GICR_ICPENDR0_OFFSET]

	/* make sure system counter is enabled */
	ldr  x3, =NXP_TIMER_ADDR
	ldr  w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
	tst  w0, #SYS_COUNTER_CNTCR_EN
	b.ne 4f
	orr  w0, w0, #SYS_COUNTER_CNTCR_EN
	str  w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
4:
	/* enable the core timer and mask timer interrupt */
	mov  x1, #CNTP_CTL_EL0_EN
	orr  x1, x1, #CNTP_CTL_EL0_IMASK
	msr  cntp_ctl_el0, x1

	isb
	mov  x30, x8
	ret
endfunc _soc_core_prep_off


/* Part of CPU_OFF:
 * Function performs the final steps to shutdown the core
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5
 */
func _soc_core_entr_off
	mov  x5, x30
	mov  x4, x0

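	/* the core sits in wfi until the PSCI framework marks it for wakeup:
	 * if the wake event was SGI 15 it is acknowledged and EOI'd, then
	 * the recorded core state is checked - anything other than
	 * CORE_WAKEUP puts the core back into wfi
	 */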
1:
	/* enter low-power state by executing wfi */
	wfi

	/* see if SGI15 woke us up */
	mrs  x2, ICC_IAR0_EL1
	mov  x3, #ICC_IAR0_EL1_SGI15
	cmp  x2, x3
	b.ne 2f

	/* deactivate the interrupt */
	msr ICC_EOIR0_EL1, x2

2:
	/* check if core is turned ON */
	mov  x0, x4
	/* fetch the core state into x0 */
	bl   _getCoreState

	cmp  x0, #CORE_WAKEUP
	b.ne 1b

	/* if we are here, the core exited wfi with a valid wakeup */

	mov  x30, x5
	ret
endfunc _soc_core_entr_off


/* Part of CPU_OFF:
 * Function starts the process of starting a core back up
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6
 */
func _soc_core_exit_off
	mov  x6, x30
	mov  x5, x0

	/* disable forwarding of GRP0 ints at cpu interface */
	msr  ICC_IGRPEN0_EL1, xzr

	/* get redistributor sgi base addr for this core */
	mov  x0, x5
	bl   get_gic_sgi_base
	mov  x4, x0

	/* x4 = gicr sgi base addr
	 * x5 = core mask
	 */

	/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
	mov  w1, #GICR_ICENABLER0_SGI15
	str  w1, [x4, #GICR_ICENABLER0_OFFSET]

	/* get redistributor rd base addr for this core */
	mov  x0, x5
	bl   get_gic_rd_base
	mov  x4, x0

2:
	/* poll on rwp bit in GICR_CTLR */
	ldr  w2, [x4, #GICR_CTLR_OFFSET]
	tst  w2, #GICR_CTLR_RWP
	b.ne 2b

	/* unlock the debug interfaces */
	mrs  x3, osdlr_el1
	bic  x3, x3, #OSDLR_EL1_DLK_LOCK
	msr  osdlr_el1, x3
	isb

	dsb sy
	isb
	mov  x30, x6
	ret
endfunc _soc_core_exit_off


/* Function requests a reset of the entire SOC
 * in:  none
 * out: none
 * uses: x0, x1, x2, x3, x4, x5, x6
 */
func _soc_sys_reset
	mov  x6, x30

	ldr  x2, =NXP_RST_ADDR

	/* clear the RST_REQ_MSK and SW_RST_REQ */
	mov  w0, #0x00000000
	str  w0, [x2, #RSTCNTL_OFFSET]

	/* initiate the sw reset request */
	mov  w0, #SW_RST_REQ_INIT
	str  w0, [x2, #RSTCNTL_OFFSET]

	/* In case this address range is mapped as cacheable,
	 * flush the write out of the dcaches.
	 */
	add  x2, x2, #RSTCNTL_OFFSET
	dc   cvac, x2
	dsb  st
	isb

	/* Function does not return */
	b  .
endfunc _soc_sys_reset


/* Part of SYSTEM_OFF:
 * Function turns off the SoC clocks
 * Note: Function is not intended to return, and the only allowable
 *	   recovery is POR
 * in:  none
 * out: none
 * uses x0, x1, x2, x3
 */
func _soc_sys_off

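	/* sequence implemented below: mask the spurious reset request
	 * (erratum A-009810), gate the clocks of unneeded IP blocks, idle
	 * and flush the cluster L2, quiesce debug, invalidate the TLBs, then
	 * request LPM20 via PMU_POWMGTCSR and wait in wfe
	 */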
	/* A-009810: LPM20 entry sequence might cause
	 * spurious timeout reset request
	 * workaround: MASK RESET REQ RPTOE
	 */
	ldr  x0, =NXP_RESET_ADDR
	ldr  w1, =RSTRQMR_RPTOE_MASK
	str  w1, [x0, #RST_RSTRQMR1_OFFSET]

	/* disable sec, QBman, spi and qspi */
	ldr  x2, =NXP_DCFG_ADDR
	ldr  x0, =DCFG_DEVDISR1_OFFSET
	ldr  w1, =DCFG_DEVDISR1_SEC
	str  w1, [x2, x0]
	ldr  x0, =DCFG_DEVDISR3_OFFSET
	ldr  w1, =DCFG_DEVDISR3_QBMAIN
	str  w1, [x2, x0]
	ldr  x0, =DCFG_DEVDISR4_OFFSET
	ldr  w1, =DCFG_DEVDISR4_SPI_QSPI
	str  w1, [x2, x0]

	/* set TPMWAKEMR0 */
	ldr  x0, =TPMWAKEMR0_ADDR
	mov  w1, #0x1
	str  w1, [x0]

	/* disable icache, dcache, mmu @ EL1 */
	mov  x1, #SCTLR_I_C_M_MASK
	mrs  x0, sctlr_el1
	bic  x0, x0, x1
	msr  sctlr_el1, x0

	/* set smp and retention control in cpuectlr */
	mrs  x0, CORTEX_A72_ECTLR_EL1
	bic  x0, x0, #CPUECTLR_TIMER_MASK
	orr  x0, x0, #CPUECTLR_SMPEN_EN
	orr  x0, x0, #CPUECTLR_TIMER_8TICKS
	msr  CORTEX_A72_ECTLR_EL1, x0
	isb

	/* disable CCN snoop domain */
	mov  x1, #NXP_CCN_HN_F_0_ADDR
	ldr  x0, =CCN_HN_F_SNP_DMN_CTL_MASK
	str  x0, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET]
3:
	ldr  w2, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET]
	cmp  w2, #0x2
	b.ne 3b

	mov  x3, #NXP_PMU_ADDR

4:
	ldr  w1, [x3, #PMU_PCPW20SR_OFFSET]
	cmp  w1, #PMU_IDLE_CORE_MASK
	b.ne 4b

	mov  w1, #PMU_IDLE_CLUSTER_MASK
	str  w1, [x3, #PMU_CLAINACTSETR_OFFSET]

1:
	ldr  w1, [x3, #PMU_PCPW20SR_OFFSET]
	cmp  w1, #PMU_IDLE_CORE_MASK
	b.ne 1b

	mov  w1, #PMU_FLUSH_CLUSTER_MASK
	str  w1, [x3, #PMU_CLL2FLUSHSETR_OFFSET]

2:
	ldr  w1, [x3, #PMU_CLL2FLUSHSR_OFFSET]
	cmp  w1, #PMU_FLUSH_CLUSTER_MASK
	b.ne 2b

	mov  w1, #PMU_FLUSH_CLUSTER_MASK
	str  w1, [x3, #PMU_CLSL2FLUSHCLRR_OFFSET]

	mov  w1, #PMU_FLUSH_CLUSTER_MASK
	str  w1, [x3, #PMU_CLSINACTSETR_OFFSET]

	mov  x2, #DAIF_SET_MASK
	mrs  x1, spsr_el1
	orr  x1, x1, x2
	msr  spsr_el1, x1

	mrs  x1, spsr_el2
	orr  x1, x1, x2
	msr  spsr_el2, x1

	/* force the debug interface to be quiescent */
	mrs  x0, osdlr_el1
	orr  x0, x0, #0x1
	msr  osdlr_el1, x0

	/* invalidate all TLB entries at all 3 exception levels */
	tlbi alle1
	tlbi alle2
	tlbi alle3

	/* x3 = pmu base addr */

	/* request lpm20 */
	ldr  x0, =PMU_POWMGTCSR_OFFSET
	ldr  w1, =PMU_POWMGTCSR_VAL
	str  w1, [x3, x0]

5:
	wfe
	b  5b
endfunc _soc_sys_off


/* Part of CPU_SUSPEND
 * Function puts the calling core into standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
func _soc_core_entr_stdby

	dsb  sy
	isb
	wfi

	ret
endfunc _soc_core_entr_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_core_prep_stdby

	/* clear CORTEX_A72_ECTLR_EL1[2:0] */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_TIMER_MASK
	msr  CORTEX_A72_ECTLR_EL1, x1

	ret
endfunc _soc_core_prep_stdby


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_exit_stdby

	ret
endfunc _soc_core_exit_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
func _soc_core_prep_pwrdn

	/* make sure system counter is enabled */
	ldr  x2, =NXP_TIMER_ADDR
	ldr  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
	tst  w0, #SYS_COUNTER_CNTCR_EN
	b.ne 1f
	orr  w0, w0, #SYS_COUNTER_CNTCR_EN
	str  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:

	/* enable dynamic retention control (CPUECTLR[2:0])
	 * set the SMPEN bit (CPUECTLR[6])
	 */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_RET_MASK
	orr  x1, x1, #CPUECTLR_TIMER_8TICKS
	orr  x1, x1, #CPUECTLR_SMPEN_EN
	msr  CORTEX_A72_ECTLR_EL1, x1

	isb
	ret
endfunc _soc_core_prep_pwrdn


/* Part of CPU_SUSPEND
 * Function puts the calling core into a power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
func _soc_core_entr_pwrdn

	/* x0 = core mask lsb */

	dsb  sy
	isb
	wfi

	ret
endfunc _soc_core_entr_pwrdn


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_exit_pwrdn

	ret
endfunc _soc_core_exit_pwrdn


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_clstr_prep_stdby

	/* clear CORTEX_A72_ECTLR_EL1[2:0] */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_TIMER_MASK
	msr  CORTEX_A72_ECTLR_EL1, x1

	ret
endfunc _soc_clstr_prep_stdby


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_clstr_exit_stdby

	ret
endfunc _soc_clstr_exit_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2
 */
func _soc_clstr_prep_pwrdn

	/* make sure system counter is enabled */
	ldr  x2, =NXP_TIMER_ADDR
	ldr  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
	tst  w0, #SYS_COUNTER_CNTCR_EN
	b.ne 1f
	orr  w0, w0, #SYS_COUNTER_CNTCR_EN
	str  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:

	/* enable dynamic retention control (CPUECTLR[2:0])
	 * set the SMPEN bit (CPUECTLR[6])
	 */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_RET_MASK
	orr  x1, x1, #CPUECTLR_TIMER_8TICKS
	orr  x1, x1, #CPUECTLR_SMPEN_EN
	msr  CORTEX_A72_ECTLR_EL1, x1

	isb
	ret
endfunc _soc_clstr_prep_pwrdn


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_clstr_exit_pwrdn

	ret
endfunc _soc_clstr_exit_pwrdn


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_sys_prep_stdby

	/* clear CORTEX_A72_ECTLR_EL1[2:0] */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_TIMER_MASK
	msr  CORTEX_A72_ECTLR_EL1, x1
	ret
endfunc _soc_sys_prep_stdby


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_sys_exit_stdby

	ret
endfunc _soc_sys_exit_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to
 * suspend-to-power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_sys_prep_pwrdn

	mrs   x1, CORTEX_A72_ECTLR_EL1
	/* make sure the smp bit is set */
	orr   x1, x1, #CPUECTLR_SMPEN_MASK
	/* set the retention control */
	orr   x1, x1, #CPUECTLR_RET_8CLK
	/* disable tablewalk prefetch */
	orr   x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
	msr   CORTEX_A72_ECTLR_EL1, x1
	isb

	ret
endfunc _soc_sys_prep_pwrdn


/* Part of CPU_SUSPEND
 * Function puts the calling core, and potentially the soc, into a
 * low-power state
 * in:  x0 = core mask lsb
 * out: x0 = 0, success
 *	  x0 < 0, failure
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14,
 *	  x15, x16, x17, x18, x19, x20, x21, x28
 */
func _soc_sys_pwrdn_wfi
	mov  x28, x30

	/* disable cluster snooping in the CCN-508 */
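	/* each HN-F node of the CCN-508 has its own snoop-domain control:
	 * writing the saved control mask to the CLR register of all
	 * CCN_HNF_NODE_COUNT nodes removes the clusters from the snoop
	 * domain before the caches go inactive
	 */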
	ldr  x1, =NXP_CCN_HN_F_0_ADDR
	ldr  x7, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET]
	mov  x6, #CCN_HNF_NODE_COUNT
1:
	str  x7, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET]
	sub  x6, x6, #1
	add  x1, x1, #CCN_HNF_OFFSET
	cbnz x6, 1b

	/* x0  = core mask
	 * x7  = hnf sdcr
	 */

	ldr  x1, =NXP_PMU_CCSR_ADDR
	ldr  x2, =NXP_PMU_DCSR_ADDR

	/* enable the stop-request-override */
	mov  x3, #PMU_POWMGTDCR0_OFFSET
	mov  x4, #POWMGTDCR_STP_OV_EN
	str  w4, [x2, x3]

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x7  = hnf sdcr
	 */

	/* disable prefetching in the A72 core */
	mrs  x8, CORTEX_A72_CPUACTLR_EL1
	tst  x8, #CPUACTLR_DIS_LS_HW_PRE
	b.ne 2f
	dsb  sy
	isb
	/* disable data prefetch */
	orr  x16, x8, #CPUACTLR_DIS_LS_HW_PRE
	/* disable tlb prefetch */
	orr  x16, x16, #CPUACTLR_DIS_L2_TLB_PRE
	msr  CORTEX_A72_CPUACTLR_EL1, x16
	isb

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x7  = hnf sdcr
	 * x8  = cpuactlr
	 */

2:
	/* save hnf-sdcr and cpuactlr to stack */
	stp  x7,  x8,  [sp, #-16]!

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 */

	/* save the IPSTPCRn registers to stack */
	mov  x15, #PMU_IPSTPCR0_OFFSET
	ldr  w9,  [x1, x15]
	mov  x16, #PMU_IPSTPCR1_OFFSET
	ldr  w10, [x1, x16]
	mov  x17, #PMU_IPSTPCR2_OFFSET
	ldr  w11, [x1, x17]
	mov  x18, #PMU_IPSTPCR3_OFFSET
	ldr  w12, [x1, x18]
	mov  x19, #PMU_IPSTPCR4_OFFSET
	ldr  w13, [x1, x19]
	mov  x20, #PMU_IPSTPCR5_OFFSET
	ldr  w14, [x1, x20]

	stp  x9,  x10,  [sp, #-16]!
	stp  x11, x12,  [sp, #-16]!
	stp  x13, x14,  [sp, #-16]!

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x15 = PMU_IPSTPCR0_OFFSET
	 * x16 = PMU_IPSTPCR1_OFFSET
	 * x17 = PMU_IPSTPCR2_OFFSET
	 * x18 = PMU_IPSTPCR3_OFFSET
	 * x19 = PMU_IPSTPCR4_OFFSET
	 * x20 = PMU_IPSTPCR5_OFFSET
	 */

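	/* the IPPDEXPCRn registers hold the exclusion masks (IP blocks that
	 * must keep their clocks, typically wakeup sources) programmed by
	 * higher-level software; each exclusion is cleared from the
	 * corresponding stop-request mask below
	 */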
	/* load the full clock mask for IPSTPCR0 */
	ldr  x3, =DEVDISR1_MASK
	/* get the exclusions */
	mov  x21, #PMU_IPPDEXPCR0_OFFSET
	ldr  w4, [x1, x21]
	/* apply the exclusions to the mask */
	bic  w7, w3, w4
	/* stop the clocks in IPSTPCR0 */
	str  w7, [x1, x15]

	/* use same procedure for IPSTPCR1-IPSTPCR5 */

	/* stop the clocks in IPSTPCR1 */
	ldr  x5, =DEVDISR2_MASK
	mov  x21, #PMU_IPPDEXPCR1_OFFSET
	ldr  w6, [x1, x21]
	bic  w8, w5, w6
	str  w8, [x1, x16]

	/* stop the clocks in IPSTPCR2 */
	ldr  x3, =DEVDISR3_MASK
	mov  x21, #PMU_IPPDEXPCR2_OFFSET
	ldr  w4, [x1, x21]
	bic  w9, w3, w4
	str  w9, [x1, x17]

	/* stop the clocks in IPSTPCR3 */
	ldr  x5,  =DEVDISR4_MASK
	mov  x21, #PMU_IPPDEXPCR3_OFFSET
	ldr  w6,  [x1, x21]
	bic  w10, w5, w6
	str  w10, [x1, x18]

	/* stop the clocks in IPSTPCR4
	 *   - exclude the ddr clocks as we are currently executing
	 *	 out of *some* memory, might be ddr
	 *   - exclude the OCRAM clk so that we retain any code/data in
	 *	 OCRAM
	 *   - may need to exclude the debug clock if we are testing
	 */
	ldr  x3, =DEVDISR5_MASK
	mov  w6, #DEVDISR5_MASK_ALL_MEM
	bic  w3, w3, w6

	mov  w5, #POLICY_DEBUG_ENABLE
	cbz  w5, 3f
	mov  w6, #DEVDISR5_MASK_DBG
	bic  w3, w3, w6
3:
	mov  x21, #PMU_IPPDEXPCR4_OFFSET
	ldr  w4,  [x1, x21]
	bic  w11, w3, w4
	str  w11, [x1, x19]

	/* stop the clocks in IPSTPCR5 */
	ldr  x5,  =DEVDISR6_MASK
	mov  x21, #PMU_IPPDEXPCR5_OFFSET
	ldr  w6,  [x1, x21]
	bic  w12, w5, w6
	str  w12, [x1, x20]

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x7  = IPSTPCR0
	 * x8  = IPSTPCR1
	 * x9  = IPSTPCR2
	 * x10 = IPSTPCR3
	 * x11 = IPSTPCR4
	 * x12 = IPSTPCR5
	 */

	/* poll until the clocks are stopped in IPSTPACKSR0 */
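	/* the ack polls below are bounded: if the expected ack has not
	 * appeared after CLOCK_RETRY_CNT reads, the sequence moves on to the
	 * next register rather than hanging
	 */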
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR0_OFFSET
4:
	ldr  w5, [x1, x21]
	cmp  w5, w7
	b.eq 5f
	sub  w4, w4, #1
	cbnz w4, 4b

	/* poll until the clocks are stopped in IPSTPACKSR1 */
5:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR1_OFFSET
6:
	ldr  w5, [x1, x21]
	cmp  w5, w8
	b.eq 7f
	sub  w4, w4, #1
	cbnz w4, 6b

	/* poll until the clocks are stopped in IPSTPACKSR2 */
7:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR2_OFFSET
8:
	ldr  w5, [x1, x21]
	cmp  w5, w9
	b.eq 9f
	sub  w4, w4, #1
	cbnz w4, 8b

	/* poll until the clocks are stopped in IPSTPACKSR3 */
9:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR3_OFFSET
10:
	ldr  w5, [x1, x21]
	cmp  w5, w10
	b.eq 11f
	sub  w4, w4, #1
	cbnz w4, 10b

	/* poll until the clocks are stopped in IPSTPACKSR4 */
11:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR4_OFFSET
12:
	ldr  w5, [x1, x21]
	cmp  w5, w11
	b.eq 13f
	sub  w4, w4, #1
	cbnz w4, 12b

	/* poll until the clocks are stopped in IPSTPACKSR5 */
13:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR5_OFFSET
14:
	ldr  w5, [x1, x21]
	cmp  w5, w12
	b.eq 15f
	sub  w4, w4, #1
	cbnz w4, 14b

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x7  = IPSTPCR0
	 * x8  = IPSTPCR1
	 * x9  = IPSTPCR2
	 * x10 = IPSTPCR3
	 * x11 = IPSTPCR4
	 * x12 = IPSTPCR5
	 */

15:
	mov  x3, #NXP_DCFG_ADDR

	/* save the devdisr registers to stack */
	ldr  w13, [x3, #DCFG_DEVDISR1_OFFSET]
	ldr  w14, [x3, #DCFG_DEVDISR2_OFFSET]
	ldr  w15, [x3, #DCFG_DEVDISR3_OFFSET]
	ldr  w16, [x3, #DCFG_DEVDISR4_OFFSET]
	ldr  w17, [x3, #DCFG_DEVDISR5_OFFSET]
	ldr  w18, [x3, #DCFG_DEVDISR6_OFFSET]

	stp  x13, x14,  [sp, #-16]!
	stp  x15, x16,  [sp, #-16]!
	stp  x17, x18,  [sp, #-16]!

	/* power down the IP in DEVDISR1 - corresponds to IPSTPCR0 */
	str  w7,  [x3, #DCFG_DEVDISR1_OFFSET]

	/* power down the IP in DEVDISR2 - corresponds to IPSTPCR1 */
	str  w8, [x3, #DCFG_DEVDISR2_OFFSET]

	/* power down the IP in DEVDISR3 - corresponds to IPSTPCR2 */
	str  w9,  [x3, #DCFG_DEVDISR3_OFFSET]

	/* power down the IP in DEVDISR4 - corresponds to IPSTPCR3 */
	str  w10, [x3, #DCFG_DEVDISR4_OFFSET]

	/* power down the IP in DEVDISR5 - corresponds to IPSTPCR4 */
	str  w11, [x3, #DCFG_DEVDISR5_OFFSET]

	/* power down the IP in DEVDISR6 - corresponds to IPSTPCR5 */
	str  w12, [x3, #DCFG_DEVDISR6_OFFSET]

	/* setup register values for the cache-only sequence */
	mov  x4, #NXP_DDR_ADDR
	mov  x5, #NXP_DDR2_ADDR
	mov  x6, x11
	mov  x7, x17
	ldr  x12, =PMU_CLAINACTSETR_OFFSET
	ldr  x13, =PMU_CLSINACTSETR_OFFSET
	ldr  x14, =PMU_CLAINACTCLRR_OFFSET
	ldr  x15, =PMU_CLSINACTCLRR_OFFSET

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x3  = NXP_DCFG_ADDR
	 * x4  = NXP_DDR_ADDR
	 * x5  = NXP_DDR2_ADDR
	 * w6  = IPSTPCR4
	 * w7  = DEVDISR5
	 * x12 = PMU_CLAINACTSETR_OFFSET
	 * x13 = PMU_CLSINACTSETR_OFFSET
	 * x14 = PMU_CLAINACTCLRR_OFFSET
	 * x15 = PMU_CLSINACTCLRR_OFFSET
	 */

	mov  x8, #POLICY_DEBUG_ENABLE
	cbnz x8, 29f
	/* force the debug interface to be quiescent */
	mrs  x9, OSDLR_EL1
	orr  x9, x9, #0x1
	msr  OSDLR_EL1, x9

	/* enter the cache-only sequence */
29:
	bl   final_pwrdown

	/* when we are here, the core has come out of wfi and the
	 * ddr is back up
	 */

	mov  x8, #POLICY_DEBUG_ENABLE
	cbnz x8, 30f
	/* restart the debug interface */
	mrs  x9, OSDLR_EL1
	mov  x10, #1
	bic  x9, x9, x10
	msr  OSDLR_EL1, x9

	/* get saved DEVDISR regs off stack */
30:
	ldp  x17, x18, [sp], #16
	ldp  x15, x16, [sp], #16
	ldp  x13, x14, [sp], #16
	/* restore DEVDISR regs */
	str  w18, [x3, #DCFG_DEVDISR6_OFFSET]
	str  w17, [x3, #DCFG_DEVDISR5_OFFSET]
	str  w16, [x3, #DCFG_DEVDISR4_OFFSET]
	str  w15, [x3, #DCFG_DEVDISR3_OFFSET]
	str  w14, [x3, #DCFG_DEVDISR2_OFFSET]
	str  w13, [x3, #DCFG_DEVDISR1_OFFSET]
	isb

	/* get saved IPSTPCRn regs off stack */
	ldp  x13, x14, [sp], #16
	ldp  x11, x12, [sp], #16
	ldp  x9,  x10, [sp], #16

	/* restore IPSTPCRn regs */
	mov  x15, #PMU_IPSTPCR5_OFFSET
	str  w14, [x1, x15]
	mov  x16, #PMU_IPSTPCR4_OFFSET
	str  w13, [x1, x16]
	mov  x17, #PMU_IPSTPCR3_OFFSET
	str  w12, [x1, x17]
	mov  x18, #PMU_IPSTPCR2_OFFSET
	str  w11, [x1, x18]
	mov  x19, #PMU_IPSTPCR1_OFFSET
	str  w10, [x1, x19]
	mov  x20, #PMU_IPSTPCR0_OFFSET
	str  w9,  [x1, x20]
	isb

	/* poll on the IPSTPACKSRn regs til the IP clocks are restarted */
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR5_OFFSET
16:
	ldr  w5, [x1, x15]
	and  w5, w5, w14
	cbz  w5, 17f
	sub  w4, w4, #1
	cbnz w4, 16b

17:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR4_OFFSET
18:
	ldr  w5, [x1, x15]
	and  w5, w5, w13
	cbz  w5, 19f
	sub  w4, w4, #1
	cbnz w4, 18b

19:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR3_OFFSET
20:
	ldr  w5, [x1, x15]
	and  w5, w5, w12
	cbz  w5, 21f
	sub  w4, w4, #1
	cbnz w4, 20b

21:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR2_OFFSET
22:
	ldr  w5, [x1, x15]
	and  w5, w5, w11
	cbz  w5, 23f
	sub  w4, w4, #1
	cbnz w4, 22b

23:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR1_OFFSET
24:
	ldr  w5, [x1, x15]
	and  w5, w5, w10
	cbz  w5, 25f
	sub  w4, w4, #1
	cbnz w4, 24b

25:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x15, #PMU_IPSTPACKSR0_OFFSET
26:
	ldr  w5, [x1, x15]
	and  w5, w5, w9
	cbz  w5, 27f
	sub  w4, w4, #1
	cbnz w4, 26b

27:
	/* disable the stop-request-override */
	mov  x8, #PMU_POWMGTDCR0_OFFSET
	mov  w9, #POWMGTDCR_STP_OV_EN
	str  w9, [x2, x8]
	isb

	/* get hnf-sdcr and cpuactlr off stack */
	ldp  x7, x8, [sp], #16

	/* restore cpuactlr */
	msr  CORTEX_A72_CPUACTLR_EL1, x8
	isb

	/* restore snooping in the hnf nodes */
	ldr  x9, =NXP_CCN_HN_F_0_ADDR
	mov  x6, #CCN_HNF_NODE_COUNT
28:
	str  x7, [x9, #CCN_HN_F_SNP_DMN_CTL_SET_OFFSET]
	sub  x6, x6, #1
	add  x9, x9, #CCN_HNF_OFFSET
	cbnz x6, 28b
	isb

	mov  x30, x28
	ret
endfunc _soc_sys_pwrdn_wfi


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x1, x2
 */
func _soc_sys_exit_pwrdn

	mrs   x1, CORTEX_A72_ECTLR_EL1
	/* make sure the smp bit is set */
	orr   x1, x1, #CPUECTLR_SMPEN_MASK
	/* clear the retention control */
	mov   x2, #CPUECTLR_RET_8CLK
	bic   x1, x1, x2
	/* enable tablewalk prefetch */
	mov   x2, #CPUECTLR_DISABLE_TWALK_PREFETCH
	bic   x1, x1, x2
	msr   CORTEX_A72_ECTLR_EL1, x1
	isb

	ret
endfunc _soc_sys_exit_pwrdn


/* Function will pwrdown ddr and the final core - it will do this
 * by loading itself into the icache and then executing from there
 * in:
 *   x0  = core mask
 *   x1  = NXP_PMU_CCSR_ADDR
 *   x2  = NXP_PMU_DCSR_ADDR
 *   x3  = NXP_DCFG_ADDR
 *   x4  = NXP_DDR_ADDR
 *   x5  = NXP_DDR2_ADDR
 *   w6  = IPSTPCR4
 *   w7  = DEVDISR5
 *   x12 = PMU_CLAINACTSETR_OFFSET
 *   x13 = PMU_CLSINACTSETR_OFFSET
 *   x14 = PMU_CLAINACTCLRR_OFFSET
 *   x15 = PMU_CLSINACTCLRR_OFFSET
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x13, x14, x15, x16,
 *	  x17, x18
 */

/* 4Kb aligned */
.align 12
func final_pwrdown

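	/* this function executes in two passes: on the first pass x0 == 0,
	 * so the cbz at each touch_line label branches on to the next one,
	 * fetching every cache line of the function into the I-cache; on the
	 * second pass (x0 == 1, set at start_line_0) the blocks execute for
	 * real, allowing ddr to be powered down while the core runs entirely
	 * from the I-cache
	 */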
	mov  x0, xzr
	b    touch_line_0
start_line_0:
	mov  x0, #1
	/* put ddr controller 1 into self-refresh */
	ldr  w8, [x4, #DDR_CFG_2_OFFSET]
	orr  w8, w8, #CFG_2_FORCE_REFRESH
	str  w8, [x4, #DDR_CFG_2_OFFSET]

	/* put ddr controller 2 into self-refresh */
	ldr  w8, [x5, #DDR_CFG_2_OFFSET]
	orr  w8, w8, #CFG_2_FORCE_REFRESH
	str  w8, [x5, #DDR_CFG_2_OFFSET]

	/* stop the clocks in both ddr controllers */
	mov  w10, #DEVDISR5_MASK_DDR
	mov  x16, #PMU_IPSTPCR4_OFFSET
	orr  w9,  w6, w10
	str  w9,  [x1, x16]
	isb

	mov  x17, #PMU_IPSTPACKSR4_OFFSET
touch_line_0:
	cbz  x0, touch_line_1

start_line_1:
	/* poll IPSTPACKSR4 until
	 * ddr controller clocks are stopped.
	 */
1:
	ldr  w8, [x1, x17]
	and  w8, w8, w10
	cmp  w8, w10
	b.ne 1b

	/* shut down power to the ddr controllers */
	orr w9, w7, #DEVDISR5_MASK_DDR
	str w9, [x3, #DCFG_DEVDISR5_OFFSET]

	/* disable cluster acp ports */
	mov  w8, #CLAINACT_DISABLE_ACP
	str  w8, [x1, x12]

	/* disable skyros ports */
	mov  w9, #CLSINACT_DISABLE_SKY
	str  w9, [x1, x13]
	isb

touch_line_1:
	cbz  x0, touch_line_2

start_line_2:
	isb
3:
	wfi

	/* if we are here then we are awake
	 * - bring this device back up
	 */

	/* enable skyros ports */
	mov  w9, #CLSINACT_DISABLE_SKY
	str  w9, [x1, x15]

	/* enable acp ports */
	mov  w8, #CLAINACT_DISABLE_ACP
	str  w8, [x1, x14]
	isb

	/* bring up the ddr controllers */
	str w7, [x3, #DCFG_DEVDISR5_OFFSET]
	isb
	str w6,  [x1, x16]
	isb

	nop
touch_line_2:
	cbz  x0, touch_line_3

start_line_3:
	/* poll IPSTPACKSR4 until
	 * ddr controller clocks are running
	 */
	mov w10, #DEVDISR5_MASK_DDR
2:
	ldr  w8, [x1, x17]
	and  w8, w8, w10
	cbnz w8, 2b

	/* take ddr controller 2 out of self-refresh */
	mov w8, #CFG_2_FORCE_REFRESH
	ldr w9, [x5, #DDR_CFG_2_OFFSET]
	bic w9, w9, w8
	str w9, [x5, #DDR_CFG_2_OFFSET]

	/* take ddr controller 1 out of self-refresh */
	ldr w9, [x4, #DDR_CFG_2_OFFSET]
	bic w9, w9, w8
	str w9, [x4, #DDR_CFG_2_OFFSET]
	isb

	nop
	nop
	nop
touch_line_3:
	cbz  x0, start_line_0

	/* execute here after ddr is back up */

	ret
endfunc final_pwrdown

/* Function returns CLUSTER_3_NORMAL if the cores of cluster 3 are
 * to be handled normally, and it returns CLUSTER_3_IN_RESET if the cores
 * are to be held in reset
 * in:  none
 * out: x0 = #CLUSTER_3_NORMAL,   cluster 3 treated normal
 *	  x0 = #CLUSTER_3_IN_RESET, cluster 3 cores held in reset
 * uses x0, x1, x2
 */
func cluster3InReset

	/* default return is treat cores normal */
	mov  x0, #CLUSTER_3_NORMAL

	/* read RCW_SR27 register */
	mov  x1, #NXP_DCFG_ADDR
	ldr  w2, [x1, #RCW_SR27_OFFSET]

	/* test the cluster 3 bit */
	tst  w2, #CLUSTER_3_RCW_BIT
	b.eq 1f

	/* if we are here, then the bit was set */
	mov  x0, #CLUSTER_3_IN_RESET
1:
	ret
endfunc cluster3InReset


/* Function checks to see if cores which are to be disabled have been
 * released from reset - if not, it releases them
 * Note: cluster 3 cores may need special handling, depending upon
 *	   RCW[850] (bit 18 of RCWSR27) - see cluster3InReset
 * in:  none
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
 */
func release_disabled
	mov  x9, x30

	/* check if we need to keep cluster 3 cores in reset */
	bl   cluster3InReset		/*  0-2  */
	mov  x8, x0

	/* x8 = cluster 3 handling */

	/* read COREDISABLEDSR */
	mov  x0, #NXP_DCFG_ADDR
	ldr  w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]
	cmp  x8, #CLUSTER_3_IN_RESET
	b.ne 4f

	/* the cluster 3 cores are to be held in reset, so remove
	 * them from the disable mask
	 */
	bic  x4, x4, #CLUSTER_3_CORES_MASK
4:
	/* get the number of cpus on this device */
	mov   x6, #PLATFORM_CORE_COUNT

	mov  x0, #NXP_RESET_ADDR
	ldr  w5, [x0, #BRR_OFFSET]

	/* load the core mask for the first core */
	mov  x7, #1

	/* x4 = COREDISABLEDSR
	 * x5 = BRR
	 * x6 = loop count
	 * x7 = core mask bit
	 */
2:
	/* check if the core is to be disabled */
	tst  x4, x7
	b.eq 1f

	/* see if disabled cores have already been released from reset */
	tst  x5, x7
	b.ne 5f

	/* if core has not been released, then release it (0-3) */
	mov  x0, x7
	bl   _soc_core_release

	/* record the core state in the data area (0-3) */
	mov  x0, x7
	mov  x1, #CORE_STATE_DATA
	mov  x2, #CORE_DISABLED
	bl   _setCoreData

1:
	/* see if this is a cluster 3 core */
	mov   x3, #CLUSTER_3_CORES_MASK
	tst   x3, x7
	b.eq  5f

	/* this is a cluster 3 core - see if it needs to be held in reset */
	cmp  x8, #CLUSTER_3_IN_RESET
	b.ne 5f

	/* record the core state as disabled in the data area (0-3) */
	mov  x0, x7
	mov  x1, #CORE_STATE_DATA
	mov  x2, #CORE_DISABLED
	bl   _setCoreData

5:
	/* decrement the counter */
	subs  x6, x6, #1
	b.le  3f

	/* shift the core mask to the next core */
	lsl   x7, x7, #1
	/* continue */
	b	 2b
3:
	cmp  x8, #CLUSTER_3_IN_RESET
	b.ne 6f

	/* we need to hold the cluster 3 cores in reset,
	 * so mark them in the COREDISR and COREDISABLEDSR registers as
	 * "disabled", and the rest of the sw stack will leave them alone
	 * thinking that they have been disabled
	 */
	mov  x0, #NXP_DCFG_ADDR
	ldr  w1, [x0, #DCFG_COREDISR_OFFSET]
	orr  w1, w1, #CLUSTER_3_CORES_MASK
	str  w1, [x0, #DCFG_COREDISR_OFFSET]

	ldr  w2, [x0, #DCFG_COREDISABLEDSR_OFFSET]
	orr  w2, w2, #CLUSTER_3_CORES_MASK
	str  w2, [x0, #DCFG_COREDISABLEDSR_OFFSET]
	dsb  sy
	isb

#if (PSCI_TEST)
	/* x0 = NXP_DCFG_ADDR : read COREDISABLEDSR */
	ldr  w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]
	/* read COREDISR */
	ldr  w3, [x0, #DCFG_COREDISR_OFFSET]
#endif

6:
	mov  x30, x9
	ret

endfunc release_disabled


/* Function sets up the TrustZone Protection Controller (TZPC)
 * in:  none
 * out: none
 * uses x0, x1
 */
func init_tzpc

	/* set Non Secure access for all devices protected via TZPC */

	/* decode Protection-0 Set Reg */
	ldr	x1, =TZPCDECPROT_0_SET_BASE
	/* set decode region to NS, Bits[7:0] */
	mov	w0, #0xFF
	str	w0, [x1]

	/* decode Protection-1 Set Reg */
	ldr	x1, =TZPCDECPROT_1_SET_BASE
	/* set decode region to NS, Bits[7:0] */
	mov	w0, #0xFF
	str	w0, [x1]

	/* decode Protection-2 Set Reg */
	ldr	x1, =TZPCDECPROT_2_SET_BASE
	/* set decode region to NS, Bits[7:0] */
	mov	w0, #0xFF
	str	w0, [x1]

	/* entire SRAM as NS */
	/* secure RAM region size Reg */
	ldr	x1, =TZPC_BASE
	/* 0x00000000 = no secure region */
	mov	w0, #0x00000000
	str	w0, [x1]

	ret
endfunc init_tzpc

/* write a register in the DCFG block
 * in:  x0 = offset
 * in:  w1 = value to write
 * uses x0, x1, x2
 */
func _write_reg_dcfg
	ldr  x2, =NXP_DCFG_ADDR
	str  w1, [x2, x0]
	ret
endfunc _write_reg_dcfg


/* read a register in the DCFG block
 * in:  x0 = offset
 * out: w0 = value read
 * uses x0, x1, x2
 */
func _read_reg_dcfg
	ldr  x2, =NXP_DCFG_ADDR
	ldr  w1, [x2, x0]
	mov  w0, w1
	ret
endfunc _read_reg_dcfg


/* Function returns an mpidr value for a core, given a core_mask_lsb
 * in:  x0 = core mask lsb
 * out: x0 = affinity2:affinity1:affinity0, where affinity is 8-bits
 * uses x0, x1
 */
func get_mpidr_value

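	/* worked example: core mask 0x10 -> bit 4 -> SoC core 4; with two
	 * cores per cluster this is cluster 2, cpu 0, so the mpidr affinity
	 * value returned is 0x200
	 */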
	/* convert a core mask to an SoC core number */
	clz  w0, w0
	mov  w1, #31
	sub  w0, w1, w0

	/* get the mpidr core number from the SoC core number */
	mov  w1, wzr
	tst  x0, #1
	b.eq 1f
	orr  w1, w1, #1

1:
	/* extract the cluster number */
	lsr  w0, w0, #1
	orr  w0, w1, w0, lsl #8

	ret
endfunc get_mpidr_value


/* Function returns the redistributor rd base address for the core
 * specified by the core mask in x0
 * in:  x0 - core mask lsb of specified core
 * out: x0 = redistributor rd base address for specified core
 * uses x0, x1, x2
 */
func get_gic_rd_base
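	/* the core number for a one-hot mask is 31 - clz(mask), computed
	 * below as (32 - clz) - 1; the rd base is then found by stepping
	 * that many GIC_RD_OFFSET-sized redistributor frames up from
	 * NXP_GICR_ADDR
	 */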
	clz  w1, w0
	mov  w2, #0x20
	sub  w2, w2, w1
	sub  w2, w2, #1

	ldr  x0, =NXP_GICR_ADDR
	mov  x1, #GIC_RD_OFFSET

	/* x2 = core number
	 * loop counter
	 */
2:
	cbz  x2, 1f
	add  x0, x0, x1
	sub  x2, x2, #1
	b	2b
1:
	ret
endfunc get_gic_rd_base


/* Function returns the redistributor sgi base address for the core
 * specified by the core mask in x0
 * in:  x0 - core mask lsb of specified core
 * out: x0 = redistributor sgi base address for specified core
 * uses x0, x1, x2
 */
func get_gic_sgi_base
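	/* same core-number computation as get_gic_rd_base, but stepping
	 * through the GIC_SGI_OFFSET-sized frames from NXP_GICR_SGI_ADDR
	 */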
	clz  w1, w0
	mov  w2, #0x20
	sub  w2, w2, w1
	sub  w2, w2, #1

	ldr  x0, =NXP_GICR_SGI_ADDR
	mov  x1, #GIC_SGI_OFFSET

	/* loop counter */
2:
	cbz  x2, 1f		/* x2 = core number */
	add  x0, x0, x1
	sub  x2, x2, #1
	b	2b
1:
	ret
endfunc get_gic_sgi_base

/* Function writes a register in the RESET block
 * in:  x0 = offset
 * in:  w1 = value to write
 * uses x0, x1, x2
 */
func _write_reg_reset
	ldr  x2, =NXP_RESET_ADDR
	str  w1, [x2, x0]
	ret
endfunc _write_reg_reset


/* Function reads a register in the RESET block
 * in:  x0 = offset
 * out: w0 = value read
 * uses x0, x1
 */
func _read_reg_reset
	ldr  x1, =NXP_RESET_ADDR
	ldr  w0, [x1, x0]
	ret
endfunc _read_reg_reset