/*
 * Copyright 2018-2020 NXP
 *
 * SPDX-License-Identifier: BSD-3-Clause
 *
 */

.section .text, "ax"

#include <asm_macros.S>

#include <lib/psci/psci.h>
#include <nxp_timer.h>
#include <plat_gic.h>
#include <pmu.h>

#include <bl31_data.h>
#include <plat_psci.h>
#include <platform_def.h>

.global soc_init_start
.global soc_init_percpu
.global soc_init_finish
.global _set_platform_security
.global _soc_set_start_addr

.global _soc_core_release
.global _soc_ck_disabled
.global _soc_core_restart
.global _soc_core_prep_off
.global _soc_core_entr_off
.global _soc_core_exit_off
.global _soc_sys_reset
.global _soc_sys_off
.global _soc_core_prep_stdby
.global _soc_core_entr_stdby
.global _soc_core_exit_stdby
.global _soc_core_prep_pwrdn
.global _soc_core_entr_pwrdn
.global _soc_core_exit_pwrdn
.global _soc_clstr_prep_stdby
.global _soc_clstr_exit_stdby
.global _soc_clstr_prep_pwrdn
.global _soc_clstr_exit_pwrdn
.global _soc_sys_prep_stdby
.global _soc_sys_exit_stdby
.global _soc_sys_prep_pwrdn
.global _soc_sys_pwrdn_wfi
.global _soc_sys_exit_pwrdn

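/* TZPC base address and the TZPCDECPROT "set" register addresses
 * (standard BP147 TZPC register offsets from TZPC_BASE)
 */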
.equ TZPC_BASE,			  0x02200000
.equ TZPCDECPROT_0_SET_BASE, 0x02200804
.equ TZPCDECPROT_1_SET_BASE, 0x02200810
.equ TZPCDECPROT_2_SET_BASE, 0x0220081C

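/* core mask and reset-state values for cluster 3 (cores 6-7) */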
#define CLUSTER_3_CORES_MASK 0xC0
#define CLUSTER_3_IN_RESET  1
#define CLUSTER_3_NORMAL	0

/* Cluster 3 handling is no longer based on frequency, but on RCW[850],
 * which is bit 18 of RCWSR27
 */
#define CLUSTER_3_RCW_BIT  0x40000

/* retry count for clock-stop acks */
.equ CLOCK_RETRY_CNT,  800

/* CPUACTLR bits used to disable prefetching in the A72 core */
#define  CPUACTLR_DIS_LS_HW_PRE	0x100000000000000
#define  CPUACTLR_DIS_L2_TLB_PRE   0x200000

/* Function starts the initialization tasks of the soc,
 * using secondary cores if they are available
 *
 * Called from C, so the non-volatile registers are saved;
 * they are saved as pairs of registers to maintain the
 * required 16-byte alignment on the stack
 *
 * in:
 * out:
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11
 */
func soc_init_start
	stp  x4,  x5,  [sp, #-16]!
	stp  x6,  x7,  [sp, #-16]!
	stp  x8,  x9,  [sp, #-16]!
	stp  x10, x11, [sp, #-16]!
	stp  x12, x13, [sp, #-16]!
	stp  x18, x30, [sp, #-16]!

	/* make sure the personality has been
	 * established by releasing cores that
	 * are marked "to-be-disabled" from reset
	 */
	bl  release_disabled  		/* 0-9 */

	/* init the task flags */
	bl  _init_task_flags   		/* 0-1 */

	/* set SCRATCHRW7 to 0x0 */
	ldr  x0, =DCFG_SCRATCHRW7_OFFSET
	mov  x1, xzr
	bl   _write_reg_dcfg

1:
	/* restore the aarch32/64 non-volatile registers */
	ldp  x18, x30, [sp], #16
	ldp  x12, x13, [sp], #16
	ldp  x10, x11, [sp], #16
	ldp  x8,  x9,  [sp], #16
	ldp  x6,  x7,  [sp], #16
	ldp  x4,  x5,  [sp], #16
	ret
endfunc soc_init_start


/* Function performs any soc-specific initialization that is needed on
 * a per-core basis.
 * in:  none
 * out: none
 * uses x0, x1, x2, x3
 */
func soc_init_percpu
	stp  x4,  x30,  [sp, #-16]!

	bl   plat_my_core_mask
	mov  x2, x0				/* x2 = core mask */

	/* Check if this core is marked for prefetch disable */
	mov   x0, #PREFETCH_DIS_OFFSET
	bl	_get_global_data		/* 0-1 */
	tst   x0, x2
	b.eq  1f
	bl	_disable_ldstr_pfetch_A72	/* 0 */
1:
	mov  x0, #NXP_PMU_ADDR
	bl enable_timer_base_to_cluster
	ldp  x4,  x30,  [sp], #16
	ret
endfunc soc_init_percpu


/* Function completes the initialization tasks of the soc
 * in:
 * out:
 * uses x0, x1, x2, x3, x4
 */
func soc_init_finish
	stp  x4,  x30,  [sp, #-16]!

	ldp   x4,  x30,  [sp], #16
	ret
endfunc soc_init_finish


/* Function sets the security mechanisms in the SoC to implement the
 * Platform Security Policy
 */
func _set_platform_security
	mov  x8, x30

#if (!SUPPRESS_TZC)
	/* initialize the tzpc */
	bl   init_tzpc
#endif

#if (!SUPPRESS_SEC)
	/* initialize secmon */
#ifdef NXP_SNVS_ENABLED
	mov x0, #NXP_SNVS_ADDR
	bl  init_sec_mon
#endif
#endif

	mov  x30, x8
	ret
endfunc _set_platform_security


/* Function writes a 64-bit address to bootlocptrh/l
 * in:  x0, 64-bit address to write to BOOTLOCPTRL/H
 * uses x0, x1, x2
 */
func _soc_set_start_addr
	/* Get the 64-bit base address of the dcfg block */
	ldr  x2, =NXP_DCFG_ADDR

	/* write the 32-bit BOOTLOCPTRL register */
	mov  x1, x0
	str  w1, [x2, #DCFG_BOOTLOCPTRL_OFFSET]

	/* write the 32-bit BOOTLOCPTRH register */
	lsr  x1, x0, #32
	str  w1, [x2, #DCFG_BOOTLOCPTRH_OFFSET]
	ret
endfunc _soc_set_start_addr

/* Function releases a secondary core from reset
 * in:   x0 = core_mask_lsb
 * out:  none
 * uses: x0, x1, x2, x3
 */
func _soc_core_release
	mov   x3, x30

	ldr  x1, =NXP_SEC_REGFILE_ADDR
	/* write to CORE_HOLD to tell
	 * the bootrom that this core is
	 * expected to run.
	 */
	str  w0, [x1, #CORE_HOLD_OFFSET]

	/* read-modify-write BRRL to release core */
	mov  x1, #NXP_RESET_ADDR
	ldr  w2, [x1, #BRR_OFFSET]

	/* x0 = core mask */
	orr  w2, w2, w0
	str  w2, [x1, #BRR_OFFSET]
	dsb  sy
	isb

	/* send event */
	sev
	isb

	mov   x30, x3
	ret
endfunc _soc_core_release


/* Function determines if a core is disabled via COREDISABLEDSR
 * in:  w0  = core_mask_lsb
 * out: w0  = 0, core not disabled
 *	  w0 != 0, core disabled
 * uses x0, x1
 */
func _soc_ck_disabled

	/* get base addr of dcfg block */
	ldr  x1, =NXP_DCFG_ADDR

	/* read COREDISABLEDSR */
	ldr  w1, [x1, #DCFG_COREDISABLEDSR_OFFSET]

	/* test core bit */
	and  w0, w1, w0

	ret
endfunc _soc_ck_disabled


/* Part of CPU_ON
 * Function restarts a core that was shut down via _soc_core_entr_off
 * in:  x0 = core mask lsb (of the target cpu)
 * out: x0 == 0, on success
 *	  x0 != 0, on failure
 * uses x0, x1, x2, x3, x4, x5, x6
 */
func _soc_core_restart
	mov  x6, x30
	mov  x4, x0

	/* program GICD_CTLR - enable secure grp0 */
	mov  x5, #NXP_GICD_ADDR
	ldr  w2, [x5, #GICD_CTLR_OFFSET]
	orr  w2, w2, #GICD_CTLR_EN_GRP_0
	str  w2, [x5, #GICD_CTLR_OFFSET]
	dsb sy
	isb

	/* poll on RWP until the write completes */
4:
	ldr  w2, [x5, #GICD_CTLR_OFFSET]
	tst  w2, #GICD_CTLR_RWP
	b.ne 4b

	/* x4 = core mask lsb
	 * x5 = gicd base addr
	 */
	mov  x0, x4
	bl   get_mpidr_value

	/* x0 = mpidr of target core
	 * x4 = core mask lsb of target core
	 * x5 = gicd base addr
	 */

	/* generate target list bit */
	and  x1, x0, #MPIDR_AFFINITY0_MASK
	mov  x2, #1
	lsl  x2, x2, x1

	/* get the affinity1 field */
	and  x1, x0, #MPIDR_AFFINITY1_MASK
	lsl  x1, x1, #8
	orr  x2, x2, x1

	/* insert the INTID for SGI15 */
	orr  x2, x2, #ICC_SGI0R_EL1_INTID

	/* fire the SGI */
	msr  ICC_SGI0R_EL1, x2
	dsb  sy
	isb

	/* load '0' on success */
	mov  x0, xzr

	mov  x30, x6
	ret
endfunc _soc_core_restart


/* Part of CPU_OFF
 * Function programs SoC & GIC registers in preparation for shutting down
 * the core
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6, x7
 */
func _soc_core_prep_off
	mov  x8, x30
	mov  x7, x0		/* x7 = core mask lsb */

	mrs  x1, CORTEX_A72_ECTLR_EL1

	/* set smp and disable L2 snoops in cpuectlr */
	orr  x1, x1, #CPUECTLR_SMPEN_EN
	orr  x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
	bic  x1, x1, #CPUECTLR_INS_PREFETCH_MASK
	bic  x1, x1, #CPUECTLR_DAT_PREFETCH_MASK

	/* set retention control in cpuectlr */
	bic  x1, x1, #CPUECTLR_TIMER_MASK
	orr  x1, x1, #CPUECTLR_TIMER_8TICKS
	msr  CORTEX_A72_ECTLR_EL1, x1

	/* get redistributor rd base addr for this core */
	mov  x0, x7
	bl   get_gic_rd_base
	mov  x6, x0

	/* get redistributor sgi base addr for this core */
	mov  x0, x7
	bl   get_gic_sgi_base
	mov  x5, x0

	/* x5 = gicr sgi base addr
	 * x6 = gicr rd  base addr
	 * x7 = core mask lsb
	 */

	/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
	mov  w3, #GICR_ICENABLER0_SGI15
	str  w3, [x5, #GICR_ICENABLER0_OFFSET]
2:
	/* poll on rwp bit in GICR_CTLR */
	ldr  w4, [x6, #GICR_CTLR_OFFSET]
	tst  w4, #GICR_CTLR_RWP
	b.ne 2b

	/* disable GRP1 interrupts at cpu interface */
	msr  ICC_IGRPEN1_EL3, xzr

	/* disable GRP0 ints at cpu interface */
	msr  ICC_IGRPEN0_EL1, xzr

	/* program the redistributor - poll on GICR_CTLR.RWP as needed */

	/* define SGI 15 as Grp0 - GICR_IGROUPR0 */
	ldr  w4, [x5, #GICR_IGROUPR0_OFFSET]
	bic  w4, w4, #GICR_IGROUPR0_SGI15
	str  w4, [x5, #GICR_IGROUPR0_OFFSET]

	/* define SGI 15 as Grp0 - GICR_IGRPMODR0 */
	ldr  w3, [x5, #GICR_IGRPMODR0_OFFSET]
	bic  w3, w3, #GICR_IGRPMODR0_SGI15
	str  w3, [x5, #GICR_IGRPMODR0_OFFSET]

	/* set priority of SGI 15 to highest (0x0) - GICR_IPRIORITYR3 */
	ldr  w4, [x5, #GICR_IPRIORITYR3_OFFSET]
	bic  w4, w4, #GICR_IPRIORITYR3_SGI15_MASK
	str  w4, [x5, #GICR_IPRIORITYR3_OFFSET]

	/* enable SGI 15 at redistributor - GICR_ISENABLER0 */
	mov  w3, #GICR_ISENABLER0_SGI15
	str  w3, [x5, #GICR_ISENABLER0_OFFSET]
	dsb  sy
	isb
3:
	/* poll on rwp bit in GICR_CTLR */
	ldr  w4, [x6, #GICR_CTLR_OFFSET]
	tst  w4, #GICR_CTLR_RWP
	b.ne 3b

	/* quiesce the debug interfaces */
	mrs  x3, osdlr_el1
	orr  x3, x3, #OSDLR_EL1_DLK_LOCK
	msr  osdlr_el1, x3
	isb

	/* enable grp0 ints */
	mov  x3, #ICC_IGRPEN0_EL1_EN
	msr  ICC_IGRPEN0_EL1, x3

	/* x5 = gicr sgi base addr
	 * x6 = gicr rd  base addr
	 * x7 = core mask lsb
	 */

	/* clear any pending interrupts */
	mvn  w1, wzr
	str  w1, [x5, #GICR_ICPENDR0_OFFSET]

	/* make sure system counter is enabled */
	ldr  x3, =NXP_TIMER_ADDR
	ldr  w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
	tst  w0, #SYS_COUNTER_CNTCR_EN
	b.ne 4f
	orr  w0, w0, #SYS_COUNTER_CNTCR_EN
	str  w0, [x3, #SYS_COUNTER_CNTCR_OFFSET]
4:
	/* enable the core timer and mask timer interrupt */
	mov  x1, #CNTP_CTL_EL0_EN
	orr  x1, x1, #CNTP_CTL_EL0_IMASK
	msr  cntp_ctl_el0, x1

	isb
	mov  x30, x8
	ret
endfunc _soc_core_prep_off


/* Part of CPU_OFF:
 * Function performs the final steps to shut down the core
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5
 */
func _soc_core_entr_off
	mov  x5, x30
	mov  x4, x0

1:
	/* enter low-power state by executing wfi */
	wfi

	/* see if SGI15 woke us up */
	mrs  x2, ICC_IAR0_EL1
	mov  x3, #ICC_IAR0_EL1_SGI15
	cmp  x2, x3
	b.ne 2f

	/* deactivate the interrupt */
	msr ICC_EOIR0_EL1, x2

2:
	/* check if core is turned ON */
	mov  x0, x4
	/* fetch the core state into x0 */
	bl   _getCoreState

	cmp  x0, #CORE_WAKEUP
	b.ne 1b

	/* if we reach here, the core has exited wfi with a wakeup request */

	mov  x30, x5
	ret
endfunc _soc_core_entr_off


/* Part of CPU_OFF:
 * Function begins the process of bringing a core back up
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1, x2, x3, x4, x5, x6
 */
func _soc_core_exit_off
	mov  x6, x30
	mov  x5, x0

	/* disable forwarding of GRP0 ints at cpu interface */
	msr  ICC_IGRPEN0_EL1, xzr

	/* get redistributor sgi base addr for this core */
	mov  x0, x5
	bl   get_gic_sgi_base
	mov  x4, x0

	/* x4 = gicr sgi base addr
	 * x5 = core mask
	 */

	/* disable SGI 15 at redistributor - GICR_ICENABLER0 */
	mov  w1, #GICR_ICENABLER0_SGI15
	str  w1, [x4, #GICR_ICENABLER0_OFFSET]

	/* get redistributor rd base addr for this core */
	mov  x0, x5
	bl   get_gic_rd_base
	mov  x4, x0

2:
	/* poll on rwp bit in GICR_CTLR */
	ldr  w2, [x4, #GICR_CTLR_OFFSET]
	tst  w2, #GICR_CTLR_RWP
	b.ne 2b

	/* unlock the debug interfaces */
	mrs  x3, osdlr_el1
	bic  x3, x3, #OSDLR_EL1_DLK_LOCK
	msr  osdlr_el1, x3
	isb

	dsb sy
	isb
	mov  x30, x6
	ret
endfunc _soc_core_exit_off


/* Function requests a reset of the entire SOC
 * in:  none
 * out: none
 * uses: x0, x1, x2, x3, x4, x5, x6
 */
func _soc_sys_reset
	mov  x6, x30

	ldr  x2, =NXP_RST_ADDR

	/* clear the RST_REQ_MSK and SW_RST_REQ */
	mov  w0, #0x00000000
	str  w0, [x2, #RSTCNTL_OFFSET]

	/* initiate the sw reset request */
	mov  w0, #SW_RST_REQ_INIT
	str  w0, [x2, #RSTCNTL_OFFSET]

	/* In case this address range is mapped as cacheable,
	 * flush the write out of the dcaches.
	 */
	add  x2, x2, #RSTCNTL_OFFSET
	dc   cvac, x2
	dsb  st
	isb

	/* Function does not return */
	b  .
endfunc _soc_sys_reset


/* Part of SYSTEM_OFF:
 * Function turns off the SoC clocks
 * Note: Function is not intended to return, and the only allowable
 *	   recovery is POR
 * in:  none
 * out: none
 * uses x0, x1, x2, x3
 */
func _soc_sys_off

	/* disable sec, QBman, spi and qspi */
	ldr  x2, =NXP_DCFG_ADDR
	ldr  x0, =DCFG_DEVDISR1_OFFSET
	ldr  w1, =DCFG_DEVDISR1_SEC
	str  w1, [x2, x0]
	ldr  x0, =DCFG_DEVDISR3_OFFSET
	ldr  w1, =DCFG_DEVDISR3_QBMAIN
	str  w1, [x2, x0]
	ldr  x0, =DCFG_DEVDISR4_OFFSET
	ldr  w1, =DCFG_DEVDISR4_SPI_QSPI
	str  w1, [x2, x0]

	/* set TPMWAKEMR0 */
	ldr  x0, =TPMWAKEMR0_ADDR
	mov  w1, #0x1
	str  w1, [x0]

	/* disable icache, dcache, mmu @ EL1 */
	mov  x1, #SCTLR_I_C_M_MASK
	mrs  x0, sctlr_el1
	bic  x0, x0, x1
	msr  sctlr_el1, x0

	/* set the SMP bit and the retention control in cpuectlr */
	mrs  x0, CORTEX_A72_ECTLR_EL1
	bic  x0, x0, #CPUECTLR_TIMER_MASK
	orr  x0, x0, #CPUECTLR_SMPEN_EN
	orr  x0, x0, #CPUECTLR_TIMER_8TICKS
	msr  CORTEX_A72_ECTLR_EL1, x0
	isb

	/* disable CCN snoop domain */
	mov  x1, #NXP_CCN_HN_F_0_ADDR
	ldr  x0, =CCN_HN_F_SNP_DMN_CTL_MASK
	str  x0, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET]
3:
	ldr  w2, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET]
	cmp  w2, #0x2
	b.ne 3b

	mov  x3, #NXP_PMU_ADDR

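	/* wait until PCPW20SR matches the idle-core mask */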
4:
	ldr  w1, [x3, #PMU_PCPW20SR_OFFSET]
	cmp  w1, #PMU_IDLE_CORE_MASK
	b.ne 4b

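	/* set the idle-cluster mask in CLAINACTSETR, then re-poll PCPW20SR */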
	mov  w1, #PMU_IDLE_CLUSTER_MASK
	str  w1, [x3, #PMU_CLAINACTSETR_OFFSET]

1:
	ldr  w1, [x3, #PMU_PCPW20SR_OFFSET]
	cmp  w1, #PMU_IDLE_CORE_MASK
	b.ne 1b

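	/* flush the cluster L2 caches: request the flush, wait for it to
	 * complete, then clear the flush request
	 */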
	mov  w1, #PMU_FLUSH_CLUSTER_MASK
	str  w1, [x3, #PMU_CLL2FLUSHSETR_OFFSET]

2:
	ldr  w1, [x3, #PMU_CLL2FLUSHSR_OFFSET]
	cmp  w1, #PMU_FLUSH_CLUSTER_MASK
	b.ne 2b

	mov  w1, #PMU_FLUSH_CLUSTER_MASK
	str  w1, [x3, #PMU_CLSL2FLUSHCLRR_OFFSET]

	mov  w1, #PMU_FLUSH_CLUSTER_MASK
	str  w1, [x3, #PMU_CLSINACTSETR_OFFSET]

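	/* mask all interrupt types (DAIF) in the saved program status
	 * registers for EL1 and EL2
	 */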
	mov  x2, #DAIF_SET_MASK
	mrs  x1, spsr_el1
	orr  x1, x1, x2
	msr  spsr_el1, x1

	mrs  x1, spsr_el2
	orr  x1, x1, x2
	msr  spsr_el2, x1

	/* force the debug interface to be quiescent */
	mrs  x0, osdlr_el1
	orr  x0, x0, #0x1
	msr  osdlr_el1, x0

	/* invalidate all TLB entries at all 3 exception levels */
	tlbi alle1
	tlbi alle2
	tlbi alle3

	/* x3 = pmu base addr */

	/* request lpm20 */
	ldr  x0, =PMU_POWMGTCSR_OFFSET
	ldr  w1, =PMU_POWMGTCSR_VAL
	str  w1, [x3, x0]

5:
	wfe
	b.eq  5b
endfunc _soc_sys_off


/* Part of CPU_SUSPEND
 * Function puts the calling core into standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
func _soc_core_entr_stdby

	dsb  sy
	isb
	wfi

	ret
endfunc _soc_core_entr_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_core_prep_stdby

	/* clear CORTEX_A72_ECTLR_EL1[2:0] */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_TIMER_MASK
	msr  CORTEX_A72_ECTLR_EL1, x1

	ret
endfunc _soc_core_prep_stdby


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_exit_stdby

	ret
endfunc _soc_core_exit_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_prep_pwrdn

	/* make sure system counter is enabled */
	ldr  x2, =NXP_TIMER_ADDR
	ldr  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
	tst  w0, #SYS_COUNTER_CNTCR_EN
	b.ne 1f
	orr  w0, w0, #SYS_COUNTER_CNTCR_EN
	str  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:

	/* enable dynamic retention control (CPUECTLR[2:0])
	 * set the SMPEN bit (CPUECTLR[6])
	 */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_RET_MASK
	orr  x1, x1, #CPUECTLR_TIMER_8TICKS
	orr  x1, x1, #CPUECTLR_SMPEN_EN
	msr  CORTEX_A72_ECTLR_EL1, x1

	isb
	ret
endfunc _soc_core_prep_pwrdn


/* Part of CPU_SUSPEND
 * Function puts the calling core into a power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses x0
 */
func _soc_core_entr_pwrdn

	/* X0 = core mask lsb */

	dsb  sy
	isb
	wfi

	ret
endfunc _soc_core_entr_pwrdn


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_core_exit_pwrdn

	ret
endfunc _soc_core_exit_pwrdn


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_clstr_prep_stdby

	/* clear CORTEX_A72_ECTLR_EL1[2:0] */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_TIMER_MASK
	msr  CORTEX_A72_ECTLR_EL1, x1

	ret
endfunc _soc_clstr_prep_stdby


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_clstr_exit_stdby

	ret
endfunc _soc_clstr_exit_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_clstr_prep_pwrdn

	/* make sure system counter is enabled */
	ldr  x2, =NXP_TIMER_ADDR
	ldr  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
	tst  w0, #SYS_COUNTER_CNTCR_EN
	b.ne 1f
	orr  w0, w0, #SYS_COUNTER_CNTCR_EN
	str  w0, [x2, #SYS_COUNTER_CNTCR_OFFSET]
1:

	/* enable dynamic retention control (CPUECTLR[2:0])
	 * set the SMPEN bit (CPUECTLR[6])
	 */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_RET_MASK
	orr  x1, x1, #CPUECTLR_TIMER_8TICKS
	orr  x1, x1, #CPUECTLR_SMPEN_EN
	msr  CORTEX_A72_ECTLR_EL1, x1

	isb
	ret
endfunc _soc_clstr_prep_pwrdn


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after power-down state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_clstr_exit_pwrdn

	ret
endfunc _soc_clstr_exit_pwrdn


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to standby
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_sys_prep_stdby

	/* clear CORTEX_A72_ECTLR_EL1[2:0] */
	mrs  x1, CORTEX_A72_ECTLR_EL1
	bic  x1, x1, #CPUECTLR_TIMER_MASK
	msr  CORTEX_A72_ECTLR_EL1, x1
	ret
endfunc _soc_sys_prep_stdby


/* Part of CPU_SUSPEND
 * Function performs any SoC-specific cleanup after standby state
 * in:  x0 = core mask lsb
 * out: none
 * uses none
 */
func _soc_sys_exit_stdby

	ret
endfunc _soc_sys_exit_stdby


/* Part of CPU_SUSPEND
 * Function performs SoC-specific programming prior to
 * suspend-to-power-down
 * in:  x0 = core mask lsb
 * out: none
 * uses x0, x1
 */
func _soc_sys_prep_pwrdn

	mrs   x1, CORTEX_A72_ECTLR_EL1
	/* make sure the smp bit is set */
	orr   x1, x1, #CPUECTLR_SMPEN_MASK
	/* set the retention control */
	orr   x1, x1, #CPUECTLR_RET_8CLK
	/* disable tablewalk prefetch */
	orr   x1, x1, #CPUECTLR_DISABLE_TWALK_PREFETCH
	msr   CORTEX_A72_ECTLR_EL1, x1
	isb

	ret
endfunc _soc_sys_prep_pwrdn


/* Part of CPU_SUSPEND
 * Function puts the calling core, and potentially the soc, into a
 * low-power state
 * in:  x0 = core mask lsb
 * out: x0 = 0, success
 *	  x0 < 0, failure
 * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14,
 *	  x15, x16, x17, x18, x19, x20, x21, x28
 */
func _soc_sys_pwrdn_wfi
	mov  x28, x30

	/* disable cluster snooping in the CCN-508 */
	ldr  x1, =NXP_CCN_HN_F_0_ADDR
	ldr  x7, [x1, #CCN_HN_F_SNP_DMN_CTL_OFFSET]
	mov  x6, #CCN_HNF_NODE_COUNT
1:
	str  x7, [x1, #CCN_HN_F_SNP_DMN_CTL_CLR_OFFSET]
	sub  x6, x6, #1
	add  x1, x1, #CCN_HNF_OFFSET
	cbnz x6, 1b

	/* x0  = core mask
	 * x7  = hnf sdcr
	 */

	ldr  x1, =NXP_PMU_CCSR_ADDR
	ldr  x2, =NXP_PMU_DCSR_ADDR

	/* enable the stop-request-override */
	mov  x3, #PMU_POWMGTDCR0_OFFSET
	mov  x4, #POWMGTDCR_STP_OV_EN
	str  w4, [x2, x3]

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x7  = hnf sdcr
	 */

	/* disable prefetching in the A72 core */
	mrs  x8, CORTEX_A72_CPUACTLR_EL1
	tst  x8, #CPUACTLR_DIS_LS_HW_PRE
	b.ne 2f
	dsb  sy
	isb
	/* disable data prefetch */
	orr  x16, x8, #CPUACTLR_DIS_LS_HW_PRE
	/* disable tlb prefetch */
	orr  x16, x16, #CPUACTLR_DIS_L2_TLB_PRE
	msr  CORTEX_A72_CPUACTLR_EL1, x16
	isb

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x7  = hnf sdcr
	 * x8  = cpuactlr
	 */

2:
	/* save hnf-sdcr and cpuactlr to stack */
	stp  x7,  x8,  [sp, #-16]!

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 */

	/* save the IPSTPCRn registers to stack */
	mov  x15, #PMU_IPSTPCR0_OFFSET
	ldr  w9,  [x1, x15]
	mov  x16, #PMU_IPSTPCR1_OFFSET
	ldr  w10, [x1, x16]
	mov  x17, #PMU_IPSTPCR2_OFFSET
	ldr  w11, [x1, x17]
	mov  x18, #PMU_IPSTPCR3_OFFSET
	ldr  w12, [x1, x18]
	mov  x19, #PMU_IPSTPCR4_OFFSET
	ldr  w13, [x1, x19]
	mov  x20, #PMU_IPSTPCR5_OFFSET
	ldr  w14, [x1, x20]

	stp  x9,  x10,  [sp, #-16]!
	stp  x11, x12,  [sp, #-16]!
	stp  x13, x14,  [sp, #-16]!

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x15 = PMU_IPSTPCR0_OFFSET
	 * x16 = PMU_IPSTPCR1_OFFSET
	 * x17 = PMU_IPSTPCR2_OFFSET
	 * x18 = PMU_IPSTPCR3_OFFSET
	 * x19 = PMU_IPSTPCR4_OFFSET
	 * x20 = PMU_IPSTPCR5_OFFSET
	 */

	/* load the full clock mask for IPSTPCR0 */
	ldr  x3, =DEVDISR1_MASK
	/* get the exclusions */
	mov  x21, #PMU_IPPDEXPCR0_OFFSET
	ldr  w4, [x1, x21]
	/* apply the exclusions to the mask */
	bic  w7, w3, w4
	/* stop the clocks in IPSTPCR0 */
	str  w7, [x1, x15]

	/* use same procedure for IPSTPCR1-IPSTPCR5 */

	/* stop the clocks in IPSTPCR1 */
	ldr  x5, =DEVDISR2_MASK
	mov  x21, #PMU_IPPDEXPCR1_OFFSET
	ldr  w6, [x1, x21]
	bic  w8, w5, w6
	str  w8, [x1, x16]

	/* stop the clocks in IPSTPCR2 */
	ldr  x3, =DEVDISR3_MASK
	mov  x21, #PMU_IPPDEXPCR2_OFFSET
	ldr  w4, [x1, x21]
	bic  w9, w3, w4
	str  w9, [x1, x17]

	/* stop the clocks in IPSTPCR3 */
	ldr  x5,  =DEVDISR4_MASK
	mov  x21, #PMU_IPPDEXPCR3_OFFSET
	ldr  w6,  [x1, x21]
	bic  w10, w5, w6
	str  w10, [x1, x18]

	/* stop the clocks in IPSTPCR4
	 *   - exclude the ddr clocks as we are currently executing
	 *	 out of *some* memory, might be ddr
	 *   - exclude the OCRAM clk so that we retain any code/data in
	 *	 OCRAM
	 *   - may need to exclude the debug clock if we are testing
	 */
	ldr  x3, =DEVDISR5_MASK
	mov  w6, #DEVDISR5_MASK_ALL_MEM
	bic  w3, w3, w6

	mov  w5, #POLICY_DEBUG_ENABLE
	cbz  w5, 3f
	mov  w6, #DEVDISR5_MASK_DBG
	bic  w3, w3, w6
3:
	mov  x21, #PMU_IPPDEXPCR4_OFFSET
	ldr  w4,  [x1, x21]
	bic  w11, w3, w4
	str  w11, [x1, x19]

	/* stop the clocks in IPSTPCR5 */
	ldr  x5,  =DEVDISR6_MASK
	mov  x21, #PMU_IPPDEXPCR5_OFFSET
	ldr  w6,  [x1, x21]
	bic  w12, w5, w6
	str  w12, [x1, x20]

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x7  = IPSTPCR0
	 * x8  = IPSTPCR1
	 * x9  = IPSTPCR2
	 * x10 = IPSTPCR3
	 * x11 = IPSTPCR4
	 * x12 = IPSTPCR5
	 */

	/* poll until the clocks are stopped in IPSTPACKSR0 */
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR0_OFFSET
4:
	ldr  w5, [x1, x21]
	cmp  w5, w7
	b.eq 5f
	sub  w4, w4, #1
	cbnz w4, 4b

	/* poll until the clocks are stopped in IPSTPACKSR1 */
5:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR1_OFFSET
6:
	ldr  w5, [x1, x21]
	cmp  w5, w8
	b.eq 7f
	sub  w4, w4, #1
	cbnz w4, 6b

	/* poll until the clocks are stopped in IPSTPACKSR2 */
7:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR2_OFFSET
8:
	ldr  w5, [x1, x21]
	cmp  w5, w9
	b.eq 9f
	sub  w4, w4, #1
	cbnz w4, 8b

	/* poll until the clocks are stopped in IPSTPACKSR3 */
9:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR3_OFFSET
10:
	ldr  w5, [x1, x21]
	cmp  w5, w10
	b.eq 11f
	sub  w4, w4, #1
	cbnz w4, 10b

	/* poll until the clocks are stopped in IPSTPACKSR4 */
11:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR4_OFFSET
12:
	ldr  w5, [x1, x21]
	cmp  w5, w11
	b.eq 13f
	sub  w4, w4, #1
	cbnz w4, 12b

	/* poll until the clocks are stopped in IPSTPACKSR5 */
13:
	mov  w4,  #CLOCK_RETRY_CNT
	mov  x21, #PMU_IPSTPACKSR5_OFFSET
14:
	ldr  w5, [x1, x21]
	cmp  w5, w12
	b.eq 15f
	sub  w4, w4, #1
	cbnz w4, 14b

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x7  = IPSTPCR0
	 * x8  = IPSTPCR1
	 * x9  = IPSTPCR2
	 * x10 = IPSTPCR3
	 * x11 = IPSTPCR4
	 * x12 = IPSTPCR5
	 */

15:
	mov  x3, #NXP_DCFG_ADDR

	/* save the devdisr registers to stack */
	ldr  w13, [x3, #DCFG_DEVDISR1_OFFSET]
	ldr  w14, [x3, #DCFG_DEVDISR2_OFFSET]
	ldr  w15, [x3, #DCFG_DEVDISR3_OFFSET]
	ldr  w16, [x3, #DCFG_DEVDISR4_OFFSET]
	ldr  w17, [x3, #DCFG_DEVDISR5_OFFSET]
	ldr  w18, [x3, #DCFG_DEVDISR6_OFFSET]

	stp  x13, x14,  [sp, #-16]!
	stp  x15, x16,  [sp, #-16]!
	stp  x17, x18,  [sp, #-16]!

	/* power down the IP in DEVDISR1 - corresponds to IPSTPCR0 */
	str  w7,  [x3, #DCFG_DEVDISR1_OFFSET]

	/* power down the IP in DEVDISR2 - corresponds to IPSTPCR1 */
	str  w8, [x3, #DCFG_DEVDISR2_OFFSET]

	/* power down the IP in DEVDISR3 - corresponds to IPSTPCR2 */
	str  w9,  [x3, #DCFG_DEVDISR3_OFFSET]

	/* power down the IP in DEVDISR4 - corresponds to IPSTPCR3 */
	str  w10, [x3, #DCFG_DEVDISR4_OFFSET]

	/* power down the IP in DEVDISR5 - corresponds to IPSTPCR4 */
	str  w11, [x3, #DCFG_DEVDISR5_OFFSET]

	/* power down the IP in DEVDISR6 - corresponds to IPSTPCR5 */
	str  w12, [x3, #DCFG_DEVDISR6_OFFSET]

	/* setup register values for the cache-only sequence */
	mov  x4, #NXP_DDR_ADDR
	mov  x5, #NXP_DDR2_ADDR
	mov  x6, x11
	mov  x7, x17
	ldr  x12, =PMU_CLAINACTSETR_OFFSET
	ldr  x13, =PMU_CLSINACTSETR_OFFSET
	ldr  x14, =PMU_CLAINACTCLRR_OFFSET
	ldr  x15, =PMU_CLSINACTCLRR_OFFSET

	/* x0  = core mask
	 * x1  = NXP_PMU_CCSR_ADDR
	 * x2  = NXP_PMU_DCSR_ADDR
	 * x3  = NXP_DCFG_ADDR
	 * x4  = NXP_DDR_ADDR
	 * x5  = NXP_DDR2_ADDR
	 * w6  = IPSTPCR4
	 * w7  = DEVDISR5
	 * x12 = PMU_CLAINACTSETR_OFFSET
	 * x13 = PMU_CLSINACTSETR_OFFSET
	 * x14 = PMU_CLAINACTCLRR_OFFSET
	 * x15 = PMU_CLSINACTCLRR_OFFSET
	 */

	mov  x8, #POLICY_DEBUG_ENABLE
	cbnz x8, 29f
	/* force the debug interface to be quiescent */
	mrs  x9, OSDLR_EL1
	orr  x9, x9, #0x1
	msr  OSDLR_EL1, x9

	/* enter the cache-only sequence */
29:
	bl   final_pwrdown

	/* when we are here, the core has come out of wfi and the
	 * ddr is back up
	 */

	mov  x8, #POLICY_DEBUG_ENABLE
	cbnz x8, 30f
	/* restart the debug interface */
	mrs  x9, OSDLR_EL1
	mov  x10, #1
	bic  x9, x9, x10
	msr  OSDLR_EL1, x9

	/* get saved DEVDISR regs off stack */
30:
	ldp  x17, x18, [sp], #16
	ldp  x15, x16, [sp], #16
	ldp  x13, x14, [sp], #16
	/* restore DEVDISR regs */
	str  w18, [x3, #DCFG_DEVDISR6_OFFSET]
	str  w17, [x3, #DCFG_DEVDISR5_OFFSET]
	str  w16, [x3, #DCFG_DEVDISR4_OFFSET]
	str  w15, [x3, #DCFG_DEVDISR3_OFFSET]
	str  w14, [x3, #DCFG_DEVDISR2_OFFSET]
	str  w13, [x3, #DCFG_DEVDISR1_OFFSET]
	isb

	/* get saved IPSTPCRn regs off stack */
	ldp  x13, x14, [sp], #16
	ldp  x11, x12, [sp], #16
	ldp  x9,  x10, [sp], #16

	/* restore IPSTPCRn regs */
	mov  x15, #PMU_IPSTPCR5_OFFSET
	str  w14, [x1, x15]
	mov  x16, #PMU_IPSTPCR4_OFFSET
	str  w13, [x1, x16]
	mov  x17, #PMU_IPSTPCR3_OFFSET
	str  w12, [x1, x17]
	mov  x18, #PMU_IPSTPCR2_OFFSET
	str  w11, [x1, x18]
	mov  x19, #PMU_IPSTPCR1_OFFSET
	str  w10, [x1, x19]
	mov  x20, #PMU_IPSTPCR0_OFFSET
	str  w9,  [x1, x20]
	isb

1256*91f16700Schasinglulu	/* poll on IPSTPACKSRn regs until IP clocks are restarted */
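	/* each poll loop below gives up after CLOCK_RETRY_CNT reads and
	 * falls through even if the ack bits have not yet cleared
	 */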
1257*91f16700Schasinglulu	mov  w4,  #CLOCK_RETRY_CNT
1258*91f16700Schasinglulu	mov  x15, #PMU_IPSTPACKSR5_OFFSET
1259*91f16700Schasinglulu16:
1260*91f16700Schasinglulu	ldr  w5, [x1, x15]
1261*91f16700Schasinglulu	and  w5, w5, w14
1262*91f16700Schasinglulu	cbz  w5, 17f
1263*91f16700Schasinglulu	sub  w4, w4, #1
1264*91f16700Schasinglulu	cbnz w4, 16b
1265*91f16700Schasinglulu
1266*91f16700Schasinglulu17:
1267*91f16700Schasinglulu	mov  w4,  #CLOCK_RETRY_CNT
1268*91f16700Schasinglulu	mov  x15, #PMU_IPSTPACKSR4_OFFSET
1269*91f16700Schasinglulu18:
1270*91f16700Schasinglulu	ldr  w5, [x1, x15]
1271*91f16700Schasinglulu	and  w5, w5, w13
1272*91f16700Schasinglulu	cbz  w5, 19f
1273*91f16700Schasinglulu	sub  w4, w4, #1
1274*91f16700Schasinglulu	cbnz w4, 18b
1275*91f16700Schasinglulu
1276*91f16700Schasinglulu19:
1277*91f16700Schasinglulu	mov  w4,  #CLOCK_RETRY_CNT
1278*91f16700Schasinglulu	mov  x15, #PMU_IPSTPACKSR3_OFFSET
1279*91f16700Schasinglulu20:
1280*91f16700Schasinglulu	ldr  w5, [x1, x15]
1281*91f16700Schasinglulu	and  w5, w5, w12
1282*91f16700Schasinglulu	cbz  w5, 21f
1283*91f16700Schasinglulu	sub  w4, w4, #1
1284*91f16700Schasinglulu	cbnz w4, 20b
1285*91f16700Schasinglulu
1286*91f16700Schasinglulu21:
1287*91f16700Schasinglulu	mov  w4,  #CLOCK_RETRY_CNT
1288*91f16700Schasinglulu	mov  x15, #PMU_IPSTPACKSR2_OFFSET
1289*91f16700Schasinglulu22:
1290*91f16700Schasinglulu	ldr  w5, [x1, x15]
1291*91f16700Schasinglulu	and  w5, w5, w11
1292*91f16700Schasinglulu	cbz  w5, 23f
1293*91f16700Schasinglulu	sub  w4, w4, #1
1294*91f16700Schasinglulu	cbnz w4, 22b
1295*91f16700Schasinglulu
1296*91f16700Schasinglulu23:
1297*91f16700Schasinglulu	mov  w4,  #CLOCK_RETRY_CNT
1298*91f16700Schasinglulu	mov  x15, #PMU_IPSTPACKSR1_OFFSET
1299*91f16700Schasinglulu24:
1300*91f16700Schasinglulu	ldr  w5, [x1, x15]
1301*91f16700Schasinglulu	and  w5, w5, w10
1302*91f16700Schasinglulu	cbz  w5, 25f
1303*91f16700Schasinglulu	sub  w4, w4, #1
1304*91f16700Schasinglulu	cbnz w4, 24b
1305*91f16700Schasinglulu
1306*91f16700Schasinglulu25:
1307*91f16700Schasinglulu	mov  w4,  #CLOCK_RETRY_CNT
1308*91f16700Schasinglulu	mov  x15, #PMU_IPSTPACKSR0_OFFSET
1309*91f16700Schasinglulu26:
1310*91f16700Schasinglulu	ldr  w5, [x1, x15]
1311*91f16700Schasinglulu	and  w5, w5, w9
1312*91f16700Schasinglulu	cbz  w5, 27f
1313*91f16700Schasinglulu	sub  w4, w4, #1
1314*91f16700Schasinglulu	cbnz w4, 26b
1315*91f16700Schasinglulu
1316*91f16700Schasinglulu27:
1317*91f16700Schasinglulu	/* disable the stop-request-override */
1318*91f16700Schasinglulu	mov  x8, #PMU_POWMGTDCR0_OFFSET
1319*91f16700Schasinglulu	mov  w9, #POWMGTDCR_STP_OV_EN
1320*91f16700Schasinglulu	str  w9, [x2, x8]
1321*91f16700Schasinglulu	isb
1322*91f16700Schasinglulu
1323*91f16700Schasinglulu	/* get hnf-sdcr and cpuactlr off stack */
1324*91f16700Schasinglulu	ldp  x7, x8, [sp], #16
1325*91f16700Schasinglulu
1326*91f16700Schasinglulu	/* restore cpuactlr */
1327*91f16700Schasinglulu	msr  CORTEX_A72_CPUACTLR_EL1, x8
1328*91f16700Schasinglulu	isb
1329*91f16700Schasinglulu
1330*91f16700Schasinglulu	/* restore snooping in the hnf nodes */
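	/* write the saved snoop-domain value (x7) into the
	 * SNP_DMN_CTL_SET register of each of the CCN_HNF_NODE_COUNT
	 * HN-F nodes, stepping x9 by CCN_HNF_OFFSET per node
	 */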
1331*91f16700Schasinglulu	ldr  x9, =NXP_CCN_HN_F_0_ADDR
1332*91f16700Schasinglulu	mov  x6, #CCN_HNF_NODE_COUNT
1333*91f16700Schasinglulu28:
1334*91f16700Schasinglulu	str  x7, [x9, #CCN_HN_F_SNP_DMN_CTL_SET_OFFSET]
1335*91f16700Schasinglulu	sub  x6, x6, #1
1336*91f16700Schasinglulu	add  x9, x9, #CCN_HNF_OFFSET
1337*91f16700Schasinglulu	cbnz x6, 28b
1338*91f16700Schasinglulu	isb
1339*91f16700Schasinglulu
1340*91f16700Schasinglulu	mov  x30, x28
1341*91f16700Schasinglulu	ret
1342*91f16700Schasingluluendfunc _soc_sys_pwrdn_wfi
1343*91f16700Schasinglulu
1344*91f16700Schasinglulu
1345*91f16700Schasinglulu/* Part of CPU_SUSPEND
1346*91f16700Schasinglulu * Function performs any SoC-specific cleanup after power-down
1347*91f16700Schasinglulu * in:  x0 = core mask lsb
1348*91f16700Schasinglulu * out: none
1349*91f16700Schasinglulu * uses x0, x1, x2
1350*91f16700Schasinglulu */
1351*91f16700Schasinglulufunc _soc_sys_exit_pwrdn
1352*91f16700Schasinglulu
1353*91f16700Schasinglulu	mrs   x1, CORTEX_A72_ECTLR_EL1
1354*91f16700Schasinglulu	/* make sure the smp bit is set */
1355*91f16700Schasinglulu	orr   x1, x1, #CPUECTLR_SMPEN_MASK
1356*91f16700Schasinglulu	/* clr the retention control */
1357*91f16700Schasinglulu	mov   x2, #CPUECTLR_RET_8CLK
1358*91f16700Schasinglulu	bic   x1, x1, x2
1359*91f16700Schasinglulu	/* enable tablewalk prefetch */
1360*91f16700Schasinglulu	mov   x2, #CPUECTLR_DISABLE_TWALK_PREFETCH
1361*91f16700Schasinglulu	bic   x1, x1, x2
1362*91f16700Schasinglulu	msr   CORTEX_A72_ECTLR_EL1, x1
1363*91f16700Schasinglulu	isb
1364*91f16700Schasinglulu
1365*91f16700Schasinglulu	ret
1366*91f16700Schasingluluendfunc _soc_sys_exit_pwrdn
1367*91f16700Schasinglulu
1368*91f16700Schasinglulu
1369*91f16700Schasinglulu/* Function will pwrdown ddr and the final core - it will do this
1370*91f16700Schasinglulu * by loading itself into the icache and then executing from there
1371*91f16700Schasinglulu * in:
1372*91f16700Schasinglulu *   x0  = core mask
1373*91f16700Schasinglulu *   x1  = NXP_PMU_CCSR_ADDR
1374*91f16700Schasinglulu *   x2  = NXP_PMU_DCSR_ADDR
1375*91f16700Schasinglulu *   x3  = NXP_DCFG_ADDR
1376*91f16700Schasinglulu *   x4  = NXP_DDR_ADDR
1377*91f16700Schasinglulu *   x5  = NXP_DDR2_ADDR
1378*91f16700Schasinglulu *   w6  = IPSTPCR4
1379*91f16700Schasinglulu *   w7  = DEVDISR5
1380*91f16700Schasinglulu *   x12 = PMU_CLAINACTSETR_OFFSET
1381*91f16700Schasinglulu *   x13 = PMU_CLSINACTSETR_OFFSET
1382*91f16700Schasinglulu *   x14 = PMU_CLAINACTCLRR_OFFSET
1383*91f16700Schasinglulu *   x15 = PMU_CLSINACTCLRR_OFFSET
1384*91f16700Schasinglulu * out: none
1385*91f16700Schasinglulu * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x13, x14, x15, x16,
1386*91f16700Schasinglulu *	  x17, x18
1387*91f16700Schasinglulu */
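/* The body below is split into four blocks, each terminated by a
 * touch_line_N label. On the first pass (x0 == 0) only the branch at
 * the end of each block is executed, which fetches the function into
 * the I-cache; on the second pass (x0 == 1, set at start_line_0) the
 * real sequence runs from the I-cache while the ddr controllers are
 * in self-refresh with their clocks stopped.
 */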
1388*91f16700Schasinglulu
1389*91f16700Schasinglulu/* 4KB aligned */
1390*91f16700Schasinglulu.align 12
1391*91f16700Schasinglulufunc final_pwrdown
1392*91f16700Schasinglulu
1393*91f16700Schasinglulu	mov  x0, xzr
1394*91f16700Schasinglulu	b	touch_line_0
1395*91f16700Schasinglulustart_line_0:
1396*91f16700Schasinglulu	mov  x0, #1
1397*91f16700Schasinglulu	/* put ddr controller 1 into self-refresh */
1398*91f16700Schasinglulu	ldr  w8, [x4, #DDR_CFG_2_OFFSET]
1399*91f16700Schasinglulu	orr  w8, w8, #CFG_2_FORCE_REFRESH
1400*91f16700Schasinglulu	str  w8, [x4, #DDR_CFG_2_OFFSET]
1401*91f16700Schasinglulu
1402*91f16700Schasinglulu	/* put ddr controller 2 into self-refresh */
1403*91f16700Schasinglulu	ldr  w8, [x5, #DDR_CFG_2_OFFSET]
1404*91f16700Schasinglulu	orr  w8, w8, #CFG_2_FORCE_REFRESH
1405*91f16700Schasinglulu	str  w8, [x5, #DDR_CFG_2_OFFSET]
1406*91f16700Schasinglulu
1407*91f16700Schasinglulu	/* stop the clocks in both ddr controllers */
1408*91f16700Schasinglulu	mov  w10, #DEVDISR5_MASK_DDR
1409*91f16700Schasinglulu	mov  x16, #PMU_IPSTPCR4_OFFSET
1410*91f16700Schasinglulu	orr  w9,  w6, w10
1411*91f16700Schasinglulu	str  w9,  [x1, x16]
1412*91f16700Schasinglulu	isb
1413*91f16700Schasinglulu
1414*91f16700Schasinglulu	mov  x17, #PMU_IPSTPACKSR4_OFFSET
1415*91f16700Schasinglulutouch_line_0:
1416*91f16700Schasinglulu	cbz  x0, touch_line_1
1417*91f16700Schasinglulu
1418*91f16700Schasinglulustart_line_1:
1419*91f16700Schasinglulu	/* poll IPSTPACKSR4 until
1420*91f16700Schasinglulu	 * ddr controller clocks are stopped.
1421*91f16700Schasinglulu	 */
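	/* this loop has no retry limit; it spins until the ddr
	 * clock-stop is acknowledged
	 */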
1422*91f16700Schasinglulu1:
1423*91f16700Schasinglulu	ldr  w8, [x1, x17]
1424*91f16700Schasinglulu	and  w8, w8, w10
1425*91f16700Schasinglulu	cmp  w8, w10
1426*91f16700Schasinglulu	b.ne 1b
1427*91f16700Schasinglulu
1428*91f16700Schasinglulu	/* shut down power to the ddr controllers */
1429*91f16700Schasinglulu	orr w9, w7, #DEVDISR5_MASK_DDR
1430*91f16700Schasinglulu	str w9, [x3, #DCFG_DEVDISR5_OFFSET]
1431*91f16700Schasinglulu
1432*91f16700Schasinglulu	/* disable cluster acp ports */
1433*91f16700Schasinglulu	mov  w8, #CLAINACT_DISABLE_ACP
1434*91f16700Schasinglulu	str  w8, [x1, x12]
1435*91f16700Schasinglulu
1436*91f16700Schasinglulu	/* disable skyros ports */
1437*91f16700Schasinglulu	mov  w9, #CLSINACT_DISABLE_SKY
1438*91f16700Schasinglulu	str  w9, [x1, x13]
1439*91f16700Schasinglulu	isb
1440*91f16700Schasinglulu
1441*91f16700Schasinglulutouch_line_1:
1442*91f16700Schasinglulu	cbz  x0, touch_line_2
1443*91f16700Schasinglulu
1444*91f16700Schasinglulustart_line_2:
1445*91f16700Schasinglulu	isb
1446*91f16700Schasinglulu3:
1447*91f16700Schasinglulu	wfi
1448*91f16700Schasinglulu
1449*91f16700Schasinglulu	/* if we are here then we are awake
1450*91f16700Schasinglulu	 * - bring this device back up
1451*91f16700Schasinglulu	 */
1452*91f16700Schasinglulu
1453*91f16700Schasinglulu	/* enable skyros ports */
1454*91f16700Schasinglulu	mov  w9, #CLSINACT_DISABLE_SKY
1455*91f16700Schasinglulu	str  w9, [x1, x15]
1456*91f16700Schasinglulu
1457*91f16700Schasinglulu	/* enable acp ports */
1458*91f16700Schasinglulu	mov  w8, #CLAINACT_DISABLE_ACP
1459*91f16700Schasinglulu	str  w8, [x1, x14]
1460*91f16700Schasinglulu	isb
1461*91f16700Schasinglulu
1462*91f16700Schasinglulu	/* bring up the ddr controllers */
1463*91f16700Schasinglulu	str w7, [x3, #DCFG_DEVDISR5_OFFSET]
1464*91f16700Schasinglulu	isb
1465*91f16700Schasinglulu	str w6,  [x1, x16]
1466*91f16700Schasinglulu	isb
1467*91f16700Schasinglulu
1468*91f16700Schasinglulu	nop
1469*91f16700Schasinglulutouch_line_2:
1470*91f16700Schasinglulu	cbz  x0, touch_line_3
1471*91f16700Schasinglulu
1472*91f16700Schasinglulustart_line_3:
1473*91f16700Schasinglulu	/* poll IPSTPACKSR4 until
1474*91f16700Schasinglulu	 * ddr controller clocks are running
1475*91f16700Schasinglulu	 */
1476*91f16700Schasinglulu	mov w10, #DEVDISR5_MASK_DDR
1477*91f16700Schasinglulu2:
1478*91f16700Schasinglulu	ldr  w8, [x1, x17]
1479*91f16700Schasinglulu	and  w8, w8, w10
1480*91f16700Schasinglulu	cbnz w8, 2b
1481*91f16700Schasinglulu
1482*91f16700Schasinglulu	/* take ddr controller 2 out of self-refresh */
1483*91f16700Schasinglulu	mov w8, #CFG_2_FORCE_REFRESH
1484*91f16700Schasinglulu	ldr w9, [x5, #DDR_CFG_2_OFFSET]
1485*91f16700Schasinglulu	bic w9, w9, w8
1486*91f16700Schasinglulu	str w9, [x5, #DDR_CFG_2_OFFSET]
1487*91f16700Schasinglulu
1488*91f16700Schasinglulu	/* take ddr controller 1 out of self-refresh */
1489*91f16700Schasinglulu	ldr w9, [x4, #DDR_CFG_2_OFFSET]
1490*91f16700Schasinglulu	bic w9, w9, w8
1491*91f16700Schasinglulu	str w9, [x4, #DDR_CFG_2_OFFSET]
1492*91f16700Schasinglulu	isb
1493*91f16700Schasinglulu
1494*91f16700Schasinglulu	nop
1495*91f16700Schasinglulu	nop
1496*91f16700Schasinglulu	nop
1497*91f16700Schasinglulutouch_line_3:
1498*91f16700Schasinglulu	cbz  x0, start_line_0
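	/* on the prefetch pass x0 == 0, so the branch above restarts the
	 * function at start_line_0; on the real pass execution falls
	 * through to the ret below with ddr back up
	 */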
1499*91f16700Schasinglulu
1500*91f16700Schasinglulu	/* execute here after ddr is back up */
1501*91f16700Schasinglulu
1502*91f16700Schasinglulu	ret
1503*91f16700Schasingluluendfunc final_pwrdown
1504*91f16700Schasinglulu
1505*91f16700Schasinglulu/* Function returns CLUSTER_3_NORMAL if the cores of cluster 3 are
1506*91f16700Schasinglulu * to be handled normally, and it returns CLUSTER_3_IN_RESET if the cores
1507*91f16700Schasinglulu * are to be held in reset
1508*91f16700Schasinglulu * in:  none
1509*91f16700Schasinglulu * out: x0 = #CLUSTER_3_NORMAL,   cluster 3 treated normally
1510*91f16700Schasinglulu *	  x0 = #CLUSTER_3_IN_RESET, cluster 3 cores held in reset
1511*91f16700Schasinglulu * uses x0, x1, x2
1512*91f16700Schasinglulu */
1513*91f16700Schasinglulufunc cluster3InReset
1514*91f16700Schasinglulu
1515*91f16700Schasinglulu	/* default return is treat cores normal */
1516*91f16700Schasinglulu	mov  x0, #CLUSTER_3_NORMAL
1517*91f16700Schasinglulu
1518*91f16700Schasinglulu	/* read RCW_SR27 register */
1519*91f16700Schasinglulu	mov  x1, #NXP_DCFG_ADDR
1520*91f16700Schasinglulu	ldr  w2, [x1, #RCW_SR27_OFFSET]
1521*91f16700Schasinglulu
1522*91f16700Schasinglulu	/* test the cluster 3 bit */
1523*91f16700Schasinglulu	tst  w2, #CLUSTER_3_RCW_BIT
1524*91f16700Schasinglulu	b.eq 1f
1525*91f16700Schasinglulu
1526*91f16700Schasinglulu	/* if we are here, then the bit was set */
1527*91f16700Schasinglulu	mov  x0, #CLUSTER_3_IN_RESET
1528*91f16700Schasinglulu1:
1529*91f16700Schasinglulu	ret
1530*91f16700Schasingluluendfunc cluster3InReset
1531*91f16700Schasinglulu
1532*91f16700Schasinglulu
1533*91f16700Schasinglulu/* Function checks to see if cores which are to be disabled have been
1534*91f16700Schasinglulu * released from reset - if not, it releases them
1535*91f16700Schasinglulu * Note: there may be special handling of cluster 3 cores depending upon the
1536*91f16700Schasinglulu *	   RCW configuration (see cluster3InReset)
1537*91f16700Schasinglulu * in:  none
1538*91f16700Schasinglulu * out: none
1539*91f16700Schasinglulu * uses x0, x1, x2, x3, x4, x5, x6, x7, x8, x9
1540*91f16700Schasinglulu */
1541*91f16700Schasinglulufunc release_disabled
1542*91f16700Schasinglulu	mov  x9, x30
1543*91f16700Schasinglulu
1544*91f16700Schasinglulu	/* check if we need to keep cluster 3 cores in reset */
1545*91f16700Schasinglulu	bl   cluster3InReset		/*  0-2  */
1546*91f16700Schasinglulu	mov  x8, x0
1547*91f16700Schasinglulu
1548*91f16700Schasinglulu	/* x8 = cluster 3 handling */
1549*91f16700Schasinglulu
1550*91f16700Schasinglulu	/* read COREDISABLEDSR */
1551*91f16700Schasinglulu	mov  x0, #NXP_DCFG_ADDR
1552*91f16700Schasinglulu	ldr  w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]
1553*91f16700Schasinglulu	cmp  x8, #CLUSTER_3_IN_RESET
1554*91f16700Schasinglulu	b.ne 4f
1555*91f16700Schasinglulu
1556*91f16700Schasinglulu	/* the cluster 3 cores are to be held in reset, so remove
1557*91f16700Schasinglulu	 * them from the disable mask
1558*91f16700Schasinglulu	 */
1559*91f16700Schasinglulu	bic  x4, x4, #CLUSTER_3_CORES_MASK
1560*91f16700Schasinglulu4:
1561*91f16700Schasinglulu	/* get the number of cpus on this device */
1562*91f16700Schasinglulu	mov   x6, #PLATFORM_CORE_COUNT
1563*91f16700Schasinglulu
1564*91f16700Schasinglulu	mov  x0, #NXP_RESET_ADDR
1565*91f16700Schasinglulu	ldr  w5, [x0, #BRR_OFFSET]
1566*91f16700Schasinglulu
1567*91f16700Schasinglulu	/* load the core mask for the first core */
1568*91f16700Schasinglulu	mov  x7, #1
1569*91f16700Schasinglulu
1570*91f16700Schasinglulu	/* x4 = COREDISABLEDSR
1571*91f16700Schasinglulu	 * x5 = BRR
1572*91f16700Schasinglulu	 * x6 = loop count
1573*91f16700Schasinglulu	 * x7 = core mask bit
1574*91f16700Schasinglulu	 */
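	/* loop over the PLATFORM_CORE_COUNT cores: any core flagged in
	 * COREDISABLEDSR that has not yet been released from reset is
	 * released and recorded as CORE_DISABLED; cluster 3 cores are
	 * also recorded as disabled when they are being held in reset
	 */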
1575*91f16700Schasinglulu2:
1576*91f16700Schasinglulu	/* check if the core is to be disabled */
1577*91f16700Schasinglulu	tst  x4, x7
1578*91f16700Schasinglulu	b.eq 1f
1579*91f16700Schasinglulu
1580*91f16700Schasinglulu	/* see if disabled cores have already been released from reset */
1581*91f16700Schasinglulu	tst  x5, x7
1582*91f16700Schasinglulu	b.ne 5f
1583*91f16700Schasinglulu
1584*91f16700Schasinglulu	/* if core has not been released, then release it (0-3) */
1585*91f16700Schasinglulu	mov  x0, x7
1586*91f16700Schasinglulu	bl   _soc_core_release
1587*91f16700Schasinglulu
1588*91f16700Schasinglulu	/* record the core state in the data area (0-3) */
1589*91f16700Schasinglulu	mov  x0, x7
1590*91f16700Schasinglulu	mov  x1, #CORE_STATE_DATA
1591*91f16700Schasinglulu	mov  x2, #CORE_DISABLED
1592*91f16700Schasinglulu	bl   _setCoreData
1593*91f16700Schasinglulu
1594*91f16700Schasinglulu1:
1595*91f16700Schasinglulu	/* see if this is a cluster 3 core */
1596*91f16700Schasinglulu	mov   x3, #CLUSTER_3_CORES_MASK
1597*91f16700Schasinglulu	tst   x3, x7
1598*91f16700Schasinglulu	b.eq  5f
1599*91f16700Schasinglulu
1600*91f16700Schasinglulu	/* this is a cluster 3 core - see if it needs to be held in reset */
1601*91f16700Schasinglulu	cmp  x8, #CLUSTER_3_IN_RESET
1602*91f16700Schasinglulu	b.ne 5f
1603*91f16700Schasinglulu
1604*91f16700Schasinglulu	/* record the core state as disabled in the data area (0-3) */
1605*91f16700Schasinglulu	mov  x0, x7
1606*91f16700Schasinglulu	mov  x1, #CORE_STATE_DATA
1607*91f16700Schasinglulu	mov  x2, #CORE_DISABLED
1608*91f16700Schasinglulu	bl   _setCoreData
1609*91f16700Schasinglulu
1610*91f16700Schasinglulu5:
1611*91f16700Schasinglulu	/* decrement the counter */
1612*91f16700Schasinglulu	subs  x6, x6, #1
1613*91f16700Schasinglulu	b.le  3f
1614*91f16700Schasinglulu
1615*91f16700Schasinglulu	/* shift the core mask to the next core */
1616*91f16700Schasinglulu	lsl   x7, x7, #1
1617*91f16700Schasinglulu	/* continue */
1618*91f16700Schasinglulu	b	 2b
1619*91f16700Schasinglulu3:
1620*91f16700Schasinglulu	cmp  x8, #CLUSTER_3_IN_RESET
1621*91f16700Schasinglulu	b.ne 6f
1622*91f16700Schasinglulu
1623*91f16700Schasinglulu	/* the cluster 3 cores must be held in reset, so mark them as
1624*91f16700Schasinglulu	 * "disabled" in the COREDISR and COREDISABLEDSR registers; the
1625*91f16700Schasinglulu	 * rest of the sw stack will then treat them as disabled and
1626*91f16700Schasinglulu	 * leave them alone
1627*91f16700Schasinglulu	 */
1628*91f16700Schasinglulu	mov  x0, #NXP_DCFG_ADDR
1629*91f16700Schasinglulu	ldr  w1, [x0, #DCFG_COREDISR_OFFSET]
1630*91f16700Schasinglulu	orr  w1, w1, #CLUSTER_3_CORES_MASK
1631*91f16700Schasinglulu	str  w1, [x0, #DCFG_COREDISR_OFFSET]
1632*91f16700Schasinglulu
1633*91f16700Schasinglulu	ldr  w2, [x0, #DCFG_COREDISABLEDSR_OFFSET]
1634*91f16700Schasinglulu	orr  w2, w2, #CLUSTER_3_CORES_MASK
1635*91f16700Schasinglulu	str  w2, [x0, #DCFG_COREDISABLEDSR_OFFSET]
1636*91f16700Schasinglulu	dsb  sy
1637*91f16700Schasinglulu	isb
1638*91f16700Schasinglulu
1639*91f16700Schasinglulu#if (PSCI_TEST)
1640*91f16700Schasinglulu	/* x0 = NXP_DCFG_ADDR : read COREDISABLEDSR */
1641*91f16700Schasinglulu	ldr  w4, [x0, #DCFG_COREDISABLEDSR_OFFSET]
1642*91f16700Schasinglulu	/* read COREDISR */
1643*91f16700Schasinglulu	ldr  w3, [x0, #DCFG_COREDISR_OFFSET]
1644*91f16700Schasinglulu#endif
1645*91f16700Schasinglulu
1646*91f16700Schasinglulu6:
1647*91f16700Schasinglulu	mov  x30, x9
1648*91f16700Schasinglulu	ret
1649*91f16700Schasinglulu
1650*91f16700Schasingluluendfunc release_disabled
1651*91f16700Schasinglulu
1652*91f16700Schasinglulu
1653*91f16700Schasinglulu/* Function sets up the TrustZone Protection Controller (TZPC)
1654*91f16700Schasinglulu * in:  none
1655*91f16700Schasinglulu * out: none
1656*91f16700Schasinglulu * uses x0, x1
1657*91f16700Schasinglulu */
1658*91f16700Schasinglulufunc init_tzpc
1659*91f16700Schasinglulu
1660*91f16700Schasinglulu	/* set Non Secure access for all devices protected via TZPC */
1661*91f16700Schasinglulu
1662*91f16700Schasinglulu	/* decode Protection-0 Set Reg */
1663*91f16700Schasinglulu	ldr	x1, =TZPCDECPROT_0_SET_BASE
1664*91f16700Schasinglulu	/* set decode region to NS, Bits[7:0] */
1665*91f16700Schasinglulu	mov	w0, #0xFF
1666*91f16700Schasinglulu	str	w0, [x1]
1667*91f16700Schasinglulu
1668*91f16700Schasinglulu	/* decode Protection-1 Set Reg */
1669*91f16700Schasinglulu	ldr	x1, =TZPCDECPROT_1_SET_BASE
1670*91f16700Schasinglulu	/* set decode region to NS, Bits[7:0] */
1671*91f16700Schasinglulu	mov	w0, #0xFF
1672*91f16700Schasinglulu	str	w0, [x1]
1673*91f16700Schasinglulu
1674*91f16700Schasinglulu	/* decode Protection-2 Set Reg */
1675*91f16700Schasinglulu	ldr	x1, =TZPCDECPROT_2_SET_BASE
1676*91f16700Schasinglulu	/* set decode region to NS, Bits[7:0] */
1677*91f16700Schasinglulu	mov	w0, #0xFF
1678*91f16700Schasinglulu	str	w0, [x1]
1679*91f16700Schasinglulu
1680*91f16700Schasinglulu	/* entire SRAM as NS */
1681*91f16700Schasinglulu	/* secure RAM region size Reg */
1682*91f16700Schasinglulu	ldr	x1, =TZPC_BASE
1683*91f16700Schasinglulu	/* 0x00000000 = no secure region */
1684*91f16700Schasinglulu	mov	w0, #0x00000000
1685*91f16700Schasinglulu	str	w0, [x1]
1686*91f16700Schasinglulu
1687*91f16700Schasinglulu	ret
1688*91f16700Schasingluluendfunc init_tzpc
1689*91f16700Schasinglulu
1690*91f16700Schasinglulu/* Function writes a register in the DCFG block
1691*91f16700Schasinglulu * in:  x0 = offset
1692*91f16700Schasinglulu * in:  w1 = value to write
1693*91f16700Schasinglulu * uses x0, x1, x2
1694*91f16700Schasinglulu */
1695*91f16700Schasinglulufunc _write_reg_dcfg
1696*91f16700Schasinglulu	ldr  x2, =NXP_DCFG_ADDR
1697*91f16700Schasinglulu	str  w1, [x2, x0]
1698*91f16700Schasinglulu	ret
1699*91f16700Schasingluluendfunc _write_reg_dcfg
1700*91f16700Schasinglulu
1701*91f16700Schasinglulu
1702*91f16700Schasinglulu/* Function reads a register in the DCFG block
1703*91f16700Schasinglulu * in:  x0 = offset
1704*91f16700Schasinglulu * out: w0 = value read
1705*91f16700Schasinglulu * uses x0, x1, x2
1706*91f16700Schasinglulu */
1707*91f16700Schasinglulufunc _read_reg_dcfg
1708*91f16700Schasinglulu	ldr  x2, =NXP_DCFG_ADDR
1709*91f16700Schasinglulu	ldr  w1, [x2, x0]
1710*91f16700Schasinglulu	mov  w0, w1
1711*91f16700Schasinglulu	ret
1712*91f16700Schasingluluendfunc _read_reg_dcfg
1713*91f16700Schasinglulu
1714*91f16700Schasinglulu
1715*91f16700Schasinglulu/* Function returns an mpidr value for a core, given a core_mask_lsb
1716*91f16700Schasinglulu * in:  x0 = core mask lsb
1717*91f16700Schasinglulu * out: x0 = affinity2:affinity1:affinity0, where affinity is 8-bits
1718*91f16700Schasinglulu * uses x0, x1
1719*91f16700Schasinglulu */
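/* worked example (assuming the 2-cores-per-cluster mapping implied
 * below): core mask 0x20 -> clz = 26 -> SoC core 5; bit 0 gives
 * aff0 = 1, the remaining bits give cluster 2, so x0 returns 0x201
 */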
1720*91f16700Schasinglulufunc get_mpidr_value
1721*91f16700Schasinglulu
1722*91f16700Schasinglulu	/* convert a core mask to an SoC core number */
1723*91f16700Schasinglulu	clz  w0, w0
1724*91f16700Schasinglulu	mov  w1, #31
1725*91f16700Schasinglulu	sub  w0, w1, w0
1726*91f16700Schasinglulu
1727*91f16700Schasinglulu	/* get the mpidr core number from the SoC core number */
1728*91f16700Schasinglulu	mov  w1, wzr
1729*91f16700Schasinglulu	tst  x0, #1
1730*91f16700Schasinglulu	b.eq 1f
1731*91f16700Schasinglulu	orr  w1, w1, #1
1732*91f16700Schasinglulu
1733*91f16700Schasinglulu1:
1734*91f16700Schasinglulu	/* extract the cluster number */
1735*91f16700Schasinglulu	lsr  w0, w0, #1
1736*91f16700Schasinglulu	orr  w0, w1, w0, lsl #8
1737*91f16700Schasinglulu
1738*91f16700Schasinglulu	ret
1739*91f16700Schasingluluendfunc get_mpidr_value
1740*91f16700Schasinglulu
1741*91f16700Schasinglulu
1742*91f16700Schasinglulu/* Function returns the redistributor base address for the core specified
1743*91f16700Schasinglulu * in x0
1744*91f16700Schasinglulu * in:  x0 - core mask lsb of specified core
1745*91f16700Schasinglulu * out: x0 = redistributor rd base address for specified core
1746*91f16700Schasinglulu * uses x0, x1, x2
1747*91f16700Schasinglulu */
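/* the core number is (31 - clz(core mask)), computed below as
 * (0x20 - clz - 1); the loop then adds GIC_RD_OFFSET once per core
 * to step to this core's redistributor frame
 */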
1748*91f16700Schasinglulufunc get_gic_rd_base
1749*91f16700Schasinglulu	clz  w1, w0
1750*91f16700Schasinglulu	mov  w2, #0x20
1751*91f16700Schasinglulu	sub  w2, w2, w1
1752*91f16700Schasinglulu	sub  w2, w2, #1
1753*91f16700Schasinglulu
1754*91f16700Schasinglulu	ldr  x0, =NXP_GICR_ADDR
1755*91f16700Schasinglulu	mov  x1, #GIC_RD_OFFSET
1756*91f16700Schasinglulu
1757*91f16700Schasinglulu	/* x2 = core number
1758*91f16700Schasinglulu	 * loop counter
1759*91f16700Schasinglulu	 */
1760*91f16700Schasinglulu2:
1761*91f16700Schasinglulu	cbz  x2, 1f
1762*91f16700Schasinglulu	add  x0, x0, x1
1763*91f16700Schasinglulu	sub  x2, x2, #1
1764*91f16700Schasinglulu	b	2b
1765*91f16700Schasinglulu1:
1766*91f16700Schasinglulu	ret
1767*91f16700Schasingluluendfunc get_gic_rd_base
1768*91f16700Schasinglulu
1769*91f16700Schasinglulu
1770*91f16700Schasinglulu/* Function returns the redistributor base address for the core specified
1771*91f16700Schasinglulu * in x0
1772*91f16700Schasinglulu * in:  x0 - core mask lsb of specified core
1773*91f16700Schasinglulu * out: x0 = redistributor sgi base address for specified core
1774*91f16700Schasinglulu * uses x0, x1, x2
1775*91f16700Schasinglulu */
1776*91f16700Schasinglulufunc get_gic_sgi_base
1777*91f16700Schasinglulu	clz  w1, w0
1778*91f16700Schasinglulu	mov  w2, #0x20
1779*91f16700Schasinglulu	sub  w2, w2, w1
1780*91f16700Schasinglulu	sub  w2, w2, #1
1781*91f16700Schasinglulu
1782*91f16700Schasinglulu	ldr  x0, =NXP_GICR_SGI_ADDR
1783*91f16700Schasinglulu	mov  x1, #GIC_SGI_OFFSET
1784*91f16700Schasinglulu
1785*91f16700Schasinglulu	/* loop counter */
1786*91f16700Schasinglulu2:
1787*91f16700Schasinglulu	cbz  x2, 1f		/* x2 = core number */
1788*91f16700Schasinglulu	add  x0, x0, x1
1789*91f16700Schasinglulu	sub  x2, x2, #1
1790*91f16700Schasinglulu	b	2b
1791*91f16700Schasinglulu1:
1792*91f16700Schasinglulu	ret
1793*91f16700Schasingluluendfunc get_gic_sgi_base
1794*91f16700Schasinglulu
1795*91f16700Schasinglulu/* Function writes a register in the RESET block
1796*91f16700Schasinglulu * in:  x0 = offset
1797*91f16700Schasinglulu * in:  w1 = value to write
1798*91f16700Schasinglulu * uses x0, x1, x2
1799*91f16700Schasinglulu */
1800*91f16700Schasinglulufunc _write_reg_reset
1801*91f16700Schasinglulu	ldr  x2, =NXP_RESET_ADDR
1802*91f16700Schasinglulu	str  w1, [x2, x0]
1803*91f16700Schasinglulu	ret
1804*91f16700Schasingluluendfunc _write_reg_reset
1805*91f16700Schasinglulu
1806*91f16700Schasinglulu
1807*91f16700Schasinglulu/* Function reads a register in the RESET block
1808*91f16700Schasinglulu * in:  x0 = offset
1809*91f16700Schasinglulu * out: w0 = value read
1810*91f16700Schasinglulu * uses x0, x1
1811*91f16700Schasinglulu */
1812*91f16700Schasinglulufunc _read_reg_reset
1813*91f16700Schasinglulu	ldr  x1, =NXP_RESET_ADDR
1814*91f16700Schasinglulu	ldr  w0, [x1, x0]
1815*91f16700Schasinglulu	ret
1816*91f16700Schasingluluendfunc _read_reg_reset
1817