xref: /arm-trusted-firmware/lib/psci/psci_private.h (revision 91f16700b400a8c0651d24a598fc48ee2997a0d7)
1*91f16700Schasinglulu /*
2*91f16700Schasinglulu  * Copyright (c) 2013-2023, Arm Limited and Contributors. All rights reserved.
3*91f16700Schasinglulu  *
4*91f16700Schasinglulu  * SPDX-License-Identifier: BSD-3-Clause
5*91f16700Schasinglulu  */
6*91f16700Schasinglulu 
7*91f16700Schasinglulu #ifndef PSCI_PRIVATE_H
8*91f16700Schasinglulu #define PSCI_PRIVATE_H
9*91f16700Schasinglulu 
10*91f16700Schasinglulu #include <stdbool.h>
11*91f16700Schasinglulu 
12*91f16700Schasinglulu #include <arch.h>
13*91f16700Schasinglulu #include <arch_helpers.h>
14*91f16700Schasinglulu #include <common/bl_common.h>
15*91f16700Schasinglulu #include <lib/bakery_lock.h>
16*91f16700Schasinglulu #include <lib/el3_runtime/cpu_data.h>
17*91f16700Schasinglulu #include <lib/psci/psci.h>
18*91f16700Schasinglulu #include <lib/spinlock.h>
19*91f16700Schasinglulu 
/*
 * The PSCI capabilities which are provided by the generic code and do not
 * depend on the platform or SPD capabilities.
 */
24*91f16700Schasinglulu #define PSCI_GENERIC_CAP	\
25*91f16700Schasinglulu 			(define_psci_cap(PSCI_VERSION) |		\
26*91f16700Schasinglulu 			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
27*91f16700Schasinglulu 			define_psci_cap(PSCI_FEATURES))
28*91f16700Schasinglulu 
29*91f16700Schasinglulu /*
30*91f16700Schasinglulu  * The PSCI capabilities mask for 64 bit functions.
31*91f16700Schasinglulu  */
32*91f16700Schasinglulu #define PSCI_CAP_64BIT_MASK	\
33*91f16700Schasinglulu 			(define_psci_cap(PSCI_CPU_SUSPEND_AARCH64) |	\
34*91f16700Schasinglulu 			define_psci_cap(PSCI_CPU_ON_AARCH64) |		\
35*91f16700Schasinglulu 			define_psci_cap(PSCI_AFFINITY_INFO_AARCH64) |	\
36*91f16700Schasinglulu 			define_psci_cap(PSCI_MIG_AARCH64) |		\
37*91f16700Schasinglulu 			define_psci_cap(PSCI_MIG_INFO_UP_CPU_AARCH64) |	\
38*91f16700Schasinglulu 			define_psci_cap(PSCI_NODE_HW_STATE_AARCH64) |	\
39*91f16700Schasinglulu 			define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64) |	\
40*91f16700Schasinglulu 			define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64) |	\
41*91f16700Schasinglulu 			define_psci_cap(PSCI_STAT_COUNT_AARCH64) |	\
42*91f16700Schasinglulu 			define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64) |	\
43*91f16700Schasinglulu 			define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64))
44*91f16700Schasinglulu 
/*
 * Internally PSCI uses a uint16_t for various CPU indexes so
 * define a limit on the number of CPUs that can be initialised.
 */
48*91f16700Schasinglulu #define PSCI_MAX_CPUS_INDEX	0xFFFFU
49*91f16700Schasinglulu 
50*91f16700Schasinglulu /* Invalid parent */
51*91f16700Schasinglulu #define PSCI_PARENT_NODE_INVALID	0xFFFFFFFFU
52*91f16700Schasinglulu 
53*91f16700Schasinglulu /*
54*91f16700Schasinglulu  * Helper functions to get/set the fields of PSCI per-cpu data.
55*91f16700Schasinglulu  */
/* Record the calling CPU's affinity info state in its per-CPU PSCI data. */
static inline void psci_set_aff_info_state(aff_info_state_t aff_state)
{
	set_cpu_data(psci_svc_cpu_data.aff_info_state, aff_state);
}
60*91f16700Schasinglulu 
/* Read the calling CPU's affinity info state from its per-CPU PSCI data. */
static inline aff_info_state_t psci_get_aff_info_state(void)
{
	return get_cpu_data(psci_svc_cpu_data.aff_info_state);
}
65*91f16700Schasinglulu 
/* Read the affinity info state of the CPU identified by index 'idx'. */
static inline aff_info_state_t psci_get_aff_info_state_by_idx(unsigned int idx)
{
	return get_cpu_data_by_index(idx,
				     psci_svc_cpu_data.aff_info_state);
}
71*91f16700Schasinglulu 
/* Set the affinity info state of the CPU identified by index 'idx'. */
static inline void psci_set_aff_info_state_by_idx(unsigned int idx,
						  aff_info_state_t aff_state)
{
	set_cpu_data_by_index(idx,
			      psci_svc_cpu_data.aff_info_state, aff_state);
}
78*91f16700Schasinglulu 
/* Read the target power level recorded for this CPU's suspend request. */
static inline unsigned int psci_get_suspend_pwrlvl(void)
{
	return get_cpu_data(psci_svc_cpu_data.target_pwrlvl);
}
83*91f16700Schasinglulu 
/* Record the target power level for this CPU's suspend request. */
static inline void psci_set_suspend_pwrlvl(unsigned int target_lvl)
{
	set_cpu_data(psci_svc_cpu_data.target_pwrlvl, target_lvl);
}
88*91f16700Schasinglulu 
/* Set the platform local power state in this CPU's per-CPU PSCI data. */
static inline void psci_set_cpu_local_state(plat_local_state_t state)
{
	set_cpu_data(psci_svc_cpu_data.local_state, state);
}
93*91f16700Schasinglulu 
/* Read the platform local power state from this CPU's per-CPU PSCI data. */
static inline plat_local_state_t psci_get_cpu_local_state(void)
{
	return get_cpu_data(psci_svc_cpu_data.local_state);
}
98*91f16700Schasinglulu 
/* Read the platform local power state of the CPU identified by 'idx'. */
static inline plat_local_state_t psci_get_cpu_local_state_by_idx(
		unsigned int idx)
{
	return get_cpu_data_by_index(idx,
				     psci_svc_cpu_data.local_state);
}
105*91f16700Schasinglulu 
/*
 * Helper function to identify a CPU standby request in a PSCI Suspend call:
 * a standby request is a retention (non power-down) state targeting only
 * power level 0.
 */
static inline bool is_cpu_standby_req(unsigned int is_power_down_state,
				      unsigned int retn_lvl)
{
	/* Equivalent to: both values are zero. */
	return ((is_power_down_state | retn_lvl) == 0U);
}
112*91f16700Schasinglulu 
113*91f16700Schasinglulu /*******************************************************************************
114*91f16700Schasinglulu  * The following two data structures implement the power domain tree. The tree
115*91f16700Schasinglulu  * is used to track the state of all the nodes i.e. power domain instances
116*91f16700Schasinglulu  * described by the platform. The tree consists of nodes that describe CPU power
117*91f16700Schasinglulu  * domains i.e. leaf nodes and all other power domains which are parents of a
118*91f16700Schasinglulu  * CPU power domain i.e. non-leaf nodes.
119*91f16700Schasinglulu  ******************************************************************************/
typedef struct non_cpu_pwr_domain_node {
	/*
	 * Index of the first CPU power domain node level 0 which has this node
	 * as its parent.
	 */
	unsigned int cpu_start_idx;

	/*
	 * Number of CPU power domains which are siblings of the domain indexed
	 * by 'cpu_start_idx' i.e. all the domains in the range 'cpu_start_idx
	 * -> cpu_start_idx + ncpus' have this node as their parent.
	 */
	unsigned int ncpus;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	/* Platform local power state of this power domain */
	plat_local_state_t local_state;

	/* Level of this node in the power domain tree */
	unsigned char level;

	/* For indexing the psci_locks array */
	uint16_t lock_index;
} non_cpu_pd_node_t;
147*91f16700Schasinglulu 
typedef struct cpu_pwr_domain_node {
	/* MPIDR of the CPU represented by this leaf node */
	u_register_t mpidr;

	/*
	 * Index of the parent power domain node.
	 * TODO: Figure out whether using a pointer is more efficient.
	 */
	unsigned int parent_node;

	/*
	 * A CPU power domain does not require state coordination like its
	 * parent power domains. Hence this node does not include a bakery
	 * lock. A spinlock is required by the CPU_ON handler to prevent a race
	 * when multiple CPUs try to turn ON the same target CPU.
	 */
	spinlock_t cpu_lock;
} cpu_pd_node_t;
165*91f16700Schasinglulu 
166*91f16700Schasinglulu #if PSCI_OS_INIT_MODE
167*91f16700Schasinglulu /*******************************************************************************
168*91f16700Schasinglulu  * The supported power state coordination modes that can be used in CPU_SUSPEND.
169*91f16700Schasinglulu  ******************************************************************************/
typedef enum suspend_mode {
	PLAT_COORD = 0,	/* Platform-coordinated mode */
	OS_INIT = 1	/* OS-initiated mode */
} suspend_mode_t;
174*91f16700Schasinglulu #endif
175*91f16700Schasinglulu 
176*91f16700Schasinglulu /*******************************************************************************
177*91f16700Schasinglulu  * The following are helpers and declarations of locks.
178*91f16700Schasinglulu  ******************************************************************************/
179*91f16700Schasinglulu #if HW_ASSISTED_COHERENCY
180*91f16700Schasinglulu /*
181*91f16700Schasinglulu  * On systems where participant CPUs are cache-coherent, we can use spinlocks
182*91f16700Schasinglulu  * instead of bakery locks.
183*91f16700Schasinglulu  */
184*91f16700Schasinglulu #define DEFINE_PSCI_LOCK(_name)		spinlock_t _name
185*91f16700Schasinglulu #define DECLARE_PSCI_LOCK(_name)	extern DEFINE_PSCI_LOCK(_name)
186*91f16700Schasinglulu 
187*91f16700Schasinglulu /* One lock is required per non-CPU power domain node */
188*91f16700Schasinglulu DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
189*91f16700Schasinglulu 
190*91f16700Schasinglulu /*
191*91f16700Schasinglulu  * On systems with hardware-assisted coherency, make PSCI cache operations NOP,
192*91f16700Schasinglulu  * as PSCI participants are cache-coherent, and there's no need for explicit
193*91f16700Schasinglulu  * cache maintenance operations or barriers to coordinate their state.
194*91f16700Schasinglulu  */
/* No-op: cache maintenance is unnecessary with hardware-assisted coherency. */
static inline void psci_flush_dcache_range(uintptr_t __unused addr,
					   size_t __unused size)
{
	/* Empty */
}
200*91f16700Schasinglulu 
201*91f16700Schasinglulu #define psci_flush_cpu_data(member)
202*91f16700Schasinglulu #define psci_inv_cpu_data(member)
203*91f16700Schasinglulu 
/* No-op: no barrier is needed to coordinate state among coherent CPUs. */
static inline void psci_dsbish(void)
{
	/* Empty */
}
208*91f16700Schasinglulu 
/* Acquire the spinlock protecting the given non-CPU power domain node. */
static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node)
{
	spin_lock(&psci_locks[non_cpu_pd_node->lock_index]);
}
213*91f16700Schasinglulu 
/* Release the spinlock protecting the given non-CPU power domain node. */
static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node)
{
	spin_unlock(&psci_locks[non_cpu_pd_node->lock_index]);
}
218*91f16700Schasinglulu 
219*91f16700Schasinglulu #else /* if HW_ASSISTED_COHERENCY == 0 */
220*91f16700Schasinglulu /*
221*91f16700Schasinglulu  * Use bakery locks for state coordination as not all PSCI participants are
222*91f16700Schasinglulu  * cache coherent.
223*91f16700Schasinglulu  */
224*91f16700Schasinglulu #define DEFINE_PSCI_LOCK(_name)		DEFINE_BAKERY_LOCK(_name)
225*91f16700Schasinglulu #define DECLARE_PSCI_LOCK(_name)	DECLARE_BAKERY_LOCK(_name)
226*91f16700Schasinglulu 
227*91f16700Schasinglulu /* One lock is required per non-CPU power domain node */
228*91f16700Schasinglulu DECLARE_PSCI_LOCK(psci_locks[PSCI_NUM_NON_CPU_PWR_DOMAINS]);
229*91f16700Schasinglulu 
230*91f16700Schasinglulu /*
231*91f16700Schasinglulu  * If not all PSCI participants are cache-coherent, perform cache maintenance
232*91f16700Schasinglulu  * and issue barriers wherever required to coordinate state.
233*91f16700Schasinglulu  */
/* Flush the given address range from the data cache. */
static inline void psci_flush_dcache_range(uintptr_t addr, size_t size)
{
	flush_dcache_range(addr, size);
}
238*91f16700Schasinglulu 
239*91f16700Schasinglulu #define psci_flush_cpu_data(member)		flush_cpu_data(member)
240*91f16700Schasinglulu #define psci_inv_cpu_data(member)		inv_cpu_data(member)
241*91f16700Schasinglulu 
/* Issue a data synchronization barrier over the inner shareable domain. */
static inline void psci_dsbish(void)
{
	dsbish();
}
246*91f16700Schasinglulu 
/* Acquire the bakery lock protecting the given non-CPU power domain node. */
static inline void psci_lock_get(non_cpu_pd_node_t *non_cpu_pd_node)
{
	bakery_lock_get(&psci_locks[non_cpu_pd_node->lock_index]);
}
251*91f16700Schasinglulu 
/* Release the bakery lock protecting the given non-CPU power domain node. */
static inline void psci_lock_release(non_cpu_pd_node_t *non_cpu_pd_node)
{
	bakery_lock_release(&psci_locks[non_cpu_pd_node->lock_index]);
}
256*91f16700Schasinglulu 
257*91f16700Schasinglulu #endif /* HW_ASSISTED_COHERENCY */
258*91f16700Schasinglulu 
/*
 * Initialise the lock index of the node at position 'idx'. Note that the
 * 'non_cpu_pd_node' parameter is the base of the node array, not a pointer
 * to a single node.
 */
static inline void psci_lock_init(non_cpu_pd_node_t *non_cpu_pd_node,
				  uint16_t idx)
{
	non_cpu_pd_node[idx].lock_index = idx;
}
264*91f16700Schasinglulu 
265*91f16700Schasinglulu /*******************************************************************************
266*91f16700Schasinglulu  * Data prototypes
267*91f16700Schasinglulu  ******************************************************************************/
268*91f16700Schasinglulu extern const plat_psci_ops_t *psci_plat_pm_ops;
269*91f16700Schasinglulu extern non_cpu_pd_node_t psci_non_cpu_pd_nodes[PSCI_NUM_NON_CPU_PWR_DOMAINS];
270*91f16700Schasinglulu extern cpu_pd_node_t psci_cpu_pd_nodes[PLATFORM_CORE_COUNT];
271*91f16700Schasinglulu extern unsigned int psci_caps;
272*91f16700Schasinglulu extern unsigned int psci_plat_core_count;
273*91f16700Schasinglulu #if PSCI_OS_INIT_MODE
274*91f16700Schasinglulu extern suspend_mode_t psci_suspend_mode;
275*91f16700Schasinglulu #endif
276*91f16700Schasinglulu 
277*91f16700Schasinglulu /*******************************************************************************
278*91f16700Schasinglulu  * SPD's power management hooks registered with PSCI
279*91f16700Schasinglulu  ******************************************************************************/
280*91f16700Schasinglulu extern const spd_pm_ops_t *psci_spd_pm;
281*91f16700Schasinglulu 
282*91f16700Schasinglulu /*******************************************************************************
283*91f16700Schasinglulu  * Function prototypes
284*91f16700Schasinglulu  ******************************************************************************/
285*91f16700Schasinglulu /* Private exported functions from psci_common.c */
286*91f16700Schasinglulu int psci_validate_power_state(unsigned int power_state,
287*91f16700Schasinglulu 			      psci_power_state_t *state_info);
288*91f16700Schasinglulu void psci_query_sys_suspend_pwrstate(psci_power_state_t *state_info);
289*91f16700Schasinglulu void psci_init_req_local_pwr_states(void);
290*91f16700Schasinglulu #if PSCI_OS_INIT_MODE
291*91f16700Schasinglulu void psci_update_req_local_pwr_states(unsigned int end_pwrlvl,
292*91f16700Schasinglulu 				      unsigned int cpu_idx,
293*91f16700Schasinglulu 				      psci_power_state_t *state_info,
294*91f16700Schasinglulu 				      plat_local_state_t *prev);
295*91f16700Schasinglulu void psci_restore_req_local_pwr_states(unsigned int cpu_idx,
296*91f16700Schasinglulu 				       plat_local_state_t *prev);
297*91f16700Schasinglulu #endif
298*91f16700Schasinglulu void psci_get_target_local_pwr_states(unsigned int end_pwrlvl,
299*91f16700Schasinglulu 				      psci_power_state_t *target_state);
300*91f16700Schasinglulu void psci_set_target_local_pwr_states(unsigned int end_pwrlvl,
301*91f16700Schasinglulu 				      const psci_power_state_t *target_state);
302*91f16700Schasinglulu int psci_validate_entry_point(entry_point_info_t *ep,
303*91f16700Schasinglulu 			uintptr_t entrypoint, u_register_t context_id);
304*91f16700Schasinglulu void psci_get_parent_pwr_domain_nodes(unsigned int cpu_idx,
305*91f16700Schasinglulu 				      unsigned int end_lvl,
306*91f16700Schasinglulu 				      unsigned int *node_index);
307*91f16700Schasinglulu void psci_do_state_coordination(unsigned int end_pwrlvl,
308*91f16700Schasinglulu 				psci_power_state_t *state_info);
309*91f16700Schasinglulu #if PSCI_OS_INIT_MODE
310*91f16700Schasinglulu int psci_validate_state_coordination(unsigned int end_pwrlvl,
311*91f16700Schasinglulu 				     psci_power_state_t *state_info);
312*91f16700Schasinglulu #endif
313*91f16700Schasinglulu void psci_acquire_pwr_domain_locks(unsigned int end_pwrlvl,
314*91f16700Schasinglulu 				   const unsigned int *parent_nodes);
315*91f16700Schasinglulu void psci_release_pwr_domain_locks(unsigned int end_pwrlvl,
316*91f16700Schasinglulu 				   const unsigned int *parent_nodes);
317*91f16700Schasinglulu int psci_validate_suspend_req(const psci_power_state_t *state_info,
318*91f16700Schasinglulu 			      unsigned int is_power_down_state);
319*91f16700Schasinglulu unsigned int psci_find_max_off_lvl(const psci_power_state_t *state_info);
320*91f16700Schasinglulu unsigned int psci_find_target_suspend_lvl(const psci_power_state_t *state_info);
321*91f16700Schasinglulu void psci_set_pwr_domains_to_run(unsigned int end_pwrlvl);
322*91f16700Schasinglulu void psci_print_power_domain_map(void);
323*91f16700Schasinglulu bool psci_is_last_on_cpu(void);
324*91f16700Schasinglulu int psci_spd_migrate_info(u_register_t *mpidr);
325*91f16700Schasinglulu 
326*91f16700Schasinglulu /*
327*91f16700Schasinglulu  * CPU power down is directly called only when HW_ASSISTED_COHERENCY is
328*91f16700Schasinglulu  * available. Otherwise, this needs post-call stack maintenance, which is
329*91f16700Schasinglulu  * handled in assembly.
330*91f16700Schasinglulu  */
331*91f16700Schasinglulu void prepare_cpu_pwr_dwn(unsigned int power_level);
332*91f16700Schasinglulu 
333*91f16700Schasinglulu /* This function applies various CPU errata during power down. */
334*91f16700Schasinglulu void apply_cpu_pwr_dwn_errata(void);
335*91f16700Schasinglulu 
336*91f16700Schasinglulu /* Private exported functions from psci_on.c */
337*91f16700Schasinglulu int psci_cpu_on_start(u_register_t target_cpu,
338*91f16700Schasinglulu 		      const entry_point_info_t *ep);
339*91f16700Schasinglulu 
340*91f16700Schasinglulu void psci_cpu_on_finish(unsigned int cpu_idx, const psci_power_state_t *state_info);
341*91f16700Schasinglulu 
342*91f16700Schasinglulu /* Private exported functions from psci_off.c */
343*91f16700Schasinglulu int psci_do_cpu_off(unsigned int end_pwrlvl);
344*91f16700Schasinglulu 
345*91f16700Schasinglulu /* Private exported functions from psci_suspend.c */
346*91f16700Schasinglulu int psci_cpu_suspend_start(const entry_point_info_t *ep,
347*91f16700Schasinglulu 			   unsigned int end_pwrlvl,
348*91f16700Schasinglulu 			   psci_power_state_t *state_info,
349*91f16700Schasinglulu 			   unsigned int is_power_down_state);
350*91f16700Schasinglulu 
351*91f16700Schasinglulu void psci_cpu_suspend_finish(unsigned int cpu_idx, const psci_power_state_t *state_info);
352*91f16700Schasinglulu 
353*91f16700Schasinglulu /* Private exported functions from psci_helpers.S */
354*91f16700Schasinglulu void psci_do_pwrdown_cache_maintenance(unsigned int pwr_level);
355*91f16700Schasinglulu void psci_do_pwrup_cache_maintenance(void);
356*91f16700Schasinglulu 
357*91f16700Schasinglulu /* Private exported functions from psci_system_off.c */
358*91f16700Schasinglulu void __dead2 psci_system_off(void);
359*91f16700Schasinglulu void __dead2 psci_system_reset(void);
360*91f16700Schasinglulu u_register_t psci_system_reset2(uint32_t reset_type, u_register_t cookie);
361*91f16700Schasinglulu 
362*91f16700Schasinglulu /* Private exported functions from psci_stat.c */
363*91f16700Schasinglulu void psci_stats_update_pwr_down(unsigned int end_pwrlvl,
364*91f16700Schasinglulu 			const psci_power_state_t *state_info);
365*91f16700Schasinglulu void psci_stats_update_pwr_up(unsigned int end_pwrlvl,
366*91f16700Schasinglulu 			const psci_power_state_t *state_info);
367*91f16700Schasinglulu u_register_t psci_stat_residency(u_register_t target_cpu,
368*91f16700Schasinglulu 			unsigned int power_state);
369*91f16700Schasinglulu u_register_t psci_stat_count(u_register_t target_cpu,
370*91f16700Schasinglulu 			unsigned int power_state);
371*91f16700Schasinglulu 
372*91f16700Schasinglulu /* Private exported functions from psci_mem_protect.c */
373*91f16700Schasinglulu u_register_t psci_mem_protect(unsigned int enable);
374*91f16700Schasinglulu u_register_t psci_mem_chk_range(uintptr_t base, u_register_t length);
375*91f16700Schasinglulu 
376*91f16700Schasinglulu #endif /* PSCI_PRIVATE_H */
377