xref: /arm-trusted-firmware/lib/psci/psci_setup.c (revision 91f16700b400a8c0651d24a598fc48ee2997a0d7)
1*91f16700Schasinglulu /*
2*91f16700Schasinglulu  * Copyright (c) 2013-2020, Arm Limited and Contributors. All rights reserved.
3*91f16700Schasinglulu  *
4*91f16700Schasinglulu  * SPDX-License-Identifier: BSD-3-Clause
5*91f16700Schasinglulu  */
6*91f16700Schasinglulu 
7*91f16700Schasinglulu #include <assert.h>
8*91f16700Schasinglulu #include <stddef.h>
9*91f16700Schasinglulu 
10*91f16700Schasinglulu #include <arch.h>
11*91f16700Schasinglulu #include <arch_helpers.h>
12*91f16700Schasinglulu #include <common/bl_common.h>
13*91f16700Schasinglulu #include <context.h>
14*91f16700Schasinglulu #include <lib/cpus/errata.h>
15*91f16700Schasinglulu #include <lib/el3_runtime/context_mgmt.h>
16*91f16700Schasinglulu #include <plat/common/platform.h>
17*91f16700Schasinglulu 
18*91f16700Schasinglulu #include "psci_private.h"
19*91f16700Schasinglulu 
/*
 * Check that PLATFORM_CORE_COUNT fits into the number of cores
 * that can be represented by PSCI_MAX_CPUS_INDEX (i.e. the widest
 * CPU index the PSCI implementation can address).
 */
CASSERT(PLATFORM_CORE_COUNT <= (PSCI_MAX_CPUS_INDEX + 1U), assert_psci_cores_overflow);

/*******************************************************************************
 * Per cpu non-secure contexts used to program the architectural state prior
 * return to the normal world.
 * TODO: Use the memory allocator to set aside memory for the contexts instead
 * of relying on platform defined constants.
 ******************************************************************************/
static cpu_context_t psci_ns_context[PLATFORM_CORE_COUNT];

/******************************************************************************
 * Define the psci capability variable. Populated in psci_setup() from the
 * callbacks the platform registers in `psci_plat_pm_ops`.
 *****************************************************************************/
unsigned int psci_caps;
38*91f16700Schasinglulu 
39*91f16700Schasinglulu /*******************************************************************************
40*91f16700Schasinglulu  * Function which initializes the 'psci_non_cpu_pd_nodes' or the
41*91f16700Schasinglulu  * 'psci_cpu_pd_nodes' corresponding to the power level.
42*91f16700Schasinglulu  ******************************************************************************/
43*91f16700Schasinglulu static void __init psci_init_pwr_domain_node(uint16_t node_idx,
44*91f16700Schasinglulu 					unsigned int parent_idx,
45*91f16700Schasinglulu 					unsigned char level)
46*91f16700Schasinglulu {
47*91f16700Schasinglulu 	if (level > PSCI_CPU_PWR_LVL) {
48*91f16700Schasinglulu 		assert(node_idx < PSCI_NUM_NON_CPU_PWR_DOMAINS);
49*91f16700Schasinglulu 
50*91f16700Schasinglulu 		psci_non_cpu_pd_nodes[node_idx].level = level;
51*91f16700Schasinglulu 		psci_lock_init(psci_non_cpu_pd_nodes, node_idx);
52*91f16700Schasinglulu 		psci_non_cpu_pd_nodes[node_idx].parent_node = parent_idx;
53*91f16700Schasinglulu 		psci_non_cpu_pd_nodes[node_idx].local_state =
54*91f16700Schasinglulu 							 PLAT_MAX_OFF_STATE;
55*91f16700Schasinglulu 	} else {
56*91f16700Schasinglulu 		psci_cpu_data_t *svc_cpu_data;
57*91f16700Schasinglulu 
58*91f16700Schasinglulu 		assert(node_idx < PLATFORM_CORE_COUNT);
59*91f16700Schasinglulu 
60*91f16700Schasinglulu 		psci_cpu_pd_nodes[node_idx].parent_node = parent_idx;
61*91f16700Schasinglulu 
62*91f16700Schasinglulu 		/* Initialize with an invalid mpidr */
63*91f16700Schasinglulu 		psci_cpu_pd_nodes[node_idx].mpidr = PSCI_INVALID_MPIDR;
64*91f16700Schasinglulu 
65*91f16700Schasinglulu 		svc_cpu_data =
66*91f16700Schasinglulu 			&(_cpu_data_by_index(node_idx)->psci_svc_cpu_data);
67*91f16700Schasinglulu 
68*91f16700Schasinglulu 		/* Set the Affinity Info for the cores as OFF */
69*91f16700Schasinglulu 		svc_cpu_data->aff_info_state = AFF_STATE_OFF;
70*91f16700Schasinglulu 
71*91f16700Schasinglulu 		/* Invalidate the suspend level for the cpu */
72*91f16700Schasinglulu 		svc_cpu_data->target_pwrlvl = PSCI_INVALID_PWR_LVL;
73*91f16700Schasinglulu 
74*91f16700Schasinglulu 		/* Set the power state to OFF state */
75*91f16700Schasinglulu 		svc_cpu_data->local_state = PLAT_MAX_OFF_STATE;
76*91f16700Schasinglulu 
77*91f16700Schasinglulu 		psci_flush_dcache_range((uintptr_t)svc_cpu_data,
78*91f16700Schasinglulu 						 sizeof(*svc_cpu_data));
79*91f16700Schasinglulu 
80*91f16700Schasinglulu 		cm_set_context_by_index(node_idx,
81*91f16700Schasinglulu 					(void *) &psci_ns_context[node_idx],
82*91f16700Schasinglulu 					NON_SECURE);
83*91f16700Schasinglulu 	}
84*91f16700Schasinglulu }
85*91f16700Schasinglulu 
86*91f16700Schasinglulu /*******************************************************************************
87*91f16700Schasinglulu  * This functions updates cpu_start_idx and ncpus field for each of the node in
88*91f16700Schasinglulu  * psci_non_cpu_pd_nodes[]. It does so by comparing the parent nodes of each of
89*91f16700Schasinglulu  * the CPUs and check whether they match with the parent of the previous
90*91f16700Schasinglulu  * CPU. The basic assumption for this work is that children of the same parent
91*91f16700Schasinglulu  * are allocated adjacent indices. The platform should ensure this though proper
92*91f16700Schasinglulu  * mapping of the CPUs to indices via plat_core_pos_by_mpidr() and
93*91f16700Schasinglulu  * plat_my_core_pos() APIs.
94*91f16700Schasinglulu  *******************************************************************************/
95*91f16700Schasinglulu static void __init psci_update_pwrlvl_limits(void)
96*91f16700Schasinglulu {
97*91f16700Schasinglulu 	unsigned int cpu_idx;
98*91f16700Schasinglulu 	int j;
99*91f16700Schasinglulu 	unsigned int nodes_idx[PLAT_MAX_PWR_LVL] = {0};
100*91f16700Schasinglulu 	unsigned int temp_index[PLAT_MAX_PWR_LVL];
101*91f16700Schasinglulu 
102*91f16700Schasinglulu 	for (cpu_idx = 0; cpu_idx < psci_plat_core_count; cpu_idx++) {
103*91f16700Schasinglulu 		psci_get_parent_pwr_domain_nodes(cpu_idx,
104*91f16700Schasinglulu 						 PLAT_MAX_PWR_LVL,
105*91f16700Schasinglulu 						 temp_index);
106*91f16700Schasinglulu 		for (j = (int)PLAT_MAX_PWR_LVL - 1; j >= 0; j--) {
107*91f16700Schasinglulu 			if (temp_index[j] != nodes_idx[j]) {
108*91f16700Schasinglulu 				nodes_idx[j] = temp_index[j];
109*91f16700Schasinglulu 				psci_non_cpu_pd_nodes[nodes_idx[j]].cpu_start_idx
110*91f16700Schasinglulu 					= cpu_idx;
111*91f16700Schasinglulu 			}
112*91f16700Schasinglulu 			psci_non_cpu_pd_nodes[nodes_idx[j]].ncpus++;
113*91f16700Schasinglulu 		}
114*91f16700Schasinglulu 	}
115*91f16700Schasinglulu }
116*91f16700Schasinglulu 
/*******************************************************************************
 * Core routine to populate the power domain tree. The tree descriptor passed by
 * the platform is populated breadth-first and the first entry in the map
 * informs the number of root power domains. The parent nodes of the root nodes
 * will point to an invalid entry(-1).
 *
 * Returns the number of CPU nodes allocated, i.e. the platform core count
 * actually described by the topology map.
 ******************************************************************************/
static unsigned int __init populate_power_domain_tree(const unsigned char
							*topology)
{
	unsigned int i, j = 0U, num_nodes_at_lvl = 1U, num_nodes_at_next_lvl;
	unsigned int node_index = 0U, num_children;
	unsigned int parent_node_index = 0U;
	int level = (int)PLAT_MAX_PWR_LVL;

	/*
	 * For each level the inputs are:
	 * - number of nodes at this level in plat_array i.e. num_nodes_at_level
	 *   This is the sum of values of nodes at the parent level.
	 * - Index of first entry at this level in the plat_array i.e.
	 *   parent_node_index.
	 * - Index of first free entry in psci_non_cpu_pd_nodes[] or
	 *   psci_cpu_pd_nodes[] i.e. node_index depending upon the level.
	 */
	while (level >= (int) PSCI_CPU_PWR_LVL) {
		num_nodes_at_next_lvl = 0U;
		/*
		 * For each entry (parent node) at this level in the plat_array:
		 * - Find the number of children
		 * - Allocate a node in a power domain array for each child
		 * - Set the parent of the child to the parent_node_index - 1
		 * - Increment parent_node_index to point to the next parent
		 * - Accumulate the number of children at next level.
		 */
		for (i = 0U; i < num_nodes_at_lvl; i++) {
			assert(parent_node_index <=
					PSCI_NUM_NON_CPU_PWR_DOMAINS);
			num_children = topology[parent_node_index];

			/* If num_children is 0 this loop is skipped and
			 * j retains its previous value, so node_index
			 * below stays unchanged. */
			for (j = node_index;
				j < (node_index + num_children); j++)
				psci_init_pwr_domain_node((uint16_t)j,
						  parent_node_index - 1U,
						  (unsigned char)level);

			node_index = j;
			num_nodes_at_next_lvl += num_children;
			parent_node_index++;
		}

		num_nodes_at_lvl = num_nodes_at_next_lvl;
		level--;

		/* Reset the index for the cpu power domain array */
		if (level == (int) PSCI_CPU_PWR_LVL)
			node_index = 0;
	}

	/* Validate the sanity of array exported by the platform */
	assert(j <= PLATFORM_CORE_COUNT);
	/* After the last (CPU) level, j is one past the last CPU index
	 * allocated, i.e. the number of CPU nodes. */
	return j;
}
178*91f16700Schasinglulu 
179*91f16700Schasinglulu /*******************************************************************************
180*91f16700Schasinglulu  * This function does the architectural setup and takes the warm boot
181*91f16700Schasinglulu  * entry-point `mailbox_ep` as an argument. The function also initializes the
182*91f16700Schasinglulu  * power domain topology tree by querying the platform. The power domain nodes
183*91f16700Schasinglulu  * higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and
184*91f16700Schasinglulu  * the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform
185*91f16700Schasinglulu  * exports its static topology map through the
186*91f16700Schasinglulu  * populate_power_domain_topology_tree() API. The algorithm populates the
187*91f16700Schasinglulu  * psci_non_cpu_pd_nodes and psci_cpu_pd_nodes iteratively by using this
188*91f16700Schasinglulu  * topology map.  On a platform that implements two clusters of 2 cpus each,
189*91f16700Schasinglulu  * and supporting 3 domain levels, the populated psci_non_cpu_pd_nodes would
190*91f16700Schasinglulu  * look like this:
191*91f16700Schasinglulu  *
192*91f16700Schasinglulu  * ---------------------------------------------------
193*91f16700Schasinglulu  * | system node | cluster 0 node  | cluster 1 node  |
194*91f16700Schasinglulu  * ---------------------------------------------------
195*91f16700Schasinglulu  *
196*91f16700Schasinglulu  * And populated psci_cpu_pd_nodes would look like this :
197*91f16700Schasinglulu  * <-    cpus cluster0   -><-   cpus cluster1   ->
198*91f16700Schasinglulu  * ------------------------------------------------
199*91f16700Schasinglulu  * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3  |
200*91f16700Schasinglulu  * ------------------------------------------------
201*91f16700Schasinglulu  ******************************************************************************/
202*91f16700Schasinglulu int __init psci_setup(const psci_lib_args_t *lib_args)
203*91f16700Schasinglulu {
204*91f16700Schasinglulu 	const unsigned char *topology_tree;
205*91f16700Schasinglulu 
206*91f16700Schasinglulu 	assert(VERIFY_PSCI_LIB_ARGS_V1(lib_args));
207*91f16700Schasinglulu 
208*91f16700Schasinglulu 	/* Do the Architectural initialization */
209*91f16700Schasinglulu 	psci_arch_setup();
210*91f16700Schasinglulu 
211*91f16700Schasinglulu 	/* Query the topology map from the platform */
212*91f16700Schasinglulu 	topology_tree = plat_get_power_domain_tree_desc();
213*91f16700Schasinglulu 
214*91f16700Schasinglulu 	/* Populate the power domain arrays using the platform topology map */
215*91f16700Schasinglulu 	psci_plat_core_count = populate_power_domain_tree(topology_tree);
216*91f16700Schasinglulu 
217*91f16700Schasinglulu 	/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
218*91f16700Schasinglulu 	psci_update_pwrlvl_limits();
219*91f16700Schasinglulu 
220*91f16700Schasinglulu 	/* Populate the mpidr field of cpu node for this CPU */
221*91f16700Schasinglulu 	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
222*91f16700Schasinglulu 		read_mpidr() & MPIDR_AFFINITY_MASK;
223*91f16700Schasinglulu 
224*91f16700Schasinglulu 	psci_init_req_local_pwr_states();
225*91f16700Schasinglulu 
226*91f16700Schasinglulu 	/*
227*91f16700Schasinglulu 	 * Set the requested and target state of this CPU and all the higher
228*91f16700Schasinglulu 	 * power domain levels for this CPU to run.
229*91f16700Schasinglulu 	 */
230*91f16700Schasinglulu 	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);
231*91f16700Schasinglulu 
232*91f16700Schasinglulu 	(void) plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep,
233*91f16700Schasinglulu 				   &psci_plat_pm_ops);
234*91f16700Schasinglulu 	assert(psci_plat_pm_ops != NULL);
235*91f16700Schasinglulu 
236*91f16700Schasinglulu 	/*
237*91f16700Schasinglulu 	 * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
238*91f16700Schasinglulu 	 * during warm boot, possibly before data cache is enabled.
239*91f16700Schasinglulu 	 */
240*91f16700Schasinglulu 	psci_flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
241*91f16700Schasinglulu 					sizeof(psci_plat_pm_ops));
242*91f16700Schasinglulu 
243*91f16700Schasinglulu 	/* Initialize the psci capability */
244*91f16700Schasinglulu 	psci_caps = PSCI_GENERIC_CAP;
245*91f16700Schasinglulu 
246*91f16700Schasinglulu 	if (psci_plat_pm_ops->pwr_domain_off != NULL)
247*91f16700Schasinglulu 		psci_caps |=  define_psci_cap(PSCI_CPU_OFF);
248*91f16700Schasinglulu 	if ((psci_plat_pm_ops->pwr_domain_on != NULL) &&
249*91f16700Schasinglulu 	    (psci_plat_pm_ops->pwr_domain_on_finish != NULL))
250*91f16700Schasinglulu 		psci_caps |=  define_psci_cap(PSCI_CPU_ON_AARCH64);
251*91f16700Schasinglulu 	if ((psci_plat_pm_ops->pwr_domain_suspend != NULL) &&
252*91f16700Schasinglulu 	    (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL)) {
253*91f16700Schasinglulu 		if (psci_plat_pm_ops->validate_power_state != NULL)
254*91f16700Schasinglulu 			psci_caps |=  define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
255*91f16700Schasinglulu 		if (psci_plat_pm_ops->get_sys_suspend_power_state != NULL)
256*91f16700Schasinglulu 			psci_caps |=  define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
257*91f16700Schasinglulu #if PSCI_OS_INIT_MODE
258*91f16700Schasinglulu 		psci_caps |= define_psci_cap(PSCI_SET_SUSPEND_MODE);
259*91f16700Schasinglulu #endif
260*91f16700Schasinglulu 	}
261*91f16700Schasinglulu 	if (psci_plat_pm_ops->system_off != NULL)
262*91f16700Schasinglulu 		psci_caps |=  define_psci_cap(PSCI_SYSTEM_OFF);
263*91f16700Schasinglulu 	if (psci_plat_pm_ops->system_reset != NULL)
264*91f16700Schasinglulu 		psci_caps |=  define_psci_cap(PSCI_SYSTEM_RESET);
265*91f16700Schasinglulu 	if (psci_plat_pm_ops->get_node_hw_state != NULL)
266*91f16700Schasinglulu 		psci_caps |= define_psci_cap(PSCI_NODE_HW_STATE_AARCH64);
267*91f16700Schasinglulu 	if ((psci_plat_pm_ops->read_mem_protect != NULL) &&
268*91f16700Schasinglulu 			(psci_plat_pm_ops->write_mem_protect != NULL))
269*91f16700Schasinglulu 		psci_caps |= define_psci_cap(PSCI_MEM_PROTECT);
270*91f16700Schasinglulu 	if (psci_plat_pm_ops->mem_protect_chk != NULL)
271*91f16700Schasinglulu 		psci_caps |= define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64);
272*91f16700Schasinglulu 	if (psci_plat_pm_ops->system_reset2 != NULL)
273*91f16700Schasinglulu 		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64);
274*91f16700Schasinglulu 
275*91f16700Schasinglulu #if ENABLE_PSCI_STAT
276*91f16700Schasinglulu 	psci_caps |=  define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
277*91f16700Schasinglulu 	psci_caps |=  define_psci_cap(PSCI_STAT_COUNT_AARCH64);
278*91f16700Schasinglulu #endif
279*91f16700Schasinglulu 
280*91f16700Schasinglulu 	return 0;
281*91f16700Schasinglulu }
282*91f16700Schasinglulu 
/*******************************************************************************
 * This duplicates what the primary cpu did after a cold boot in BL1. The same
 * needs to be done when a cpu is hotplugged in. This function could also over-
 * ride any EL3 setup done by BL1 as this code resides in rw memory.
 ******************************************************************************/
void psci_arch_setup(void)
{
#if (ARM_ARCH_MAJOR > 7) || defined(ARMV7_SUPPORTS_GENERIC_TIMER)
	/* Program the counter frequency (generic timer not guaranteed on v7) */
	write_cntfrq_el0(plat_get_syscnt_freq2());
#endif

	/* Initialize the cpu_ops pointer. */
	init_cpu_ops();

	/* Having initialized cpu_ops, we can now print errata status */
	print_errata_status();

#if ENABLE_PAUTH
	/* Store APIAKey_EL1 key in per-cpu data so it survives power cycles */
	set_cpu_data(apiakey[0], read_apiakeylo_el1());
	set_cpu_data(apiakey[1], read_apiakeyhi_el1());
#endif /* ENABLE_PAUTH */
}
307*91f16700Schasinglulu 
/******************************************************************************
 * PSCI Library interface to initialize the cpu context for the next non
 * secure image during cold boot. The relevant registers in the cpu context
 * need to be retrieved and programmed on return from this interface.
 *
 * `next_image_info` must describe a NON_SECURE image (asserted below).
 *****************************************************************************/
void psci_prepare_next_non_secure_ctx(entry_point_info_t *next_image_info)
{
	assert(GET_SECURITY_STATE(next_image_info->h.attr) == NON_SECURE);
	/* Initialize this CPU's context from the entry point, then set up
	 * EL1/EL2 system registers for the exit to the normal world. */
	cm_init_my_context(next_image_info);
	cm_prepare_el3_exit(NON_SECURE);
}
319