xref: /arm-trusted-firmware/lib/gpt_rme/gpt_rme.c (revision 91f16700b400a8c0651d24a598fc48ee2997a0d7)
1*91f16700Schasinglulu /*
2*91f16700Schasinglulu  * Copyright (c) 2022, Arm Limited. All rights reserved.
3*91f16700Schasinglulu  *
4*91f16700Schasinglulu  * SPDX-License-Identifier: BSD-3-Clause
5*91f16700Schasinglulu  */
6*91f16700Schasinglulu 
7*91f16700Schasinglulu #include <assert.h>
8*91f16700Schasinglulu #include <errno.h>
9*91f16700Schasinglulu #include <inttypes.h>
10*91f16700Schasinglulu #include <limits.h>
11*91f16700Schasinglulu #include <stdint.h>
12*91f16700Schasinglulu 
13*91f16700Schasinglulu #include <arch.h>
14*91f16700Schasinglulu #include <arch_helpers.h>
15*91f16700Schasinglulu #include <common/debug.h>
16*91f16700Schasinglulu #include "gpt_rme_private.h"
17*91f16700Schasinglulu #include <lib/gpt_rme/gpt_rme.h>
18*91f16700Schasinglulu #include <lib/smccc.h>
19*91f16700Schasinglulu #include <lib/spinlock.h>
20*91f16700Schasinglulu #include <lib/xlat_tables/xlat_tables_v2.h>
21*91f16700Schasinglulu 
22*91f16700Schasinglulu #if !ENABLE_RME
23*91f16700Schasinglulu #error "ENABLE_RME must be enabled to use the GPT library."
24*91f16700Schasinglulu #endif
25*91f16700Schasinglulu 
26*91f16700Schasinglulu /*
27*91f16700Schasinglulu  * Lookup T from PPS
28*91f16700Schasinglulu  *
29*91f16700Schasinglulu  *   PPS    Size    T
30*91f16700Schasinglulu  *   0b000  4GB     32
31*91f16700Schasinglulu  *   0b001  64GB    36
32*91f16700Schasinglulu  *   0b010  1TB     40
33*91f16700Schasinglulu  *   0b011  4TB     42
34*91f16700Schasinglulu  *   0b100  16TB    44
35*91f16700Schasinglulu  *   0b101  256TB   48
36*91f16700Schasinglulu  *   0b110  4PB     52
37*91f16700Schasinglulu  *
38*91f16700Schasinglulu  * See section 15.1.27 of the RME specification.
39*91f16700Schasinglulu  */
40*91f16700Schasinglulu static const gpt_t_val_e gpt_t_lookup[] = {PPS_4GB_T, PPS_64GB_T,
41*91f16700Schasinglulu 					   PPS_1TB_T, PPS_4TB_T,
42*91f16700Schasinglulu 					   PPS_16TB_T, PPS_256TB_T,
43*91f16700Schasinglulu 					   PPS_4PB_T};
44*91f16700Schasinglulu 
45*91f16700Schasinglulu /*
46*91f16700Schasinglulu  * Lookup P from PGS
47*91f16700Schasinglulu  *
48*91f16700Schasinglulu  *   PGS    Size    P
49*91f16700Schasinglulu  *   0b00   4KB     12
50*91f16700Schasinglulu  *   0b10   16KB    14
51*91f16700Schasinglulu  *   0b01   64KB    16
52*91f16700Schasinglulu  *
53*91f16700Schasinglulu  * Note that pgs=0b10 is 16KB and pgs=0b01 is 64KB, this is not a typo.
54*91f16700Schasinglulu  *
55*91f16700Schasinglulu  * See section 15.1.27 of the RME specification.
56*91f16700Schasinglulu  */
57*91f16700Schasinglulu static const gpt_p_val_e gpt_p_lookup[] = {PGS_4KB_P, PGS_64KB_P, PGS_16KB_P};
58*91f16700Schasinglulu 
/*
 * This structure contains GPT configuration data.
 */
typedef struct {
	uintptr_t plat_gpt_l0_base;	/* Base address of the L0 GPT table. */
	gpccr_pps_e pps;		/* Protected physical address size field. */
	gpt_t_val_e t;			/* T value looked up from PPS (gpt_t_lookup). */
	gpccr_pgs_e pgs;		/* Physical granule size field. */
	gpt_p_val_e p;			/* P value looked up from PGS (gpt_p_lookup). */
} gpt_config_t;

static gpt_config_t gpt_config;

/* These variables are used during initialization of the L1 tables. */
/* Index of the next unused L1 table within the L1 memory region. */
static unsigned int gpt_next_l1_tbl_idx;
/* Base address of the memory region used for allocating L1 tables. */
static uintptr_t gpt_l1_tbl;
75*91f16700Schasinglulu 
76*91f16700Schasinglulu /*
77*91f16700Schasinglulu  * This function checks to see if a GPI value is valid.
78*91f16700Schasinglulu  *
79*91f16700Schasinglulu  * These are valid GPI values.
80*91f16700Schasinglulu  *   GPT_GPI_NO_ACCESS   U(0x0)
81*91f16700Schasinglulu  *   GPT_GPI_SECURE      U(0x8)
82*91f16700Schasinglulu  *   GPT_GPI_NS          U(0x9)
83*91f16700Schasinglulu  *   GPT_GPI_ROOT        U(0xA)
84*91f16700Schasinglulu  *   GPT_GPI_REALM       U(0xB)
85*91f16700Schasinglulu  *   GPT_GPI_ANY         U(0xF)
86*91f16700Schasinglulu  *
87*91f16700Schasinglulu  * Parameters
88*91f16700Schasinglulu  *   gpi		GPI to check for validity.
89*91f16700Schasinglulu  *
90*91f16700Schasinglulu  * Return
91*91f16700Schasinglulu  *   true for a valid GPI, false for an invalid one.
92*91f16700Schasinglulu  */
93*91f16700Schasinglulu static bool gpt_is_gpi_valid(unsigned int gpi)
94*91f16700Schasinglulu {
95*91f16700Schasinglulu 	if ((gpi == GPT_GPI_NO_ACCESS) || (gpi == GPT_GPI_ANY) ||
96*91f16700Schasinglulu 	    ((gpi >= GPT_GPI_SECURE) && (gpi <= GPT_GPI_REALM))) {
97*91f16700Schasinglulu 		return true;
98*91f16700Schasinglulu 	}
99*91f16700Schasinglulu 	return false;
100*91f16700Schasinglulu }
101*91f16700Schasinglulu 
/*
 * This function checks to see if two PAS regions overlap.
 *
 * Parameters
 *   base_1: base address of first PAS
 *   size_1: size of first PAS
 *   base_2: base address of second PAS
 *   size_2: size of second PAS
 *
 * Return
 *   True if PAS regions overlap, false if they do not.
 */
static bool gpt_check_pas_overlap(uintptr_t base_1, size_t size_1,
				  uintptr_t base_2, size_t size_2)
{
	/* Two half-open ranges intersect iff each starts before the other ends. */
	bool first_starts_before_second_ends = ((base_2 + size_2) > base_1);
	bool second_starts_before_first_ends = ((base_1 + size_1) > base_2);

	return first_starts_before_second_ends && second_starts_before_first_ends;
}
122*91f16700Schasinglulu 
123*91f16700Schasinglulu /*
124*91f16700Schasinglulu  * This helper function checks to see if a PAS region from index 0 to
125*91f16700Schasinglulu  * (pas_idx - 1) occupies the L0 region at index l0_idx in the L0 table.
126*91f16700Schasinglulu  *
127*91f16700Schasinglulu  * Parameters
128*91f16700Schasinglulu  *   l0_idx:      Index of the L0 entry to check
129*91f16700Schasinglulu  *   pas_regions: PAS region array
130*91f16700Schasinglulu  *   pas_idx:     Upper bound of the PAS array index.
131*91f16700Schasinglulu  *
132*91f16700Schasinglulu  * Return
133*91f16700Schasinglulu  *   True if a PAS region occupies the L0 region in question, false if not.
134*91f16700Schasinglulu  */
135*91f16700Schasinglulu static bool gpt_does_previous_pas_exist_here(unsigned int l0_idx,
136*91f16700Schasinglulu 					     pas_region_t *pas_regions,
137*91f16700Schasinglulu 					     unsigned int pas_idx)
138*91f16700Schasinglulu {
139*91f16700Schasinglulu 	/* Iterate over PAS regions up to pas_idx. */
140*91f16700Schasinglulu 	for (unsigned int i = 0U; i < pas_idx; i++) {
141*91f16700Schasinglulu 		if (gpt_check_pas_overlap((GPT_L0GPTSZ_ACTUAL_SIZE * l0_idx),
142*91f16700Schasinglulu 		    GPT_L0GPTSZ_ACTUAL_SIZE,
143*91f16700Schasinglulu 		    pas_regions[i].base_pa, pas_regions[i].size)) {
144*91f16700Schasinglulu 			return true;
145*91f16700Schasinglulu 		}
146*91f16700Schasinglulu 	}
147*91f16700Schasinglulu 	return false;
148*91f16700Schasinglulu }
149*91f16700Schasinglulu 
/*
 * This function iterates over all of the PAS regions and checks them to ensure
 * proper alignment of base and size, that the GPI is valid, and that no regions
 * overlap. As a part of the overlap checks, this function checks existing L0
 * mappings against the new PAS regions in the event that gpt_init_pas_l1_tables
 * is called multiple times to place L1 tables in different areas of memory. It
 * also counts the number of L1 tables needed and returns it on success.
 *
 * Parameters
 *   *pas_regions	Pointer to array of PAS region structures.
 *   pas_region_cnt	Total number of PAS regions in the array.
 *
 * Return
 *   Negative Linux error code in the event of a failure, number of L1 regions
 *   required when successful.
 */
static int gpt_validate_pas_mappings(pas_region_t *pas_regions,
				     unsigned int pas_region_cnt)
{
	unsigned int idx;
	unsigned int l1_cnt = 0U;	/* Running total of L1 tables needed. */
	unsigned int pas_l1_cnt;	/* L1 tables needed by the current PAS. */
	uint64_t *l0_desc = (uint64_t *)gpt_config.plat_gpt_l0_base;

	assert(pas_regions != NULL);
	assert(pas_region_cnt != 0U);

	for (idx = 0U; idx < pas_region_cnt; idx++) {
		/* Check for arithmetic overflow in region. */
		if ((ULONG_MAX - pas_regions[idx].base_pa) <
		    pas_regions[idx].size) {
			ERROR("[GPT] Address overflow in PAS[%u]!\n", idx);
			return -EOVERFLOW;
		}

		/*
		 * Initial checks for PAS validity: the region must fit within
		 * the protected physical address space and carry a valid GPI.
		 */
		if (((pas_regions[idx].base_pa + pas_regions[idx].size) >
		    GPT_PPS_ACTUAL_SIZE(gpt_config.t)) ||
		    !gpt_is_gpi_valid(GPT_PAS_ATTR_GPI(pas_regions[idx].attrs))) {
			ERROR("[GPT] PAS[%u] is invalid!\n", idx);
			return -EFAULT;
		}

		/*
		 * Make sure this PAS does not overlap with another one. We
		 * start from idx + 1 instead of 0 since prior PAS mappings will
		 * have already checked themselves against this one.
		 */
		for (unsigned int i = idx + 1; i < pas_region_cnt; i++) {
			if (gpt_check_pas_overlap(pas_regions[idx].base_pa,
			    pas_regions[idx].size,
			    pas_regions[i].base_pa,
			    pas_regions[i].size)) {
				ERROR("[GPT] PAS[%u] overlaps with PAS[%u]\n",
					i, idx);
				return -EFAULT;
			}
		}

		/*
		 * Since this function can be called multiple times with
		 * separate L1 tables we need to check the existing L0 mapping
		 * to see if this PAS would fall into one that has already been
		 * initialized.
		 */
		for (unsigned int i = GPT_L0_IDX(pas_regions[idx].base_pa);
		     i <= GPT_L0_IDX(pas_regions[idx].base_pa + pas_regions[idx].size - 1);
		     i++) {
			if ((GPT_L0_TYPE(l0_desc[i]) == GPT_L0_TYPE_BLK_DESC) &&
			    (GPT_L0_BLKD_GPI(l0_desc[i]) == GPT_GPI_ANY)) {
				/* This descriptor is unused so continue. */
				continue;
			}

			/*
			 * This descriptor has been initialized in a previous
			 * call to this function so cannot be initialized again.
			 */
			ERROR("[GPT] PAS[%u] overlaps with previous L0[%d]!\n",
			      idx, i);
			return -EFAULT;
		}

		/* Check for block mapping (L0) type. */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
			/* Make sure base and size are block-aligned. */
			if (!GPT_IS_L0_ALIGNED(pas_regions[idx].base_pa) ||
			    !GPT_IS_L0_ALIGNED(pas_regions[idx].size)) {
				ERROR("[GPT] PAS[%u] is not block-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/* Block mappings need no L1 tables; move to next PAS. */
			continue;
		}

		/* Check for granule mapping (L1) type. */
		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
		    GPT_PAS_ATTR_MAP_TYPE_GRANULE) {
			/* Make sure base and size are granule-aligned. */
			if (!GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].base_pa) ||
			    !GPT_IS_L1_ALIGNED(gpt_config.p, pas_regions[idx].size)) {
				ERROR("[GPT] PAS[%u] is not granule-aligned!\n",
				      idx);
				return -EFAULT;
			}

			/*
			 * Find how many L1 tables this PAS occupies: one per
			 * L0 region spanned by [base_pa, base_pa + size).
			 */
			pas_l1_cnt = (GPT_L0_IDX(pas_regions[idx].base_pa +
				     pas_regions[idx].size - 1) -
				     GPT_L0_IDX(pas_regions[idx].base_pa) + 1);

			/*
			 * This creates a situation where, if multiple PAS
			 * regions occupy the same table descriptor, we can get
			 * an artificially high total L1 table count. The way we
			 * handle this is by checking each PAS against those
			 * before it in the array, and if they both occupy the
			 * same PAS we subtract from pas_l1_cnt and only the
			 * first PAS in the array gets to count it.
			 */

			/*
			 * If L1 count is greater than 1 we know the start and
			 * end PAs are in different L0 regions so we must check
			 * both for overlap against other PAS.
			 */
			if (pas_l1_cnt > 1) {
				if (gpt_does_previous_pas_exist_here(
				    GPT_L0_IDX(pas_regions[idx].base_pa +
				    pas_regions[idx].size - 1),
				    pas_regions, idx)) {
					pas_l1_cnt = pas_l1_cnt - 1;
				}
			}

			/* Check the first L0 region of this PAS as well. */
			if (gpt_does_previous_pas_exist_here(
			    GPT_L0_IDX(pas_regions[idx].base_pa),
			    pas_regions, idx)) {
				pas_l1_cnt = pas_l1_cnt - 1;
			}

			l1_cnt += pas_l1_cnt;
			continue;
		}

		/* If execution reaches this point, mapping type is invalid. */
		ERROR("[GPT] PAS[%u] has invalid mapping type 0x%x.\n", idx,
		      GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
		return -EINVAL;
	}

	return l1_cnt;
}
305*91f16700Schasinglulu 
306*91f16700Schasinglulu /*
307*91f16700Schasinglulu  * This function validates L0 initialization parameters.
308*91f16700Schasinglulu  *
309*91f16700Schasinglulu  * Parameters
310*91f16700Schasinglulu  *   l0_mem_base	Base address of memory used for L0 tables.
311*91f16700Schasinglulu  *   l1_mem_size	Size of memory available for L0 tables.
312*91f16700Schasinglulu  *
313*91f16700Schasinglulu  * Return
314*91f16700Schasinglulu  *   Negative Linux error code in the event of a failure, 0 for success.
315*91f16700Schasinglulu  */
316*91f16700Schasinglulu static int gpt_validate_l0_params(gpccr_pps_e pps, uintptr_t l0_mem_base,
317*91f16700Schasinglulu 				  size_t l0_mem_size)
318*91f16700Schasinglulu {
319*91f16700Schasinglulu 	size_t l0_alignment;
320*91f16700Schasinglulu 
321*91f16700Schasinglulu 	/*
322*91f16700Schasinglulu 	 * Make sure PPS is valid and then store it since macros need this value
323*91f16700Schasinglulu 	 * to work.
324*91f16700Schasinglulu 	 */
325*91f16700Schasinglulu 	if (pps > GPT_PPS_MAX) {
326*91f16700Schasinglulu 		ERROR("[GPT] Invalid PPS: 0x%x\n", pps);
327*91f16700Schasinglulu 		return -EINVAL;
328*91f16700Schasinglulu 	}
329*91f16700Schasinglulu 	gpt_config.pps = pps;
330*91f16700Schasinglulu 	gpt_config.t = gpt_t_lookup[pps];
331*91f16700Schasinglulu 
332*91f16700Schasinglulu 	/* Alignment must be the greater of 4k or l0 table size. */
333*91f16700Schasinglulu 	l0_alignment = PAGE_SIZE_4KB;
334*91f16700Schasinglulu 	if (l0_alignment < GPT_L0_TABLE_SIZE(gpt_config.t)) {
335*91f16700Schasinglulu 		l0_alignment = GPT_L0_TABLE_SIZE(gpt_config.t);
336*91f16700Schasinglulu 	}
337*91f16700Schasinglulu 
338*91f16700Schasinglulu 	/* Check base address. */
339*91f16700Schasinglulu 	if ((l0_mem_base == 0U) || ((l0_mem_base & (l0_alignment - 1)) != 0U)) {
340*91f16700Schasinglulu 		ERROR("[GPT] Invalid L0 base address: 0x%lx\n", l0_mem_base);
341*91f16700Schasinglulu 		return -EFAULT;
342*91f16700Schasinglulu 	}
343*91f16700Schasinglulu 
344*91f16700Schasinglulu 	/* Check size. */
345*91f16700Schasinglulu 	if (l0_mem_size < GPT_L0_TABLE_SIZE(gpt_config.t)) {
346*91f16700Schasinglulu 		ERROR("[GPT] Inadequate L0 memory: need 0x%lx, have 0x%lx)\n",
347*91f16700Schasinglulu 		      GPT_L0_TABLE_SIZE(gpt_config.t),
348*91f16700Schasinglulu 		      l0_mem_size);
349*91f16700Schasinglulu 		return -ENOMEM;
350*91f16700Schasinglulu 	}
351*91f16700Schasinglulu 
352*91f16700Schasinglulu 	return 0;
353*91f16700Schasinglulu }
354*91f16700Schasinglulu 
355*91f16700Schasinglulu /*
356*91f16700Schasinglulu  * In the event that L1 tables are needed, this function validates
357*91f16700Schasinglulu  * the L1 table generation parameters.
358*91f16700Schasinglulu  *
359*91f16700Schasinglulu  * Parameters
360*91f16700Schasinglulu  *   l1_mem_base	Base address of memory used for L1 table allocation.
361*91f16700Schasinglulu  *   l1_mem_size	Total size of memory available for L1 tables.
362*91f16700Schasinglulu  *   l1_gpt_cnt		Number of L1 tables needed.
363*91f16700Schasinglulu  *
364*91f16700Schasinglulu  * Return
365*91f16700Schasinglulu  *   Negative Linux error code in the event of a failure, 0 for success.
366*91f16700Schasinglulu  */
367*91f16700Schasinglulu static int gpt_validate_l1_params(uintptr_t l1_mem_base, size_t l1_mem_size,
368*91f16700Schasinglulu 				  unsigned int l1_gpt_cnt)
369*91f16700Schasinglulu {
370*91f16700Schasinglulu 	size_t l1_gpt_mem_sz;
371*91f16700Schasinglulu 
372*91f16700Schasinglulu 	/* Check if the granularity is supported */
373*91f16700Schasinglulu 	if (!xlat_arch_is_granule_size_supported(
374*91f16700Schasinglulu 	    GPT_PGS_ACTUAL_SIZE(gpt_config.p))) {
375*91f16700Schasinglulu 		return -EPERM;
376*91f16700Schasinglulu 	}
377*91f16700Schasinglulu 
378*91f16700Schasinglulu 	/* Make sure L1 tables are aligned to their size. */
379*91f16700Schasinglulu 	if ((l1_mem_base & (GPT_L1_TABLE_SIZE(gpt_config.p) - 1)) != 0U) {
380*91f16700Schasinglulu 		ERROR("[GPT] Unaligned L1 GPT base address: 0x%lx\n",
381*91f16700Schasinglulu 		      l1_mem_base);
382*91f16700Schasinglulu 		return -EFAULT;
383*91f16700Schasinglulu 	}
384*91f16700Schasinglulu 
385*91f16700Schasinglulu 	/* Get total memory needed for L1 tables. */
386*91f16700Schasinglulu 	l1_gpt_mem_sz = l1_gpt_cnt * GPT_L1_TABLE_SIZE(gpt_config.p);
387*91f16700Schasinglulu 
388*91f16700Schasinglulu 	/* Check for overflow. */
389*91f16700Schasinglulu 	if ((l1_gpt_mem_sz / GPT_L1_TABLE_SIZE(gpt_config.p)) != l1_gpt_cnt) {
390*91f16700Schasinglulu 		ERROR("[GPT] Overflow calculating L1 memory size.\n");
391*91f16700Schasinglulu 		return -ENOMEM;
392*91f16700Schasinglulu 	}
393*91f16700Schasinglulu 
394*91f16700Schasinglulu 	/* Make sure enough space was supplied. */
395*91f16700Schasinglulu 	if (l1_mem_size < l1_gpt_mem_sz) {
396*91f16700Schasinglulu 		ERROR("[GPT] Inadequate memory for L1 GPTs. ");
397*91f16700Schasinglulu 		ERROR("      Expected 0x%lx bytes. Got 0x%lx bytes\n",
398*91f16700Schasinglulu 		      l1_gpt_mem_sz, l1_mem_size);
399*91f16700Schasinglulu 		return -ENOMEM;
400*91f16700Schasinglulu 	}
401*91f16700Schasinglulu 
402*91f16700Schasinglulu 	VERBOSE("[GPT] Requested 0x%lx bytes for L1 GPTs.\n", l1_gpt_mem_sz);
403*91f16700Schasinglulu 	return 0;
404*91f16700Schasinglulu }
405*91f16700Schasinglulu 
406*91f16700Schasinglulu /*
407*91f16700Schasinglulu  * This function initializes L0 block descriptors (regions that cannot be
408*91f16700Schasinglulu  * transitioned at the granule level) according to the provided PAS.
409*91f16700Schasinglulu  *
410*91f16700Schasinglulu  * Parameters
411*91f16700Schasinglulu  *   *pas		Pointer to the structure defining the PAS region to
412*91f16700Schasinglulu  *			initialize.
413*91f16700Schasinglulu  */
414*91f16700Schasinglulu static void gpt_generate_l0_blk_desc(pas_region_t *pas)
415*91f16700Schasinglulu {
416*91f16700Schasinglulu 	uint64_t gpt_desc;
417*91f16700Schasinglulu 	unsigned int end_idx;
418*91f16700Schasinglulu 	unsigned int idx;
419*91f16700Schasinglulu 	uint64_t *l0_gpt_arr;
420*91f16700Schasinglulu 
421*91f16700Schasinglulu 	assert(gpt_config.plat_gpt_l0_base != 0U);
422*91f16700Schasinglulu 	assert(pas != NULL);
423*91f16700Schasinglulu 
424*91f16700Schasinglulu 	/*
425*91f16700Schasinglulu 	 * Checking of PAS parameters has already been done in
426*91f16700Schasinglulu 	 * gpt_validate_pas_mappings so no need to check the same things again.
427*91f16700Schasinglulu 	 */
428*91f16700Schasinglulu 
429*91f16700Schasinglulu 	l0_gpt_arr = (uint64_t *)gpt_config.plat_gpt_l0_base;
430*91f16700Schasinglulu 
431*91f16700Schasinglulu 	/* Create the GPT Block descriptor for this PAS region */
432*91f16700Schasinglulu 	gpt_desc = GPT_L0_BLK_DESC(GPT_PAS_ATTR_GPI(pas->attrs));
433*91f16700Schasinglulu 
434*91f16700Schasinglulu 	/* Start index of this region in L0 GPTs */
435*91f16700Schasinglulu 	idx = GPT_L0_IDX(pas->base_pa);
436*91f16700Schasinglulu 
437*91f16700Schasinglulu 	/*
438*91f16700Schasinglulu 	 * Determine number of L0 GPT descriptors covered by
439*91f16700Schasinglulu 	 * this PAS region and use the count to populate these
440*91f16700Schasinglulu 	 * descriptors.
441*91f16700Schasinglulu 	 */
442*91f16700Schasinglulu 	end_idx = GPT_L0_IDX(pas->base_pa + pas->size);
443*91f16700Schasinglulu 
444*91f16700Schasinglulu 	/* Generate the needed block descriptors. */
445*91f16700Schasinglulu 	for (; idx < end_idx; idx++) {
446*91f16700Schasinglulu 		l0_gpt_arr[idx] = gpt_desc;
447*91f16700Schasinglulu 		VERBOSE("[GPT] L0 entry (BLOCK) index %u [%p]: GPI = 0x%" PRIx64 " (0x%" PRIx64 ")\n",
448*91f16700Schasinglulu 			idx, &l0_gpt_arr[idx],
449*91f16700Schasinglulu 			(gpt_desc >> GPT_L0_BLK_DESC_GPI_SHIFT) &
450*91f16700Schasinglulu 			GPT_L0_BLK_DESC_GPI_MASK, l0_gpt_arr[idx]);
451*91f16700Schasinglulu 	}
452*91f16700Schasinglulu }
453*91f16700Schasinglulu 
454*91f16700Schasinglulu /*
455*91f16700Schasinglulu  * Helper function to determine if the end physical address lies in the same L0
456*91f16700Schasinglulu  * region as the current physical address. If true, the end physical address is
457*91f16700Schasinglulu  * returned else, the start address of the next region is returned.
458*91f16700Schasinglulu  *
459*91f16700Schasinglulu  * Parameters
460*91f16700Schasinglulu  *   cur_pa		Physical address of the current PA in the loop through
461*91f16700Schasinglulu  *			the range.
462*91f16700Schasinglulu  *   end_pa		Physical address of the end PA in a PAS range.
463*91f16700Schasinglulu  *
464*91f16700Schasinglulu  * Return
465*91f16700Schasinglulu  *   The PA of the end of the current range.
466*91f16700Schasinglulu  */
467*91f16700Schasinglulu static uintptr_t gpt_get_l1_end_pa(uintptr_t cur_pa, uintptr_t end_pa)
468*91f16700Schasinglulu {
469*91f16700Schasinglulu 	uintptr_t cur_idx;
470*91f16700Schasinglulu 	uintptr_t end_idx;
471*91f16700Schasinglulu 
472*91f16700Schasinglulu 	cur_idx = GPT_L0_IDX(cur_pa);
473*91f16700Schasinglulu 	end_idx = GPT_L0_IDX(end_pa);
474*91f16700Schasinglulu 
475*91f16700Schasinglulu 	assert(cur_idx <= end_idx);
476*91f16700Schasinglulu 
477*91f16700Schasinglulu 	if (cur_idx == end_idx) {
478*91f16700Schasinglulu 		return end_pa;
479*91f16700Schasinglulu 	}
480*91f16700Schasinglulu 
481*91f16700Schasinglulu 	return (cur_idx + 1U) << GPT_L0_IDX_SHIFT;
482*91f16700Schasinglulu }
483*91f16700Schasinglulu 
/*
 * Helper function to fill out GPI entries in a single L1 table. This function
 * fills out entire L1 descriptors at a time to save memory writes.
 *
 * Each 64-bit L1 descriptor packs 16 4-bit GPI fields, so a bit mask is used
 * to preserve neighbouring GPI fields when a range starts or stops partway
 * through a descriptor.
 *
 * Parameters
 *   gpi		GPI to set this range to
 *   l1			Pointer to L1 table to fill out
 *   first		Address of first granule in range.
 *   last		Address of last granule in range (inclusive).
 */
static void gpt_fill_l1_tbl(uint64_t gpi, uint64_t *l1, uintptr_t first,
			    uintptr_t last)
{
	/* Descriptor with every 4-bit field set to the requested GPI. */
	uint64_t gpi_field = GPT_BUILD_L1_DESC(gpi);
	uint64_t gpi_mask = 0xFFFFFFFFFFFFFFFF;

	/* Both ends must be granule-aligned and within a single L0 region. */
	assert(first <= last);
	assert((first & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
	assert((last & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) == 0U);
	assert(GPT_L0_IDX(first) == GPT_L0_IDX(last));
	assert(l1 != NULL);

	/* Shift the mask if we're starting in the middle of an L1 entry. */
	gpi_mask = gpi_mask << (GPT_L1_GPI_IDX(gpt_config.p, first) << 2);

	/* Fill out each L1 entry for this region. */
	for (unsigned int i = GPT_L1_IDX(gpt_config.p, first);
	     i <= GPT_L1_IDX(gpt_config.p, last); i++) {
		/* Account for stopping in the middle of an L1 entry. */
		if (i == GPT_L1_IDX(gpt_config.p, last)) {
			/* Clear mask bits above the last GPI field in range. */
			gpi_mask &= (gpi_mask >> ((15 -
				    GPT_L1_GPI_IDX(gpt_config.p, last)) << 2));
		}

		/* Write GPI values. */
		/* The fields being written must still hold GPT_GPI_ANY. */
		assert((l1[i] & gpi_mask) ==
		       (GPT_BUILD_L1_DESC(GPT_GPI_ANY) & gpi_mask));
		l1[i] = (l1[i] & ~gpi_mask) | (gpi_mask & gpi_field);

		/* Reset mask. */
		gpi_mask = 0xFFFFFFFFFFFFFFFF;
	}
}
527*91f16700Schasinglulu 
528*91f16700Schasinglulu /*
529*91f16700Schasinglulu  * This function finds the next available unused L1 table and initializes all
530*91f16700Schasinglulu  * granules descriptor entries to GPI_ANY. This ensures that there are no chunks
531*91f16700Schasinglulu  * of GPI_NO_ACCESS (0b0000) memory floating around in the system in the
532*91f16700Schasinglulu  * event that a PAS region stops midway through an L1 table, thus guaranteeing
533*91f16700Schasinglulu  * that all memory not explicitly assigned is GPI_ANY. This function does not
534*91f16700Schasinglulu  * check for overflow conditions, that should be done by the caller.
535*91f16700Schasinglulu  *
536*91f16700Schasinglulu  * Return
537*91f16700Schasinglulu  *   Pointer to the next available L1 table.
538*91f16700Schasinglulu  */
539*91f16700Schasinglulu static uint64_t *gpt_get_new_l1_tbl(void)
540*91f16700Schasinglulu {
541*91f16700Schasinglulu 	/* Retrieve the next L1 table. */
542*91f16700Schasinglulu 	uint64_t *l1 = (uint64_t *)((uint64_t)(gpt_l1_tbl) +
543*91f16700Schasinglulu 		       (GPT_L1_TABLE_SIZE(gpt_config.p) *
544*91f16700Schasinglulu 		       gpt_next_l1_tbl_idx));
545*91f16700Schasinglulu 
546*91f16700Schasinglulu 	/* Increment L1 counter. */
547*91f16700Schasinglulu 	gpt_next_l1_tbl_idx++;
548*91f16700Schasinglulu 
549*91f16700Schasinglulu 	/* Initialize all GPIs to GPT_GPI_ANY */
550*91f16700Schasinglulu 	for (unsigned int i = 0U; i < GPT_L1_ENTRY_COUNT(gpt_config.p); i++) {
551*91f16700Schasinglulu 		l1[i] = GPT_BUILD_L1_DESC(GPT_GPI_ANY);
552*91f16700Schasinglulu 	}
553*91f16700Schasinglulu 
554*91f16700Schasinglulu 	return l1;
555*91f16700Schasinglulu }
556*91f16700Schasinglulu 
/*
 * When L1 tables are needed, this function creates the necessary L0 table
 * descriptors and fills out the L1 table entries according to the supplied
 * PAS range.
 *
 * Parameters
 *   *pas		Pointer to the structure defining the PAS region.
 */
static void gpt_generate_l0_tbl_desc(pas_region_t *pas)
{
	uintptr_t end_pa;	/* First address past the end of the PAS. */
	uintptr_t cur_pa;	/* Current working address within the PAS. */
	uintptr_t last_gran_pa;	/* Last granule covered in this L0 region. */
	uint64_t *l0_gpt_base;
	uint64_t *l1_gpt_arr;
	unsigned int l0_idx;

	assert(gpt_config.plat_gpt_l0_base != 0U);
	assert(pas != NULL);

	/*
	 * Checking of PAS parameters has already been done in
	 * gpt_validate_pas_mappings so no need to check the same things again.
	 */

	end_pa = pas->base_pa + pas->size;
	l0_gpt_base = (uint64_t *)gpt_config.plat_gpt_l0_base;

	/* We start working from the granule at base PA */
	cur_pa = pas->base_pa;

	/* Iterate over each L0 region in this memory range. */
	for (l0_idx = GPT_L0_IDX(pas->base_pa);
	     l0_idx <= GPT_L0_IDX(end_pa - 1U);
	     l0_idx++) {

		/*
		 * See if the L0 entry is already a table descriptor or if we
		 * need to create one.
		 */
		if (GPT_L0_TYPE(l0_gpt_base[l0_idx]) == GPT_L0_TYPE_TBL_DESC) {
			/* Get the L1 array from the L0 entry. */
			l1_gpt_arr = GPT_L0_TBLD_ADDR(l0_gpt_base[l0_idx]);
		} else {
			/* Get a new L1 table from the L1 memory space. */
			l1_gpt_arr = gpt_get_new_l1_tbl();

			/* Fill out the L0 descriptor and flush it. */
			l0_gpt_base[l0_idx] = GPT_L0_TBL_DESC(l1_gpt_arr);
		}

		VERBOSE("[GPT] L0 entry (TABLE) index %u [%p] ==> L1 Addr 0x%llx (0x%" PRIx64 ")\n",
			l0_idx, &l0_gpt_base[l0_idx],
			(unsigned long long)(l1_gpt_arr),
			l0_gpt_base[l0_idx]);

		/*
		 * Determine the PA of the last granule in this L0 descriptor.
		 */
		last_gran_pa = gpt_get_l1_end_pa(cur_pa, end_pa) -
			       GPT_PGS_ACTUAL_SIZE(gpt_config.p);

		/*
		 * Fill up L1 GPT entries between these two addresses. This
		 * function needs the addresses of the first granule and last
		 * granule in the range.
		 */
		gpt_fill_l1_tbl(GPT_PAS_ATTR_GPI(pas->attrs), l1_gpt_arr,
				cur_pa, last_gran_pa);

		/* Advance cur_pa to first granule in next L0 region. */
		cur_pa = gpt_get_l1_end_pa(cur_pa, end_pa);
	}
}
631*91f16700Schasinglulu 
632*91f16700Schasinglulu /*
633*91f16700Schasinglulu  * This function flushes a range of L0 descriptors used by a given PAS region
634*91f16700Schasinglulu  * array. There is a chance that some unmodified L0 descriptors would be flushed
635*91f16700Schasinglulu  * in the case that there are "holes" in an array of PAS regions but overall
636*91f16700Schasinglulu  * this should be faster than individually flushing each modified L0 descriptor
637*91f16700Schasinglulu  * as they are created.
638*91f16700Schasinglulu  *
639*91f16700Schasinglulu  * Parameters
640*91f16700Schasinglulu  *   *pas		Pointer to an array of PAS regions.
641*91f16700Schasinglulu  *   pas_count		Number of entries in the PAS array.
642*91f16700Schasinglulu  */
643*91f16700Schasinglulu static void flush_l0_for_pas_array(pas_region_t *pas, unsigned int pas_count)
644*91f16700Schasinglulu {
645*91f16700Schasinglulu 	unsigned int idx;
646*91f16700Schasinglulu 	unsigned int start_idx;
647*91f16700Schasinglulu 	unsigned int end_idx;
648*91f16700Schasinglulu 	uint64_t *l0 = (uint64_t *)gpt_config.plat_gpt_l0_base;
649*91f16700Schasinglulu 
650*91f16700Schasinglulu 	assert(pas != NULL);
651*91f16700Schasinglulu 	assert(pas_count > 0);
652*91f16700Schasinglulu 
653*91f16700Schasinglulu 	/* Initial start and end values. */
654*91f16700Schasinglulu 	start_idx = GPT_L0_IDX(pas[0].base_pa);
655*91f16700Schasinglulu 	end_idx = GPT_L0_IDX(pas[0].base_pa + pas[0].size - 1);
656*91f16700Schasinglulu 
657*91f16700Schasinglulu 	/* Find lowest and highest L0 indices used in this PAS array. */
658*91f16700Schasinglulu 	for (idx = 1; idx < pas_count; idx++) {
659*91f16700Schasinglulu 		if (GPT_L0_IDX(pas[idx].base_pa) < start_idx) {
660*91f16700Schasinglulu 			start_idx = GPT_L0_IDX(pas[idx].base_pa);
661*91f16700Schasinglulu 		}
662*91f16700Schasinglulu 		if (GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1) > end_idx) {
663*91f16700Schasinglulu 			end_idx = GPT_L0_IDX(pas[idx].base_pa + pas[idx].size - 1);
664*91f16700Schasinglulu 		}
665*91f16700Schasinglulu 	}
666*91f16700Schasinglulu 
667*91f16700Schasinglulu 	/*
668*91f16700Schasinglulu 	 * Flush all covered L0 descriptors, add 1 because we need to include
669*91f16700Schasinglulu 	 * the end index value.
670*91f16700Schasinglulu 	 */
671*91f16700Schasinglulu 	flush_dcache_range((uintptr_t)&l0[start_idx],
672*91f16700Schasinglulu 			   ((end_idx + 1) - start_idx) * sizeof(uint64_t));
673*91f16700Schasinglulu }
674*91f16700Schasinglulu 
675*91f16700Schasinglulu /*
676*91f16700Schasinglulu  * Public API to enable granule protection checks once the tables have all been
677*91f16700Schasinglulu  * initialized. This function is called at first initialization and then again
678*91f16700Schasinglulu  * later during warm boots of CPU cores.
679*91f16700Schasinglulu  *
680*91f16700Schasinglulu  * Return
681*91f16700Schasinglulu  *   Negative Linux error code in the event of a failure, 0 for success.
682*91f16700Schasinglulu  */
683*91f16700Schasinglulu int gpt_enable(void)
684*91f16700Schasinglulu {
685*91f16700Schasinglulu 	u_register_t gpccr_el3;
686*91f16700Schasinglulu 
687*91f16700Schasinglulu 	/*
688*91f16700Schasinglulu 	 * Granule tables must be initialised before enabling
689*91f16700Schasinglulu 	 * granule protection.
690*91f16700Schasinglulu 	 */
691*91f16700Schasinglulu 	if (gpt_config.plat_gpt_l0_base == 0U) {
692*91f16700Schasinglulu 		ERROR("[GPT] Tables have not been initialized!\n");
693*91f16700Schasinglulu 		return -EPERM;
694*91f16700Schasinglulu 	}
695*91f16700Schasinglulu 
696*91f16700Schasinglulu 	/* Write the base address of the L0 tables into GPTBR */
697*91f16700Schasinglulu 	write_gptbr_el3(((gpt_config.plat_gpt_l0_base >> GPTBR_BADDR_VAL_SHIFT)
698*91f16700Schasinglulu 			>> GPTBR_BADDR_SHIFT) & GPTBR_BADDR_MASK);
699*91f16700Schasinglulu 
700*91f16700Schasinglulu 	/* GPCCR_EL3.PPS */
701*91f16700Schasinglulu 	gpccr_el3 = SET_GPCCR_PPS(gpt_config.pps);
702*91f16700Schasinglulu 
703*91f16700Schasinglulu 	/* GPCCR_EL3.PGS */
704*91f16700Schasinglulu 	gpccr_el3 |= SET_GPCCR_PGS(gpt_config.pgs);
705*91f16700Schasinglulu 
706*91f16700Schasinglulu 	/*
707*91f16700Schasinglulu 	 * Since EL3 maps the L1 region as Inner shareable, use the same
708*91f16700Schasinglulu 	 * shareability attribute for GPC as well so that
709*91f16700Schasinglulu 	 * GPC fetches are visible to PEs
710*91f16700Schasinglulu 	 */
711*91f16700Schasinglulu 	gpccr_el3 |= SET_GPCCR_SH(GPCCR_SH_IS);
712*91f16700Schasinglulu 
713*91f16700Schasinglulu 	/* Outer and Inner cacheability set to Normal memory, WB, RA, WA. */
714*91f16700Schasinglulu 	gpccr_el3 |= SET_GPCCR_ORGN(GPCCR_ORGN_WB_RA_WA);
715*91f16700Schasinglulu 	gpccr_el3 |= SET_GPCCR_IRGN(GPCCR_IRGN_WB_RA_WA);
716*91f16700Schasinglulu 
717*91f16700Schasinglulu 	/* Prepopulate GPCCR_EL3 but don't enable GPC yet */
718*91f16700Schasinglulu 	write_gpccr_el3(gpccr_el3);
719*91f16700Schasinglulu 	isb();
720*91f16700Schasinglulu 
721*91f16700Schasinglulu 	/* Invalidate any stale TLB entries and any cached register fields */
722*91f16700Schasinglulu 	tlbipaallos();
723*91f16700Schasinglulu 	dsb();
724*91f16700Schasinglulu 	isb();
725*91f16700Schasinglulu 
726*91f16700Schasinglulu 	/* Enable GPT */
727*91f16700Schasinglulu 	gpccr_el3 |= GPCCR_GPC_BIT;
728*91f16700Schasinglulu 
729*91f16700Schasinglulu 	/* TODO: Configure GPCCR_EL3_GPCP for Fault control. */
730*91f16700Schasinglulu 	write_gpccr_el3(gpccr_el3);
731*91f16700Schasinglulu 	isb();
732*91f16700Schasinglulu 	tlbipaallos();
733*91f16700Schasinglulu 	dsb();
734*91f16700Schasinglulu 	isb();
735*91f16700Schasinglulu 
736*91f16700Schasinglulu 	return 0;
737*91f16700Schasinglulu }
738*91f16700Schasinglulu 
739*91f16700Schasinglulu /*
740*91f16700Schasinglulu  * Public API to disable granule protection checks.
741*91f16700Schasinglulu  */
742*91f16700Schasinglulu void gpt_disable(void)
743*91f16700Schasinglulu {
744*91f16700Schasinglulu 	u_register_t gpccr_el3 = read_gpccr_el3();
745*91f16700Schasinglulu 
746*91f16700Schasinglulu 	write_gpccr_el3(gpccr_el3 & ~GPCCR_GPC_BIT);
747*91f16700Schasinglulu 	dsbsy();
748*91f16700Schasinglulu 	isb();
749*91f16700Schasinglulu }
750*91f16700Schasinglulu 
751*91f16700Schasinglulu /*
752*91f16700Schasinglulu  * Public API that initializes the entire protected space to GPT_GPI_ANY using
753*91f16700Schasinglulu  * the L0 tables (block descriptors). Ideally, this function is invoked prior
754*91f16700Schasinglulu  * to DDR discovery and initialization. The MMU must be initialized before
755*91f16700Schasinglulu  * calling this function.
756*91f16700Schasinglulu  *
757*91f16700Schasinglulu  * Parameters
758*91f16700Schasinglulu  *   pps		PPS value to use for table generation
759*91f16700Schasinglulu  *   l0_mem_base	Base address of L0 tables in memory.
760*91f16700Schasinglulu  *   l0_mem_size	Total size of memory available for L0 tables.
761*91f16700Schasinglulu  *
762*91f16700Schasinglulu  * Return
763*91f16700Schasinglulu  *   Negative Linux error code in the event of a failure, 0 for success.
764*91f16700Schasinglulu  */
765*91f16700Schasinglulu int gpt_init_l0_tables(gpccr_pps_e pps, uintptr_t l0_mem_base,
766*91f16700Schasinglulu 		       size_t l0_mem_size)
767*91f16700Schasinglulu {
768*91f16700Schasinglulu 	int ret;
769*91f16700Schasinglulu 	uint64_t gpt_desc;
770*91f16700Schasinglulu 
771*91f16700Schasinglulu 	/* Ensure that MMU and Data caches are enabled. */
772*91f16700Schasinglulu 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
773*91f16700Schasinglulu 
774*91f16700Schasinglulu 	/* Validate other parameters. */
775*91f16700Schasinglulu 	ret = gpt_validate_l0_params(pps, l0_mem_base, l0_mem_size);
776*91f16700Schasinglulu 	if (ret != 0) {
777*91f16700Schasinglulu 		return ret;
778*91f16700Schasinglulu 	}
779*91f16700Schasinglulu 
780*91f16700Schasinglulu 	/* Create the descriptor to initialize L0 entries with. */
781*91f16700Schasinglulu 	gpt_desc = GPT_L0_BLK_DESC(GPT_GPI_ANY);
782*91f16700Schasinglulu 
783*91f16700Schasinglulu 	/* Iterate through all L0 entries */
784*91f16700Schasinglulu 	for (unsigned int i = 0U; i < GPT_L0_REGION_COUNT(gpt_config.t); i++) {
785*91f16700Schasinglulu 		((uint64_t *)l0_mem_base)[i] = gpt_desc;
786*91f16700Schasinglulu 	}
787*91f16700Schasinglulu 
788*91f16700Schasinglulu 	/* Flush updated L0 tables to memory. */
789*91f16700Schasinglulu 	flush_dcache_range((uintptr_t)l0_mem_base,
790*91f16700Schasinglulu 			   (size_t)GPT_L0_TABLE_SIZE(gpt_config.t));
791*91f16700Schasinglulu 
792*91f16700Schasinglulu 	/* Stash the L0 base address once initial setup is complete. */
793*91f16700Schasinglulu 	gpt_config.plat_gpt_l0_base = l0_mem_base;
794*91f16700Schasinglulu 
795*91f16700Schasinglulu 	return 0;
796*91f16700Schasinglulu }
797*91f16700Schasinglulu 
798*91f16700Schasinglulu /*
799*91f16700Schasinglulu  * Public API that carves out PAS regions from the L0 tables and builds any L1
800*91f16700Schasinglulu  * tables that are needed. This function ideally is run after DDR discovery and
801*91f16700Schasinglulu  * initialization. The L0 tables must have already been initialized to GPI_ANY
802*91f16700Schasinglulu  * when this function is called.
803*91f16700Schasinglulu  *
804*91f16700Schasinglulu  * This function can be called multiple times with different L1 memory ranges
805*91f16700Schasinglulu  * and PAS regions if it is desirable to place L1 tables in different locations
806*91f16700Schasinglulu  * in memory. (ex: you have multiple DDR banks and want to place the L1 tables
807*91f16700Schasinglulu  * in the DDR bank that they control)
808*91f16700Schasinglulu  *
809*91f16700Schasinglulu  * Parameters
810*91f16700Schasinglulu  *   pgs		PGS value to use for table generation.
811*91f16700Schasinglulu  *   l1_mem_base	Base address of memory used for L1 tables.
812*91f16700Schasinglulu  *   l1_mem_size	Total size of memory available for L1 tables.
813*91f16700Schasinglulu  *   *pas_regions	Pointer to PAS regions structure array.
814*91f16700Schasinglulu  *   pas_count		Total number of PAS regions.
815*91f16700Schasinglulu  *
816*91f16700Schasinglulu  * Return
817*91f16700Schasinglulu  *   Negative Linux error code in the event of a failure, 0 for success.
818*91f16700Schasinglulu  */
819*91f16700Schasinglulu int gpt_init_pas_l1_tables(gpccr_pgs_e pgs, uintptr_t l1_mem_base,
820*91f16700Schasinglulu 			   size_t l1_mem_size, pas_region_t *pas_regions,
821*91f16700Schasinglulu 			   unsigned int pas_count)
822*91f16700Schasinglulu {
823*91f16700Schasinglulu 	int ret;
824*91f16700Schasinglulu 	int l1_gpt_cnt;
825*91f16700Schasinglulu 
826*91f16700Schasinglulu 	/* Ensure that MMU and Data caches are enabled. */
827*91f16700Schasinglulu 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
828*91f16700Schasinglulu 
829*91f16700Schasinglulu 	/* PGS is needed for gpt_validate_pas_mappings so check it now. */
830*91f16700Schasinglulu 	if (pgs > GPT_PGS_MAX) {
831*91f16700Schasinglulu 		ERROR("[GPT] Invalid PGS: 0x%x\n", pgs);
832*91f16700Schasinglulu 		return -EINVAL;
833*91f16700Schasinglulu 	}
834*91f16700Schasinglulu 	gpt_config.pgs = pgs;
835*91f16700Schasinglulu 	gpt_config.p = gpt_p_lookup[pgs];
836*91f16700Schasinglulu 
837*91f16700Schasinglulu 	/* Make sure L0 tables have been initialized. */
838*91f16700Schasinglulu 	if (gpt_config.plat_gpt_l0_base == 0U) {
839*91f16700Schasinglulu 		ERROR("[GPT] L0 tables must be initialized first!\n");
840*91f16700Schasinglulu 		return -EPERM;
841*91f16700Schasinglulu 	}
842*91f16700Schasinglulu 
843*91f16700Schasinglulu 	/* Check if L1 GPTs are required and how many. */
844*91f16700Schasinglulu 	l1_gpt_cnt = gpt_validate_pas_mappings(pas_regions, pas_count);
845*91f16700Schasinglulu 	if (l1_gpt_cnt < 0) {
846*91f16700Schasinglulu 		return l1_gpt_cnt;
847*91f16700Schasinglulu 	}
848*91f16700Schasinglulu 
849*91f16700Schasinglulu 	VERBOSE("[GPT] %u L1 GPTs requested.\n", l1_gpt_cnt);
850*91f16700Schasinglulu 
851*91f16700Schasinglulu 	/* If L1 tables are needed then validate the L1 parameters. */
852*91f16700Schasinglulu 	if (l1_gpt_cnt > 0) {
853*91f16700Schasinglulu 		ret = gpt_validate_l1_params(l1_mem_base, l1_mem_size,
854*91f16700Schasinglulu 		      l1_gpt_cnt);
855*91f16700Schasinglulu 		if (ret != 0) {
856*91f16700Schasinglulu 			return ret;
857*91f16700Schasinglulu 		}
858*91f16700Schasinglulu 
859*91f16700Schasinglulu 		/* Set up parameters for L1 table generation. */
860*91f16700Schasinglulu 		gpt_l1_tbl = l1_mem_base;
861*91f16700Schasinglulu 		gpt_next_l1_tbl_idx = 0U;
862*91f16700Schasinglulu 	}
863*91f16700Schasinglulu 
864*91f16700Schasinglulu 	INFO("[GPT] Boot Configuration\n");
865*91f16700Schasinglulu 	INFO("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
866*91f16700Schasinglulu 	INFO("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
867*91f16700Schasinglulu 	INFO("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
868*91f16700Schasinglulu 	INFO("  PAS count: 0x%x\n", pas_count);
869*91f16700Schasinglulu 	INFO("  L0 base:   0x%lx\n", gpt_config.plat_gpt_l0_base);
870*91f16700Schasinglulu 
871*91f16700Schasinglulu 	/* Generate the tables in memory. */
872*91f16700Schasinglulu 	for (unsigned int idx = 0U; idx < pas_count; idx++) {
873*91f16700Schasinglulu 		INFO("[GPT] PAS[%u]: base 0x%lx, size 0x%lx, GPI 0x%x, type 0x%x\n",
874*91f16700Schasinglulu 		     idx, pas_regions[idx].base_pa, pas_regions[idx].size,
875*91f16700Schasinglulu 		     GPT_PAS_ATTR_GPI(pas_regions[idx].attrs),
876*91f16700Schasinglulu 		     GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs));
877*91f16700Schasinglulu 
878*91f16700Schasinglulu 		/* Check if a block or table descriptor is required */
879*91f16700Schasinglulu 		if (GPT_PAS_ATTR_MAP_TYPE(pas_regions[idx].attrs) ==
880*91f16700Schasinglulu 		    GPT_PAS_ATTR_MAP_TYPE_BLOCK) {
881*91f16700Schasinglulu 			gpt_generate_l0_blk_desc(&pas_regions[idx]);
882*91f16700Schasinglulu 
883*91f16700Schasinglulu 		} else {
884*91f16700Schasinglulu 			gpt_generate_l0_tbl_desc(&pas_regions[idx]);
885*91f16700Schasinglulu 		}
886*91f16700Schasinglulu 	}
887*91f16700Schasinglulu 
888*91f16700Schasinglulu 	/* Flush modified L0 tables. */
889*91f16700Schasinglulu 	flush_l0_for_pas_array(pas_regions, pas_count);
890*91f16700Schasinglulu 
891*91f16700Schasinglulu 	/* Flush L1 tables if needed. */
892*91f16700Schasinglulu 	if (l1_gpt_cnt > 0) {
893*91f16700Schasinglulu 		flush_dcache_range(l1_mem_base,
894*91f16700Schasinglulu 				   GPT_L1_TABLE_SIZE(gpt_config.p) *
895*91f16700Schasinglulu 				   l1_gpt_cnt);
896*91f16700Schasinglulu 	}
897*91f16700Schasinglulu 
898*91f16700Schasinglulu 	/* Make sure that all the entries are written to the memory. */
899*91f16700Schasinglulu 	dsbishst();
900*91f16700Schasinglulu 	tlbipaallos();
901*91f16700Schasinglulu 	dsb();
902*91f16700Schasinglulu 	isb();
903*91f16700Schasinglulu 
904*91f16700Schasinglulu 	return 0;
905*91f16700Schasinglulu }
906*91f16700Schasinglulu 
907*91f16700Schasinglulu /*
908*91f16700Schasinglulu  * Public API to initialize the runtime gpt_config structure based on the values
909*91f16700Schasinglulu  * present in the GPTBR_EL3 and GPCCR_EL3 registers. GPT initialization
910*91f16700Schasinglulu  * typically happens in a bootloader stage prior to setting up the EL3 runtime
911*91f16700Schasinglulu  * environment for the granule transition service so this function detects the
912*91f16700Schasinglulu  * initialization from a previous stage. Granule protection checks must be
913*91f16700Schasinglulu  * enabled already or this function will return an error.
914*91f16700Schasinglulu  *
915*91f16700Schasinglulu  * Return
916*91f16700Schasinglulu  *   Negative Linux error code in the event of a failure, 0 for success.
917*91f16700Schasinglulu  */
918*91f16700Schasinglulu int gpt_runtime_init(void)
919*91f16700Schasinglulu {
920*91f16700Schasinglulu 	u_register_t reg;
921*91f16700Schasinglulu 
922*91f16700Schasinglulu 	/* Ensure that MMU and Data caches are enabled. */
923*91f16700Schasinglulu 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0U);
924*91f16700Schasinglulu 
925*91f16700Schasinglulu 	/* Ensure GPC are already enabled. */
926*91f16700Schasinglulu 	if ((read_gpccr_el3() & GPCCR_GPC_BIT) == 0U) {
927*91f16700Schasinglulu 		ERROR("[GPT] Granule protection checks are not enabled!\n");
928*91f16700Schasinglulu 		return -EPERM;
929*91f16700Schasinglulu 	}
930*91f16700Schasinglulu 
931*91f16700Schasinglulu 	/*
932*91f16700Schasinglulu 	 * Read the L0 table address from GPTBR, we don't need the L1 base
933*91f16700Schasinglulu 	 * address since those are included in the L0 tables as needed.
934*91f16700Schasinglulu 	 */
935*91f16700Schasinglulu 	reg = read_gptbr_el3();
936*91f16700Schasinglulu 	gpt_config.plat_gpt_l0_base = ((reg >> GPTBR_BADDR_SHIFT) &
937*91f16700Schasinglulu 				      GPTBR_BADDR_MASK) <<
938*91f16700Schasinglulu 				      GPTBR_BADDR_VAL_SHIFT;
939*91f16700Schasinglulu 
940*91f16700Schasinglulu 	/* Read GPCCR to get PGS and PPS values. */
941*91f16700Schasinglulu 	reg = read_gpccr_el3();
942*91f16700Schasinglulu 	gpt_config.pps = (reg >> GPCCR_PPS_SHIFT) & GPCCR_PPS_MASK;
943*91f16700Schasinglulu 	gpt_config.t = gpt_t_lookup[gpt_config.pps];
944*91f16700Schasinglulu 	gpt_config.pgs = (reg >> GPCCR_PGS_SHIFT) & GPCCR_PGS_MASK;
945*91f16700Schasinglulu 	gpt_config.p = gpt_p_lookup[gpt_config.pgs];
946*91f16700Schasinglulu 
947*91f16700Schasinglulu 	VERBOSE("[GPT] Runtime Configuration\n");
948*91f16700Schasinglulu 	VERBOSE("  PPS/T:     0x%x/%u\n", gpt_config.pps, gpt_config.t);
949*91f16700Schasinglulu 	VERBOSE("  PGS/P:     0x%x/%u\n", gpt_config.pgs, gpt_config.p);
950*91f16700Schasinglulu 	VERBOSE("  L0GPTSZ/S: 0x%x/%u\n", GPT_L0GPTSZ, GPT_S_VAL);
951*91f16700Schasinglulu 	VERBOSE("  L0 base:   0x%lx\n", gpt_config.plat_gpt_l0_base);
952*91f16700Schasinglulu 
953*91f16700Schasinglulu 	return 0;
954*91f16700Schasinglulu }
955*91f16700Schasinglulu 
956*91f16700Schasinglulu /*
957*91f16700Schasinglulu  * The L1 descriptors are protected by a spinlock to ensure that multiple
958*91f16700Schasinglulu  * CPUs do not attempt to change the descriptors at once. In the future it
959*91f16700Schasinglulu  * would be better to have separate spinlocks for each L1 descriptor.
960*91f16700Schasinglulu  */
961*91f16700Schasinglulu static spinlock_t gpt_lock;
962*91f16700Schasinglulu 
963*91f16700Schasinglulu /*
964*91f16700Schasinglulu  * A helper to write the value (target_pas << gpi_shift) to the index of
965*91f16700Schasinglulu  * the gpt_l1_addr
966*91f16700Schasinglulu  */
967*91f16700Schasinglulu static inline void write_gpt(uint64_t *gpt_l1_desc, uint64_t *gpt_l1_addr,
968*91f16700Schasinglulu 			     unsigned int gpi_shift, unsigned int idx,
969*91f16700Schasinglulu 			     unsigned int target_pas)
970*91f16700Schasinglulu {
971*91f16700Schasinglulu 	*gpt_l1_desc &= ~(GPT_L1_GRAN_DESC_GPI_MASK << gpi_shift);
972*91f16700Schasinglulu 	*gpt_l1_desc |= ((uint64_t)target_pas << gpi_shift);
973*91f16700Schasinglulu 	gpt_l1_addr[idx] = *gpt_l1_desc;
974*91f16700Schasinglulu }
975*91f16700Schasinglulu 
976*91f16700Schasinglulu /*
977*91f16700Schasinglulu  * Helper to retrieve the gpt_l1_* information from the base address
978*91f16700Schasinglulu  * returned in gpi_info
979*91f16700Schasinglulu  */
980*91f16700Schasinglulu static int get_gpi_params(uint64_t base, gpi_info_t *gpi_info)
981*91f16700Schasinglulu {
982*91f16700Schasinglulu 	uint64_t gpt_l0_desc, *gpt_l0_base;
983*91f16700Schasinglulu 
984*91f16700Schasinglulu 	gpt_l0_base = (uint64_t *)gpt_config.plat_gpt_l0_base;
985*91f16700Schasinglulu 	gpt_l0_desc = gpt_l0_base[GPT_L0_IDX(base)];
986*91f16700Schasinglulu 	if (GPT_L0_TYPE(gpt_l0_desc) != GPT_L0_TYPE_TBL_DESC) {
987*91f16700Schasinglulu 		VERBOSE("[GPT] Granule is not covered by a table descriptor!\n");
988*91f16700Schasinglulu 		VERBOSE("      Base=0x%" PRIx64 "\n", base);
989*91f16700Schasinglulu 		return -EINVAL;
990*91f16700Schasinglulu 	}
991*91f16700Schasinglulu 
992*91f16700Schasinglulu 	/* Get the table index and GPI shift from PA. */
993*91f16700Schasinglulu 	gpi_info->gpt_l1_addr = GPT_L0_TBLD_ADDR(gpt_l0_desc);
994*91f16700Schasinglulu 	gpi_info->idx = GPT_L1_IDX(gpt_config.p, base);
995*91f16700Schasinglulu 	gpi_info->gpi_shift = GPT_L1_GPI_IDX(gpt_config.p, base) << 2;
996*91f16700Schasinglulu 
997*91f16700Schasinglulu 	gpi_info->gpt_l1_desc = (gpi_info->gpt_l1_addr)[gpi_info->idx];
998*91f16700Schasinglulu 	gpi_info->gpi = (gpi_info->gpt_l1_desc >> gpi_info->gpi_shift) &
999*91f16700Schasinglulu 		GPT_L1_GRAN_DESC_GPI_MASK;
1000*91f16700Schasinglulu 	return 0;
1001*91f16700Schasinglulu }
1002*91f16700Schasinglulu 
1003*91f16700Schasinglulu /*
1004*91f16700Schasinglulu  * This function is the granule transition delegate service. When a granule
1005*91f16700Schasinglulu  * transition request occurs it is routed to this function to have the request,
1006*91f16700Schasinglulu  * if valid, fulfilled following A1.1.1 Delegate of RME supplement
1007*91f16700Schasinglulu  *
1008*91f16700Schasinglulu  * TODO: implement support for transitioning multiple granules at once.
1009*91f16700Schasinglulu  *
1010*91f16700Schasinglulu  * Parameters
1011*91f16700Schasinglulu  *   base		Base address of the region to transition, must be
1012*91f16700Schasinglulu  *			aligned to granule size.
1013*91f16700Schasinglulu  *   size		Size of region to transition, must be aligned to granule
1014*91f16700Schasinglulu  *			size.
1015*91f16700Schasinglulu  *   src_sec_state	Security state of the caller.
1016*91f16700Schasinglulu  *
1017*91f16700Schasinglulu  * Return
1018*91f16700Schasinglulu  *   Negative Linux error code in the event of a failure, 0 for success.
1019*91f16700Schasinglulu  */
1020*91f16700Schasinglulu int gpt_delegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
1021*91f16700Schasinglulu {
1022*91f16700Schasinglulu 	gpi_info_t gpi_info;
1023*91f16700Schasinglulu 	uint64_t nse;
1024*91f16700Schasinglulu 	int res;
1025*91f16700Schasinglulu 	unsigned int target_pas;
1026*91f16700Schasinglulu 
1027*91f16700Schasinglulu 	/* Ensure that the tables have been set up before taking requests. */
1028*91f16700Schasinglulu 	assert(gpt_config.plat_gpt_l0_base != 0UL);
1029*91f16700Schasinglulu 
1030*91f16700Schasinglulu 	/* Ensure that caches are enabled. */
1031*91f16700Schasinglulu 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
1032*91f16700Schasinglulu 
1033*91f16700Schasinglulu 	/* Delegate request can only come from REALM or SECURE */
1034*91f16700Schasinglulu 	assert(src_sec_state == SMC_FROM_REALM ||
1035*91f16700Schasinglulu 	       src_sec_state == SMC_FROM_SECURE);
1036*91f16700Schasinglulu 
1037*91f16700Schasinglulu 	/* See if this is a single or a range of granule transition. */
1038*91f16700Schasinglulu 	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
1039*91f16700Schasinglulu 		return -EINVAL;
1040*91f16700Schasinglulu 	}
1041*91f16700Schasinglulu 
1042*91f16700Schasinglulu 	/* Check that base and size are valid */
1043*91f16700Schasinglulu 	if ((ULONG_MAX - base) < size) {
1044*91f16700Schasinglulu 		VERBOSE("[GPT] Transition request address overflow!\n");
1045*91f16700Schasinglulu 		VERBOSE("      Base=0x%" PRIx64 "\n", base);
1046*91f16700Schasinglulu 		VERBOSE("      Size=0x%lx\n", size);
1047*91f16700Schasinglulu 		return -EINVAL;
1048*91f16700Schasinglulu 	}
1049*91f16700Schasinglulu 
1050*91f16700Schasinglulu 	/* Make sure base and size are valid. */
1051*91f16700Schasinglulu 	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
1052*91f16700Schasinglulu 	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
1053*91f16700Schasinglulu 	    (size == 0UL) ||
1054*91f16700Schasinglulu 	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
1055*91f16700Schasinglulu 		VERBOSE("[GPT] Invalid granule transition address range!\n");
1056*91f16700Schasinglulu 		VERBOSE("      Base=0x%" PRIx64 "\n", base);
1057*91f16700Schasinglulu 		VERBOSE("      Size=0x%lx\n", size);
1058*91f16700Schasinglulu 		return -EINVAL;
1059*91f16700Schasinglulu 	}
1060*91f16700Schasinglulu 
1061*91f16700Schasinglulu 	target_pas = GPT_GPI_REALM;
1062*91f16700Schasinglulu 	if (src_sec_state == SMC_FROM_SECURE) {
1063*91f16700Schasinglulu 		target_pas = GPT_GPI_SECURE;
1064*91f16700Schasinglulu 	}
1065*91f16700Schasinglulu 
1066*91f16700Schasinglulu 	/*
1067*91f16700Schasinglulu 	 * Access to L1 tables is controlled by a global lock to ensure
1068*91f16700Schasinglulu 	 * that no more than one CPU is allowed to make changes at any
1069*91f16700Schasinglulu 	 * given time.
1070*91f16700Schasinglulu 	 */
1071*91f16700Schasinglulu 	spin_lock(&gpt_lock);
1072*91f16700Schasinglulu 	res = get_gpi_params(base, &gpi_info);
1073*91f16700Schasinglulu 	if (res != 0) {
1074*91f16700Schasinglulu 		spin_unlock(&gpt_lock);
1075*91f16700Schasinglulu 		return res;
1076*91f16700Schasinglulu 	}
1077*91f16700Schasinglulu 
1078*91f16700Schasinglulu 	/* Check that the current address is in NS state */
1079*91f16700Schasinglulu 	if (gpi_info.gpi != GPT_GPI_NS) {
1080*91f16700Schasinglulu 		VERBOSE("[GPT] Only Granule in NS state can be delegated.\n");
1081*91f16700Schasinglulu 		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
1082*91f16700Schasinglulu 			gpi_info.gpi);
1083*91f16700Schasinglulu 		spin_unlock(&gpt_lock);
1084*91f16700Schasinglulu 		return -EPERM;
1085*91f16700Schasinglulu 	}
1086*91f16700Schasinglulu 
1087*91f16700Schasinglulu 	if (src_sec_state == SMC_FROM_SECURE) {
1088*91f16700Schasinglulu 		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
1089*91f16700Schasinglulu 	} else {
1090*91f16700Schasinglulu 		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
1091*91f16700Schasinglulu 	}
1092*91f16700Schasinglulu 
1093*91f16700Schasinglulu 	/*
1094*91f16700Schasinglulu 	 * In order to maintain mutual distrust between Realm and Secure
1095*91f16700Schasinglulu 	 * states, remove any data speculatively fetched into the target
1096*91f16700Schasinglulu 	 * physical address space. Issue DC CIPAPA over address range
1097*91f16700Schasinglulu 	 */
1098*91f16700Schasinglulu 	flush_dcache_to_popa_range(nse | base,
1099*91f16700Schasinglulu 				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
1100*91f16700Schasinglulu 
1101*91f16700Schasinglulu 	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
1102*91f16700Schasinglulu 		  gpi_info.gpi_shift, gpi_info.idx, target_pas);
1103*91f16700Schasinglulu 	dsboshst();
1104*91f16700Schasinglulu 
1105*91f16700Schasinglulu 	gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
1106*91f16700Schasinglulu 	dsbosh();
1107*91f16700Schasinglulu 
1108*91f16700Schasinglulu 	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
1109*91f16700Schasinglulu 
1110*91f16700Schasinglulu 	flush_dcache_to_popa_range(nse | base,
1111*91f16700Schasinglulu 				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
1112*91f16700Schasinglulu 
1113*91f16700Schasinglulu 	/* Unlock access to the L1 tables. */
1114*91f16700Schasinglulu 	spin_unlock(&gpt_lock);
1115*91f16700Schasinglulu 
1116*91f16700Schasinglulu 	/*
1117*91f16700Schasinglulu 	 * The isb() will be done as part of context
1118*91f16700Schasinglulu 	 * synchronization when returning to lower EL
1119*91f16700Schasinglulu 	 */
1120*91f16700Schasinglulu 	VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
1121*91f16700Schasinglulu 		base, gpi_info.gpi, target_pas);
1122*91f16700Schasinglulu 
1123*91f16700Schasinglulu 	return 0;
1124*91f16700Schasinglulu }
1125*91f16700Schasinglulu 
1126*91f16700Schasinglulu /*
1127*91f16700Schasinglulu  * This function is the granule transition undelegate service. When a granule
1128*91f16700Schasinglulu  * transition request occurs it is routed to this function where the request is
1129*91f16700Schasinglulu  * validated then fulfilled if possible.
1130*91f16700Schasinglulu  *
1131*91f16700Schasinglulu  * TODO: implement support for transitioning multiple granules at once.
1132*91f16700Schasinglulu  *
1133*91f16700Schasinglulu  * Parameters
1134*91f16700Schasinglulu  *   base		Base address of the region to transition, must be
1135*91f16700Schasinglulu  *			aligned to granule size.
1136*91f16700Schasinglulu  *   size		Size of region to transition, must be aligned to granule
1137*91f16700Schasinglulu  *			size.
1138*91f16700Schasinglulu  *   src_sec_state	Security state of the caller.
1139*91f16700Schasinglulu  *
1140*91f16700Schasinglulu  * Return
1141*91f16700Schasinglulu  *    Negative Linux error code in the event of a failure, 0 for success.
1142*91f16700Schasinglulu  */
1143*91f16700Schasinglulu int gpt_undelegate_pas(uint64_t base, size_t size, unsigned int src_sec_state)
1144*91f16700Schasinglulu {
1145*91f16700Schasinglulu 	gpi_info_t gpi_info;
1146*91f16700Schasinglulu 	uint64_t nse;
1147*91f16700Schasinglulu 	int res;
1148*91f16700Schasinglulu 
1149*91f16700Schasinglulu 	/* Ensure that the tables have been set up before taking requests. */
1150*91f16700Schasinglulu 	assert(gpt_config.plat_gpt_l0_base != 0UL);
1151*91f16700Schasinglulu 
1152*91f16700Schasinglulu 	/* Ensure that MMU and caches are enabled. */
1153*91f16700Schasinglulu 	assert((read_sctlr_el3() & SCTLR_C_BIT) != 0UL);
1154*91f16700Schasinglulu 
1155*91f16700Schasinglulu 	/* Delegate request can only come from REALM or SECURE */
1156*91f16700Schasinglulu 	assert(src_sec_state == SMC_FROM_REALM ||
1157*91f16700Schasinglulu 	       src_sec_state == SMC_FROM_SECURE);
1158*91f16700Schasinglulu 
1159*91f16700Schasinglulu 	/* See if this is a single or a range of granule transition. */
1160*91f16700Schasinglulu 	if (size != GPT_PGS_ACTUAL_SIZE(gpt_config.p)) {
1161*91f16700Schasinglulu 		return -EINVAL;
1162*91f16700Schasinglulu 	}
1163*91f16700Schasinglulu 
1164*91f16700Schasinglulu 	/* Check that base and size are valid */
1165*91f16700Schasinglulu 	if ((ULONG_MAX - base) < size) {
1166*91f16700Schasinglulu 		VERBOSE("[GPT] Transition request address overflow!\n");
1167*91f16700Schasinglulu 		VERBOSE("      Base=0x%" PRIx64 "\n", base);
1168*91f16700Schasinglulu 		VERBOSE("      Size=0x%lx\n", size);
1169*91f16700Schasinglulu 		return -EINVAL;
1170*91f16700Schasinglulu 	}
1171*91f16700Schasinglulu 
1172*91f16700Schasinglulu 	/* Make sure base and size are valid. */
1173*91f16700Schasinglulu 	if (((base & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
1174*91f16700Schasinglulu 	    ((size & (GPT_PGS_ACTUAL_SIZE(gpt_config.p) - 1)) != 0UL) ||
1175*91f16700Schasinglulu 	    (size == 0UL) ||
1176*91f16700Schasinglulu 	    ((base + size) >= GPT_PPS_ACTUAL_SIZE(gpt_config.t))) {
1177*91f16700Schasinglulu 		VERBOSE("[GPT] Invalid granule transition address range!\n");
1178*91f16700Schasinglulu 		VERBOSE("      Base=0x%" PRIx64 "\n", base);
1179*91f16700Schasinglulu 		VERBOSE("      Size=0x%lx\n", size);
1180*91f16700Schasinglulu 		return -EINVAL;
1181*91f16700Schasinglulu 	}
1182*91f16700Schasinglulu 
1183*91f16700Schasinglulu 	/*
1184*91f16700Schasinglulu 	 * Access to L1 tables is controlled by a global lock to ensure
1185*91f16700Schasinglulu 	 * that no more than one CPU is allowed to make changes at any
1186*91f16700Schasinglulu 	 * given time.
1187*91f16700Schasinglulu 	 */
1188*91f16700Schasinglulu 	spin_lock(&gpt_lock);
1189*91f16700Schasinglulu 
1190*91f16700Schasinglulu 	res = get_gpi_params(base, &gpi_info);
1191*91f16700Schasinglulu 	if (res != 0) {
1192*91f16700Schasinglulu 		spin_unlock(&gpt_lock);
1193*91f16700Schasinglulu 		return res;
1194*91f16700Schasinglulu 	}
1195*91f16700Schasinglulu 
1196*91f16700Schasinglulu 	/* Check that the current address is in the delegated state */
1197*91f16700Schasinglulu 	if ((src_sec_state == SMC_FROM_REALM  &&
1198*91f16700Schasinglulu 	     gpi_info.gpi != GPT_GPI_REALM) ||
1199*91f16700Schasinglulu 	    (src_sec_state == SMC_FROM_SECURE &&
1200*91f16700Schasinglulu 	     gpi_info.gpi != GPT_GPI_SECURE)) {
1201*91f16700Schasinglulu 		VERBOSE("[GPT] Only Granule in REALM or SECURE state can be undelegated.\n");
1202*91f16700Schasinglulu 		VERBOSE("      Caller: %u, Current GPI: %u\n", src_sec_state,
1203*91f16700Schasinglulu 			gpi_info.gpi);
1204*91f16700Schasinglulu 		spin_unlock(&gpt_lock);
1205*91f16700Schasinglulu 		return -EPERM;
1206*91f16700Schasinglulu 	}
1207*91f16700Schasinglulu 
1208*91f16700Schasinglulu 
1209*91f16700Schasinglulu 	/* In order to maintain mutual distrust between Realm and Secure
1210*91f16700Schasinglulu 	 * states, remove access now, in order to guarantee that writes
1211*91f16700Schasinglulu 	 * to the currently-accessible physical address space will not
1212*91f16700Schasinglulu 	 * later become observable.
1213*91f16700Schasinglulu 	 */
1214*91f16700Schasinglulu 	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
1215*91f16700Schasinglulu 		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NO_ACCESS);
1216*91f16700Schasinglulu 	dsboshst();
1217*91f16700Schasinglulu 
1218*91f16700Schasinglulu 	gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
1219*91f16700Schasinglulu 	dsbosh();
1220*91f16700Schasinglulu 
1221*91f16700Schasinglulu 	if (src_sec_state == SMC_FROM_SECURE) {
1222*91f16700Schasinglulu 		nse = (uint64_t)GPT_NSE_SECURE << GPT_NSE_SHIFT;
1223*91f16700Schasinglulu 	} else {
1224*91f16700Schasinglulu 		nse = (uint64_t)GPT_NSE_REALM << GPT_NSE_SHIFT;
1225*91f16700Schasinglulu 	}
1226*91f16700Schasinglulu 
1227*91f16700Schasinglulu 	/* Ensure that the scrubbed data has made it past the PoPA */
1228*91f16700Schasinglulu 	flush_dcache_to_popa_range(nse | base,
1229*91f16700Schasinglulu 				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
1230*91f16700Schasinglulu 
1231*91f16700Schasinglulu 	/*
1232*91f16700Schasinglulu 	 * Remove any data loaded speculatively
1233*91f16700Schasinglulu 	 * in NS space from before the scrubbing
1234*91f16700Schasinglulu 	 */
1235*91f16700Schasinglulu 	nse = (uint64_t)GPT_NSE_NS << GPT_NSE_SHIFT;
1236*91f16700Schasinglulu 
1237*91f16700Schasinglulu 	flush_dcache_to_popa_range(nse | base,
1238*91f16700Schasinglulu 				   GPT_PGS_ACTUAL_SIZE(gpt_config.p));
1239*91f16700Schasinglulu 
1240*91f16700Schasinglulu 	/* Clear existing GPI encoding and transition granule. */
1241*91f16700Schasinglulu 	write_gpt(&gpi_info.gpt_l1_desc, gpi_info.gpt_l1_addr,
1242*91f16700Schasinglulu 		  gpi_info.gpi_shift, gpi_info.idx, GPT_GPI_NS);
1243*91f16700Schasinglulu 	dsboshst();
1244*91f16700Schasinglulu 
1245*91f16700Schasinglulu 	/* Ensure that all agents observe the new NS configuration */
1246*91f16700Schasinglulu 	gpt_tlbi_by_pa_ll(base, GPT_PGS_ACTUAL_SIZE(gpt_config.p));
1247*91f16700Schasinglulu 	dsbosh();
1248*91f16700Schasinglulu 
1249*91f16700Schasinglulu 	/* Unlock access to the L1 tables. */
1250*91f16700Schasinglulu 	spin_unlock(&gpt_lock);
1251*91f16700Schasinglulu 
1252*91f16700Schasinglulu 	/*
1253*91f16700Schasinglulu 	 * The isb() will be done as part of context
1254*91f16700Schasinglulu 	 * synchronization when returning to lower EL
1255*91f16700Schasinglulu 	 */
1256*91f16700Schasinglulu 	VERBOSE("[GPT] Granule 0x%" PRIx64 ", GPI 0x%x->0x%x\n",
1257*91f16700Schasinglulu 		base, gpi_info.gpi, GPT_GPI_NS);
1258*91f16700Schasinglulu 
1259*91f16700Schasinglulu 	return 0;
1260*91f16700Schasinglulu }
1261