xref: /arm-trusted-firmware/plat/nxp/common/setup/ls_common.c (revision 91f16700b400a8c0651d24a598fc48ee2997a0d7)
1*91f16700Schasinglulu /*
2*91f16700Schasinglulu  * Copyright 2018-2022 NXP
3*91f16700Schasinglulu  *
4*91f16700Schasinglulu  * SPDX-License-Identifier: BSD-3-Clause
5*91f16700Schasinglulu  *
6*91f16700Schasinglulu  */
7*91f16700Schasinglulu 
8*91f16700Schasinglulu #include <assert.h>
9*91f16700Schasinglulu 
10*91f16700Schasinglulu #include <arch.h>
11*91f16700Schasinglulu #include <arch_helpers.h>
12*91f16700Schasinglulu #include <common/debug.h>
13*91f16700Schasinglulu #include <lib/mmio.h>
14*91f16700Schasinglulu #include <lib/xlat_tables/xlat_tables_v2.h>
15*91f16700Schasinglulu #include <mmu_def.h>
16*91f16700Schasinglulu #include <plat/common/platform.h>
17*91f16700Schasinglulu 
18*91f16700Schasinglulu #include "plat_common.h"
19*91f16700Schasinglulu #include "platform_def.h"
20*91f16700Schasinglulu 
21*91f16700Schasinglulu const mmap_region_t *plat_ls_get_mmap(void);
22*91f16700Schasinglulu 
/*
 * Table of memory regions for various BL stages to map using the MMU.
 * This doesn't include Trusted SRAM as ls_setup_page_tables() already
 * takes care of mapping it.
 *
 * The flash needs to be mapped as writable in order to erase the FIP's Table of
 * Contents in case of unrecoverable error (see plat_error_handler()).
 */
#ifdef IMAGE_BL2
/* BL2: only the CCSR region needs a platform-specific mapping. */
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	{0}	/* list terminator for mmap_add() */
};
#endif

#ifdef IMAGE_BL31
/* BL31: CCSR, the DCSR block (only on SoCs that define it), and OCRAM. */
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
#ifdef NXP_DCSR_ADDR
	LS_MAP_DCSR,
#endif
	LS_MAP_OCRAM,
	{0}	/* list terminator for mmap_add() */
};
#endif
#ifdef IMAGE_BL32
/* BL32: CCSR plus the BL32 secure memory region. */
const mmap_region_t plat_ls_mmap[] = {
	LS_MAP_CCSR,
	LS_MAP_BL32_SEC_MEM,
	{0}	/* list terminator for mmap_add() */
};
#endif
55*91f16700Schasinglulu 
56*91f16700Schasinglulu /* Weak definitions may be overridden in specific NXP SoC */
57*91f16700Schasinglulu #pragma weak plat_get_ns_image_entrypoint
58*91f16700Schasinglulu #pragma weak plat_ls_get_mmap
59*91f16700Schasinglulu 
60*91f16700Schasinglulu #if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
61*91f16700Schasinglulu static void mmap_add_ddr_regions_statically(void)
62*91f16700Schasinglulu {
63*91f16700Schasinglulu 	int i = 0;
64*91f16700Schasinglulu 	dram_regions_info_t *info_dram_regions = get_dram_regions_info();
65*91f16700Schasinglulu 	/* MMU map for Non-Secure DRAM Regions */
66*91f16700Schasinglulu 	VERBOSE("DRAM Region %d: %p - %p\n", i,
67*91f16700Schasinglulu 			(void *) info_dram_regions->region[i].addr,
68*91f16700Schasinglulu 			(void *) (info_dram_regions->region[i].addr
69*91f16700Schasinglulu 				+ info_dram_regions->region[i].size
70*91f16700Schasinglulu 				- 1));
71*91f16700Schasinglulu 	mmap_add_region(info_dram_regions->region[i].addr,
72*91f16700Schasinglulu 			info_dram_regions->region[i].addr,
73*91f16700Schasinglulu 			info_dram_regions->region[i].size,
74*91f16700Schasinglulu 			MT_MEMORY | MT_RW | MT_NS);
75*91f16700Schasinglulu 
76*91f16700Schasinglulu 	/* MMU map for Secure DDR Region on DRAM-0 */
77*91f16700Schasinglulu 	if (info_dram_regions->region[i].size >
78*91f16700Schasinglulu 		(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
79*91f16700Schasinglulu 		VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
80*91f16700Schasinglulu 			(void *) (info_dram_regions->region[i].addr
81*91f16700Schasinglulu 				+ info_dram_regions->region[i].size),
82*91f16700Schasinglulu 			(void *) (info_dram_regions->region[i].addr
83*91f16700Schasinglulu 				+ info_dram_regions->region[i].size
84*91f16700Schasinglulu 				+ NXP_SECURE_DRAM_SIZE
85*91f16700Schasinglulu 				+ NXP_SP_SHRD_DRAM_SIZE
86*91f16700Schasinglulu 				- 1));
87*91f16700Schasinglulu 		mmap_add_region((info_dram_regions->region[i].addr
88*91f16700Schasinglulu 				+ info_dram_regions->region[i].size),
89*91f16700Schasinglulu 				(info_dram_regions->region[i].addr
90*91f16700Schasinglulu 				+ info_dram_regions->region[i].size),
91*91f16700Schasinglulu 				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
92*91f16700Schasinglulu 				MT_MEMORY | MT_RW | MT_SECURE);
93*91f16700Schasinglulu 	}
94*91f16700Schasinglulu 
95*91f16700Schasinglulu #ifdef IMAGE_BL31
96*91f16700Schasinglulu 	for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
97*91f16700Schasinglulu 		if (info_dram_regions->region[i].size == 0)
98*91f16700Schasinglulu 			break;
99*91f16700Schasinglulu 		VERBOSE("DRAM Region %d: %p - %p\n", i,
100*91f16700Schasinglulu 			(void *) info_dram_regions->region[i].addr,
101*91f16700Schasinglulu 			(void *) (info_dram_regions->region[i].addr
102*91f16700Schasinglulu 				+ info_dram_regions->region[i].size
103*91f16700Schasinglulu 				- 1));
104*91f16700Schasinglulu 		mmap_add_region(info_dram_regions->region[i].addr,
105*91f16700Schasinglulu 				info_dram_regions->region[i].addr,
106*91f16700Schasinglulu 				info_dram_regions->region[i].size,
107*91f16700Schasinglulu 				MT_MEMORY | MT_RW | MT_NS);
108*91f16700Schasinglulu 	}
109*91f16700Schasinglulu #endif
110*91f16700Schasinglulu }
111*91f16700Schasinglulu #endif
112*91f16700Schasinglulu 
#if defined(PLAT_XLAT_TABLES_DYNAMIC)
/*
 * Add one dynamic MMU region, treating failure as fatal: without the
 * mapping any later access to that range would fault anyway.
 */
static void add_dynamic_region_or_panic(unsigned long long base_pa,
					uintptr_t base_va, size_t size,
					unsigned int attr)
{
	int ret = mmap_add_dynamic_region(base_pa, base_va, size, attr);

	if (ret != 0) {
		ERROR("Failed to add dynamic memory region\n");
		panic();
	}
}

/*
 * Add dynamic MMU mappings for the DRAM regions reported by
 * get_dram_regions_info():
 *  - region 0 is mapped non-secure;
 *  - if region 0 is large enough to also carry the secure and
 *    secure-shared areas, those are mapped MT_SECURE right after it;
 *  - (BL31 only) every further region is mapped non-secure, stopping
 *    at the first region whose size is zero.
 * Any mapping failure ends in panic().
 */
void mmap_add_ddr_region_dynamically(void)
{
	int i = 0;
	dram_regions_info_t *info_dram_regions = get_dram_regions_info();

	/* MMU map for Non-Secure DRAM Regions */
	VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
	add_dynamic_region_or_panic(info_dram_regions->region[i].addr,
			info_dram_regions->region[i].addr,
			info_dram_regions->region[i].size,
			MT_MEMORY | MT_RW | MT_NS);

	/* MMU map for Secure DDR Region on DRAM-0 */
	if (info_dram_regions->region[i].size >
		(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE)) {
		VERBOSE("Secure DRAM Region %d: %p - %p\n", i,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				+ NXP_SECURE_DRAM_SIZE
				+ NXP_SP_SHRD_DRAM_SIZE
				- 1));
		add_dynamic_region_or_panic((info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size),
				(NXP_SECURE_DRAM_SIZE + NXP_SP_SHRD_DRAM_SIZE),
				MT_MEMORY | MT_RW | MT_SECURE);
	}

#ifdef IMAGE_BL31
	for (i = 1; i < info_dram_regions->num_dram_regions; i++) {
		if (info_dram_regions->region[i].size == 0) {
			break;
		}
		VERBOSE("DRAM Region %d: %p - %p\n", i,
			(void *) info_dram_regions->region[i].addr,
			(void *) (info_dram_regions->region[i].addr
				+ info_dram_regions->region[i].size
				- 1));
		add_dynamic_region_or_panic(info_dram_regions->region[i].addr,
				info_dram_regions->region[i].addr,
				info_dram_regions->region[i].size,
				MT_MEMORY | MT_RW | MT_NS);
	}
#endif
}
#endif
179*91f16700Schasinglulu 
180*91f16700Schasinglulu /*
181*91f16700Schasinglulu  * Set up the page tables for the generic and platform-specific memory regions.
182*91f16700Schasinglulu  * The extents of the generic memory regions are specified by the function
183*91f16700Schasinglulu  * arguments and consist of:
184*91f16700Schasinglulu  * - Trusted SRAM seen by the BL image;
185*91f16700Schasinglulu  * - Code section;
186*91f16700Schasinglulu  * - Read-only data section;
187*91f16700Schasinglulu  * - Coherent memory region, if applicable.
188*91f16700Schasinglulu  */
189*91f16700Schasinglulu void ls_setup_page_tables(uintptr_t total_base,
190*91f16700Schasinglulu 			   size_t total_size,
191*91f16700Schasinglulu 			   uintptr_t code_start,
192*91f16700Schasinglulu 			   uintptr_t code_limit,
193*91f16700Schasinglulu 			   uintptr_t rodata_start,
194*91f16700Schasinglulu 			   uintptr_t rodata_limit
195*91f16700Schasinglulu #if USE_COHERENT_MEM
196*91f16700Schasinglulu 			   ,
197*91f16700Schasinglulu 			   uintptr_t coh_start,
198*91f16700Schasinglulu 			   uintptr_t coh_limit
199*91f16700Schasinglulu #endif
200*91f16700Schasinglulu 			   )
201*91f16700Schasinglulu {
202*91f16700Schasinglulu 	/*
203*91f16700Schasinglulu 	 * Map the Trusted SRAM with appropriate memory attributes.
204*91f16700Schasinglulu 	 * Subsequent mappings will adjust the attributes for specific regions.
205*91f16700Schasinglulu 	 */
206*91f16700Schasinglulu 	VERBOSE("Memory seen by this BL image: %p - %p\n",
207*91f16700Schasinglulu 		(void *) total_base, (void *) (total_base + total_size));
208*91f16700Schasinglulu 	mmap_add_region(total_base, total_base,
209*91f16700Schasinglulu 			total_size,
210*91f16700Schasinglulu 			MT_MEMORY | MT_RW | MT_SECURE);
211*91f16700Schasinglulu 
212*91f16700Schasinglulu 	/* Re-map the code section */
213*91f16700Schasinglulu 	VERBOSE("Code region: %p - %p\n",
214*91f16700Schasinglulu 		(void *) code_start, (void *) code_limit);
215*91f16700Schasinglulu 	mmap_add_region(code_start, code_start,
216*91f16700Schasinglulu 			code_limit - code_start,
217*91f16700Schasinglulu 			MT_CODE | MT_SECURE);
218*91f16700Schasinglulu 
219*91f16700Schasinglulu 	/* Re-map the read-only data section */
220*91f16700Schasinglulu 	VERBOSE("Read-only data region: %p - %p\n",
221*91f16700Schasinglulu 		(void *) rodata_start, (void *) rodata_limit);
222*91f16700Schasinglulu 	mmap_add_region(rodata_start, rodata_start,
223*91f16700Schasinglulu 			rodata_limit - rodata_start,
224*91f16700Schasinglulu 			MT_RO_DATA | MT_SECURE);
225*91f16700Schasinglulu 
226*91f16700Schasinglulu #if USE_COHERENT_MEM
227*91f16700Schasinglulu 	/* Re-map the coherent memory region */
228*91f16700Schasinglulu 	VERBOSE("Coherent region: %p - %p\n",
229*91f16700Schasinglulu 		(void *) coh_start, (void *) coh_limit);
230*91f16700Schasinglulu 	mmap_add_region(coh_start, coh_start,
231*91f16700Schasinglulu 			coh_limit - coh_start,
232*91f16700Schasinglulu 			MT_DEVICE | MT_RW | MT_SECURE);
233*91f16700Schasinglulu #endif
234*91f16700Schasinglulu 
235*91f16700Schasinglulu 	/* Now (re-)map the platform-specific memory regions */
236*91f16700Schasinglulu 	mmap_add(plat_ls_get_mmap());
237*91f16700Schasinglulu 
238*91f16700Schasinglulu 
239*91f16700Schasinglulu #if defined(IMAGE_BL31) || !defined(CONFIG_DDR_FIP_IMAGE)
240*91f16700Schasinglulu 	mmap_add_ddr_regions_statically();
241*91f16700Schasinglulu #endif
242*91f16700Schasinglulu 
243*91f16700Schasinglulu 	/* Create the page tables to reflect the above mappings */
244*91f16700Schasinglulu 	init_xlat_tables();
245*91f16700Schasinglulu }
246*91f16700Schasinglulu 
247*91f16700Schasinglulu /*******************************************************************************
248*91f16700Schasinglulu  * Returns NXP platform specific memory map regions.
249*91f16700Schasinglulu  ******************************************************************************/
250*91f16700Schasinglulu const mmap_region_t *plat_ls_get_mmap(void)
251*91f16700Schasinglulu {
252*91f16700Schasinglulu 	return plat_ls_mmap;
253*91f16700Schasinglulu }
254*91f16700Schasinglulu 
255*91f16700Schasinglulu /*
256*91f16700Schasinglulu  * This function get the number of clusters and cores count per cluster
257*91f16700Schasinglulu  * in the SoC.
258*91f16700Schasinglulu  */
259*91f16700Schasinglulu void get_cluster_info(const struct soc_type *soc_list, uint8_t ps_count,
260*91f16700Schasinglulu 		uint8_t *num_clusters, uint8_t *cores_per_cluster)
261*91f16700Schasinglulu {
262*91f16700Schasinglulu 	const soc_info_t *soc_info = get_soc_info();
263*91f16700Schasinglulu 	*num_clusters = NUMBER_OF_CLUSTERS;
264*91f16700Schasinglulu 	*cores_per_cluster = CORES_PER_CLUSTER;
265*91f16700Schasinglulu 	unsigned int i;
266*91f16700Schasinglulu 
267*91f16700Schasinglulu 	for (i = 0U; i < ps_count; i++) {
268*91f16700Schasinglulu 		if (soc_list[i].version == soc_info->svr_reg.bf_ver.version) {
269*91f16700Schasinglulu 			*num_clusters = soc_list[i].num_clusters;
270*91f16700Schasinglulu 			*cores_per_cluster = soc_list[i].cores_per_cluster;
271*91f16700Schasinglulu 			break;
272*91f16700Schasinglulu 		}
273*91f16700Schasinglulu 	}
274*91f16700Schasinglulu 
275*91f16700Schasinglulu 	VERBOSE("NUM of cluster = 0x%x, Cores per cluster = 0x%x\n",
276*91f16700Schasinglulu 			*num_clusters, *cores_per_cluster);
277*91f16700Schasinglulu }
278