xref: /arm-trusted-firmware/plat/marvell/armada/common/aarch64/marvell_common.c (revision 91f16700b400a8c0651d24a598fc48ee2997a0d7)
/*
 * Copyright (C) 2018 Marvell International Ltd.
 *
 * SPDX-License-Identifier:     BSD-3-Clause
 * https://spdx.org/licenses
 */
7*91f16700Schasinglulu 
8*91f16700Schasinglulu #include <assert.h>
9*91f16700Schasinglulu 
10*91f16700Schasinglulu #include <platform_def.h>
11*91f16700Schasinglulu 
12*91f16700Schasinglulu #include <arch.h>
13*91f16700Schasinglulu #include <arch_helpers.h>
14*91f16700Schasinglulu #include <common/debug.h>
15*91f16700Schasinglulu #include <lib/mmio.h>
16*91f16700Schasinglulu #include <lib/xlat_tables/xlat_tables_v2.h>
17*91f16700Schasinglulu 
18*91f16700Schasinglulu #include <plat_marvell.h>
19*91f16700Schasinglulu 
20*91f16700Schasinglulu /* Weak definitions may be overridden in specific ARM standard platform */
21*91f16700Schasinglulu #pragma weak plat_get_ns_image_entrypoint
22*91f16700Schasinglulu #pragma weak plat_marvell_get_mmap
23*91f16700Schasinglulu 
/*
 * Set up the translation tables for the generic and platform-specific memory
 * regions seen by the running BL image. The extents of the generic memory
 * regions are specified by the function arguments and consist of:
 * - Trusted SRAM seen by the BL image (mapped read-write first);
 * - Code section (re-mapped executable, read-only);
 * - Read-only data section (re-mapped non-executable, read-only);
 * - Coherent memory region, if applicable (re-mapped as device memory).
 *
 * All regions are identity-mapped (the same value is passed for the physical
 * and virtual base of each mmap_add_region() call) and marked secure. The
 * order of the calls matters: the code/rodata/coherent mappings are added
 * after the whole-SRAM mapping so they refine its attributes for those
 * sub-ranges. Platform regions from plat_marvell_get_mmap() are added last,
 * then the translation tables are built.
 */
void marvell_setup_page_tables(uintptr_t total_base,
			       size_t total_size,
			       uintptr_t code_start,
			       uintptr_t code_limit,
			       uintptr_t rodata_start,
			       uintptr_t rodata_limit
#if USE_COHERENT_MEM
			       ,
			       uintptr_t coh_start,
			       uintptr_t coh_limit
#endif
			   )
{
	/*
	 * Map the Trusted SRAM with appropriate memory attributes.
	 * Subsequent mappings will adjust the attributes for specific regions.
	 */
	VERBOSE("Trusted SRAM seen by this BL image: %p - %p\n",
		(void *) total_base, (void *) (total_base + total_size));
	mmap_add_region(total_base, total_base,
			total_size,
			MT_MEMORY | MT_RW | MT_SECURE);

	/* Re-map the code section as executable, read-only */
	VERBOSE("Code region: %p - %p\n",
		(void *) code_start, (void *) code_limit);
	mmap_add_region(code_start, code_start,
			code_limit - code_start,
			MT_CODE | MT_SECURE);

	/* Re-map the read-only data section as non-executable, read-only */
	VERBOSE("Read-only data region: %p - %p\n",
		(void *) rodata_start, (void *) rodata_limit);
	mmap_add_region(rodata_start, rodata_start,
			rodata_limit - rodata_start,
			MT_RO_DATA | MT_SECURE);

#if USE_COHERENT_MEM
	/*
	 * Re-map the coherent memory region as device memory so that
	 * accesses bypass the data cache.
	 */
	VERBOSE("Coherent region: %p - %p\n",
		(void *) coh_start, (void *) coh_limit);
	mmap_add_region(coh_start, coh_start,
			coh_limit - coh_start,
			MT_DEVICE | MT_RW | MT_SECURE);
#endif

	/* Now (re-)map the platform-specific memory regions */
	mmap_add(plat_marvell_get_mmap());

	/* Create the page tables to reflect the above mappings */
	init_xlat_tables();
}
85*91f16700Schasinglulu 
86*91f16700Schasinglulu unsigned long plat_get_ns_image_entrypoint(void)
87*91f16700Schasinglulu {
88*91f16700Schasinglulu 	return PLAT_MARVELL_NS_IMAGE_OFFSET;
89*91f16700Schasinglulu }
90*91f16700Schasinglulu 
/*****************************************************************************
 * Gets SPSR for BL32 entry
 *****************************************************************************
 */
uint32_t marvell_get_spsr_for_bl32_entry(void)
{
	/*
	 * The Secure Payload Dispatcher service is responsible for
	 * setting the SPSR prior to entry into the BL32 image.
	 */
	uint32_t spsr = 0U;

	return spsr;
}
103*91f16700Schasinglulu 
104*91f16700Schasinglulu /*****************************************************************************
105*91f16700Schasinglulu  * Gets SPSR for BL33 entry
106*91f16700Schasinglulu  *****************************************************************************
107*91f16700Schasinglulu  */
108*91f16700Schasinglulu uint32_t marvell_get_spsr_for_bl33_entry(void)
109*91f16700Schasinglulu {
110*91f16700Schasinglulu 	unsigned long el_status;
111*91f16700Schasinglulu 	unsigned int mode;
112*91f16700Schasinglulu 	uint32_t spsr;
113*91f16700Schasinglulu 
114*91f16700Schasinglulu 	/* Figure out what mode we enter the non-secure world in */
115*91f16700Schasinglulu 	el_status = read_id_aa64pfr0_el1() >> ID_AA64PFR0_EL2_SHIFT;
116*91f16700Schasinglulu 	el_status &= ID_AA64PFR0_ELX_MASK;
117*91f16700Schasinglulu 
118*91f16700Schasinglulu 	mode = (el_status) ? MODE_EL2 : MODE_EL1;
119*91f16700Schasinglulu 
120*91f16700Schasinglulu 	/*
121*91f16700Schasinglulu 	 * TODO: Consider the possibility of specifying the SPSR in
122*91f16700Schasinglulu 	 * the FIP ToC and allowing the platform to have a say as
123*91f16700Schasinglulu 	 * well.
124*91f16700Schasinglulu 	 */
125*91f16700Schasinglulu 	spsr = SPSR_64(mode, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
126*91f16700Schasinglulu 	return spsr;
127*91f16700Schasinglulu }
128*91f16700Schasinglulu 
129*91f16700Schasinglulu /*****************************************************************************
130*91f16700Schasinglulu  * Returns ARM platform specific memory map regions.
131*91f16700Schasinglulu  *****************************************************************************
132*91f16700Schasinglulu  */
133*91f16700Schasinglulu const mmap_region_t *plat_marvell_get_mmap(void)
134*91f16700Schasinglulu {
135*91f16700Schasinglulu 	return plat_marvell_mmap;
136*91f16700Schasinglulu }
137*91f16700Schasinglulu 
138