xref: /arm-trusted-firmware/drivers/arm/smmu/smmu_v3.c (revision 91f16700b400a8c0651d24a598fc48ee2997a0d7)
/*
 * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include <cdefs.h>
#include <drivers/arm/smmu_v3.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <arch_features.h>

/* SMMU register poll timeout, in microseconds */
#define SMMU_POLL_TIMEOUT_US	U(1000)

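/*
 * Poll a 32-bit SMMU register until the bits selected by 'mask' read back as
 * 'value', or until the poll timeout expires.
 * Returns 0 on success, -1 on timeout.
 */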
static int smmuv3_poll(uintptr_t smmu_reg, uint32_t mask,
				uint32_t value)
{
	uint32_t reg_val;
	uint64_t timeout;

	/* Set 1ms timeout value */
	timeout = timeout_init_us(SMMU_POLL_TIMEOUT_US);
	do {
		reg_val = mmio_read_32(smmu_reg);
		if ((reg_val & mask) == value)
			return 0;
	} while (!timeout_elapsed(timeout));

	ERROR("Timeout polling SMMUv3 register @%p\n", (void *)smmu_reg);
	ERROR("Read value 0x%x, expected 0x%x\n", reg_val,
		value == 0U ? reg_val & ~mask : reg_val | mask);
	return -1;
}

/*
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_security_init(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_(S)_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

	/*
	 * SMMU_(S)_CR0 resets to zero with all streams bypassing the SMMU,
	 * so just abort all incoming transactions.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA,
			SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);

	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

	/* Check if the SMMU supports secure state */
	if ((mmio_read_32(smmu_base + SMMU_S_IDR1) &
				SMMU_S_IDR1_SECURE_IMPL) == 0U)
		return 0;

	/* Abort all incoming secure transactions */
	if (smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U) != 0U)
		return -1;

	mmio_setbits_32(smmu_base + SMMU_S_GBPA,
			SMMU_S_GBPA_UPDATE | SMMU_S_GBPA_ABORT);

	return smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U);
}

/*
 * Initialize the SMMU by invalidating all secure caches and TLBs.
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_init(uintptr_t smmu_base)
{
	/* Abort all incoming transactions */
	if (smmuv3_security_init(smmu_base) != 0)
		return -1;

#if ENABLE_RME

	if (get_armv9_2_feat_rme_support() != 0U) {
		if ((mmio_read_32(smmu_base + SMMU_ROOT_IDR0) &
				  SMMU_ROOT_IDR0_ROOT_IMPL) == 0U) {
			WARN("Skip SMMU GPC configuration.\n");
		} else {
			uint64_t gpccr_el3 = read_gpccr_el3();
			uint64_t gptbr_el3 = read_gptbr_el3();

			/* SMMU_ROOT_GPT_BASE_CFG[16] is RES0. */
			gpccr_el3 &= ~(1UL << 16);
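			/*
			 * Explanatory note: in GPCCR_EL3, bit 16 is the
			 * PE-side GPC enable, which has no counterpart in
			 * SMMU_ROOT_GPT_BASE_CFG; the SMMU's own GPC enable
			 * is SMMU_ROOT_CR0.GPCEN, programmed below.
			 */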

			/*
			 * TODO: SMMU_ROOT_GPT_BASE_CFG is 64b in the spec,
			 * but the SMMU model only accepts 32b access.
			 */
			mmio_write_32(smmu_base + SMMU_ROOT_GPT_BASE_CFG,
				      gpccr_el3);

			/*
			 * pa_gpt_table_base[51:12] maps to GPTBR_EL3[39:0],
			 * whereas it maps to SMMU_ROOT_GPT_BASE[51:12],
			 * hence it needs a 12-bit left shift.
			 */
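			/*
			 * Worked example (illustrative values only): a GPT at
			 * PA 0x80000000 gives GPTBR_EL3.BADDR = 0x80000, and
			 * 0x80000 << 12 = 0x80000000 places the address at
			 * SMMU_ROOT_GPT_BASE[51:12] as intended.
			 */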
			mmio_write_64(smmu_base + SMMU_ROOT_GPT_BASE,
				      gptbr_el3 << 12);

			/*
			 * ACCESSEN=1: SMMU- and client-originated accesses are
			 *             not terminated by this mechanism.
			 * GPCEN=1: All client- and SMMU-originated accesses,
			 *          except GPT-walks, are subject to GPC.
			 */
			mmio_setbits_32(smmu_base + SMMU_ROOT_CR0,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN);

			/* Poll for the ACCESSEN and GPCEN ack bits. */
			if (smmuv3_poll(smmu_base + SMMU_ROOT_CR0ACK,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN) != 0) {
				WARN("Failed enabling SMMU GPC.\n");

				/*
				 * Do not return an error; fall back to
				 * invalidating all entries through the secure
				 * register file.
				 */
			}
		}
	}

#endif /* ENABLE_RME */

	/*
	 * Initiate invalidation of secure caches and TLBs if the SMMU
	 * supports secure state. If not, it is implementation defined
	 * how the SMMU_S_INIT register is accessed.
	 * Arm SMMU Arch RME supplement, section 3.4: all SMMU registers
	 * specified to be accessible only in secure physical address space are
	 * additionally accessible in root physical address space in an SMMU
	 * with RME.
	 * Section 3.3: as GPT information is permitted to be cached in a TLB,
	 * the SMMU_S_INIT.INV_ALL mechanism also invalidates GPT information
	 * cached in TLBs.
	 */
	mmio_write_32(smmu_base + SMMU_S_INIT, SMMU_S_INIT_INV_ALL);

	/* Wait for the global invalidation operation to finish */
	return smmuv3_poll(smmu_base + SMMU_S_INIT,
				SMMU_S_INIT_INV_ALL, 0U);
}
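/*
 * A minimal usage sketch (illustrative only, not part of this driver):
 * a platform port would typically call smmuv3_init() once during boot,
 * here assuming a hypothetical platform-defined PLAT_SMMUV3_BASE:
 *
 *	if (smmuv3_init(PLAT_SMMUV3_BASE) != 0) {
 *		ERROR("SMMUv3 initialization failed\n");
 *		panic();
 *	}
 */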
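/*
 * Configure the non-secure programming interface to abort all incoming
 * transactions: set SMMU_GBPA.ABORT, then disable the SMMU so that the
 * GBPA fields take effect.
 * Returns 0 on success, -1 on timeout.
 */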
int smmuv3_ns_set_abort_all(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) {
		return -1;
	}

	/*
	 * Set GBPA's ABORT bit. Other GBPA fields are presumably ignored then,
	 * so simply preserve their value.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) {
		return -1;
	}

	/* Disable the SMMU to engage the GBPA fields previously configured. */
	mmio_clrbits_32(smmu_base + SMMU_CR0, SMMU_CR0_SMMUEN);
	if (smmuv3_poll(smmu_base + SMMU_CR0ACK, SMMU_CR0_SMMUEN, 0U) != 0U) {
		return -1;
	}

	return 0;
}