/*
 * Copyright (c) 2017-2022, ARM Limited and Contributors. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <common/debug.h>
#include <cdefs.h>
#include <drivers/arm/smmu_v3.h>
#include <drivers/delay_timer.h>
#include <lib/mmio.h>
#include <arch_features.h>

/* SMMU poll timeout in microseconds */
#define SMMU_POLL_TIMEOUT_US	U(1000)

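/*
 * Poll the 32-bit register at 'smmu_reg' until the bits selected by 'mask'
 * read back equal to 'value', or until SMMU_POLL_TIMEOUT_US microseconds
 * have elapsed. Returns 0 on success, -1 on timeout.
 */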
static int smmuv3_poll(uintptr_t smmu_reg, uint32_t mask,
				uint32_t value)
{
	uint32_t reg_val;
	uint64_t timeout;

	/* Set 1ms timeout value */
	timeout = timeout_init_us(SMMU_POLL_TIMEOUT_US);
	do {
		reg_val = mmio_read_32(smmu_reg);
		if ((reg_val & mask) == value)
			return 0;
	} while (!timeout_elapsed(timeout));

	ERROR("Timeout polling SMMUv3 register @%p\n", (void *)smmu_reg);
	ERROR("Read value 0x%x, expected 0x%x\n", reg_val,
		value == 0U ? reg_val & ~mask : reg_val | mask);
	return -1;
}

/*
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_security_init(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_(S)_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

	/*
	 * SMMU_(S)_CR0 resets to zero with all streams bypassing the SMMU,
	 * so just abort all incoming transactions.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA,
			SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);

	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U)
		return -1;

	/* Check if the SMMU supports secure state */
	if ((mmio_read_32(smmu_base + SMMU_S_IDR1) &
				SMMU_S_IDR1_SECURE_IMPL) == 0U)
		return 0;

	/* Abort all incoming secure transactions */
	if (smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U) != 0U)
		return -1;

	mmio_setbits_32(smmu_base + SMMU_S_GBPA,
			SMMU_S_GBPA_UPDATE | SMMU_S_GBPA_ABORT);

	return smmuv3_poll(smmu_base + SMMU_S_GBPA, SMMU_S_GBPA_UPDATE, 0U);
}

/*
 * Initialize the SMMU by invalidating all secure caches and TLBs.
 * Abort all incoming transactions in order to implement a default
 * deny policy on reset.
 */
int __init smmuv3_init(uintptr_t smmu_base)
{
	/* Abort all incoming transactions */
	if (smmuv3_security_init(smmu_base) != 0)
		return -1;

#if ENABLE_RME

	if (get_armv9_2_feat_rme_support() != 0U) {
		if ((mmio_read_32(smmu_base + SMMU_ROOT_IDR0) &
				  SMMU_ROOT_IDR0_ROOT_IMPL) == 0U) {
			WARN("Skip SMMU GPC configuration.\n");
		} else {
			uint64_t gpccr_el3 = read_gpccr_el3();
			uint64_t gptbr_el3 = read_gptbr_el3();

			/* SMMU_ROOT_GPT_BASE_CFG[16] is RES0. */
			gpccr_el3 &= ~(1UL << 16);

			/*
			 * TODO: SMMU_ROOT_GPT_BASE_CFG is 64b in the spec,
			 * but SMMU model only accepts 32b access.
			 */
			mmio_write_32(smmu_base + SMMU_ROOT_GPT_BASE_CFG,
				      gpccr_el3);

			/*
			 * pa_gpt_table_base[51:12] is held in GPTBR_EL3[39:0],
			 * whereas SMMU_ROOT_GPT_BASE expects it in bits
			 * [51:12], hence the 12-bit left shift.
			 */
			mmio_write_64(smmu_base + SMMU_ROOT_GPT_BASE,
				      gptbr_el3 << 12);
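			/*
			 * For example (illustrative value only): a GPT at
			 * physical address 0x80600000 is reported by
			 * GPTBR_EL3 as BADDR = 0x80600, so the value written
			 * to SMMU_ROOT_GPT_BASE is 0x80600 << 12, i.e.
			 * 0x80600000 again.
			 */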

			/*
			 * ACCESSEN=1: SMMU- and client-originated accesses are
			 *             not terminated by this mechanism.
			 * GPCEN=1: All clients and SMMU-originated accesses,
			 *          except GPT-walks, are subject to GPC.
			 */
			mmio_setbits_32(smmu_base + SMMU_ROOT_CR0,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN);

			/* Poll for ACCESSEN and GPCEN ack bits. */
			if (smmuv3_poll(smmu_base + SMMU_ROOT_CR0ACK,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN,
					SMMU_ROOT_CR0_GPCEN |
					SMMU_ROOT_CR0_ACCESSEN) != 0) {
				WARN("Failed enabling SMMU GPC.\n");

				/*
				 * Do not return an error; instead fall back to
				 * invalidating all entries through the secure
				 * register file.
				 */
			}
		}
	}

#endif /* ENABLE_RME */

	/*
	 * Initiate invalidation of secure caches and TLBs if the SMMU
	 * supports secure state. If not, it's implementation defined
	 * as to how SMMU_S_INIT register is accessed.
	 * Arm SMMU Arch RME supplement, section 3.4: all SMMU registers
	 * specified to be accessible only in secure physical address space are
	 * additionally accessible in root physical address space in an SMMU
	 * with RME.
	 * Section 3.3: as GPT information is permitted to be cached in a TLB,
	 * the SMMU_S_INIT.INV_ALL mechanism also invalidates GPT information
	 * cached in TLBs.
	 */
	mmio_write_32(smmu_base + SMMU_S_INIT, SMMU_S_INIT_INV_ALL);

	/* Wait for global invalidation operation to finish */
	return smmuv3_poll(smmu_base + SMMU_S_INIT,
				SMMU_S_INIT_INV_ALL, 0U);
}

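/*
 * Put the non-secure programming interface of the SMMU into an abort-all
 * state: set SMMU_GBPA.ABORT so that incoming transactions are terminated
 * while the SMMU is disabled, then clear SMMU_CR0.SMMUEN so that the GBPA
 * setting takes effect. Returns 0 on success, -1 on a poll timeout.
 */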
int smmuv3_ns_set_abort_all(uintptr_t smmu_base)
{
	/* Attribute update has completed when SMMU_GBPA.Update bit is 0 */
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) {
		return -1;
	}

	/*
	 * Set the GBPA ABORT bit. The other GBPA fields are ignored while
	 * ABORT is set, so simply preserve their values.
	 */
	mmio_setbits_32(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE | SMMU_GBPA_ABORT);
	if (smmuv3_poll(smmu_base + SMMU_GBPA, SMMU_GBPA_UPDATE, 0U) != 0U) {
		return -1;
	}

	/* Disable the SMMU to engage the GBPA fields previously configured. */
	mmio_clrbits_32(smmu_base + SMMU_CR0, SMMU_CR0_SMMUEN);
	if (smmuv3_poll(smmu_base + SMMU_CR0ACK, SMMU_CR0_SMMUEN, 0U) != 0U) {
		return -1;
	}

	return 0;
}
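
/*
 * Typical usage (illustrative sketch only, not part of this driver): a
 * platform port calls smmuv3_init() once per SMMU instance during early
 * boot, before any device behind the SMMU is permitted to perform DMA.
 * PLAT_SMMUV3_BASE below is a hypothetical platform-defined base address:
 *
 *	if (smmuv3_init(PLAT_SMMUV3_BASE) != 0) {
 *		ERROR("SMMUv3 initialisation failed\n");
 *		panic();
 *	}
 */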